/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 * Copyright 2017 Nexenta Systems, Inc.
 */
/*
 * Copyright 2011 cyril.galibern@opensvc.com
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#include <sys/dkioc_free_util.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
#else /* !__fibre */
#define	SD_MODULE_NAME	"SCSI Disk Driver"
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
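
/*
 * Illustrative sketch (not driver code): the kind of "interconnect-type"
 * property check described above. ddi_prop_get_int(9F) is a real DDI
 * interface; the surrounding logic and variable names are hypothetical,
 * and the INTERCONNECT_* values come from common/sys/scsi/impl/services.h
 * as noted in the comment:
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1);
 *
 *	if ((itype == INTERCONNECT_FIBRE) ||
 *	    (itype == INTERCONNECT_SSA) ||
 *	    (itype == INTERCONNECT_FABRIC)) {
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *	} else {
 *		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
 *	}
 *
 * A default of -1 (no property) falls into the parallel SCSI case, which
 * is exactly the backward-compatible behavior described above.
 */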

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
"sd_retry_on_reservation_conflict"; 253 254 /* 255 * Global data for debug logging. To enable debug printing, sd_component_mask 256 * and sd_level_mask should be set to the desired bit patterns as outlined in 257 * sddef.h. 258 */ 259 uint_t sd_component_mask = 0x0; 260 uint_t sd_level_mask = 0x0; 261 struct sd_lun *sd_debug_un = NULL; 262 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 263 264 /* Note: these may go away in the future... */ 265 static uint32_t sd_xbuf_active_limit = 512; 266 static uint32_t sd_xbuf_reserve_limit = 16; 267 268 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 269 270 /* 271 * Timer value used to reset the throttle after it has been reduced 272 * (typically in response to TRAN_BUSY or STATUS_QFULL) 273 */ 274 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 275 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 276 277 /* 278 * Interval value associated with the media change scsi watch. 279 */ 280 static int sd_check_media_time = 3000000; 281 282 /* 283 * Wait value used for in progress operations during a DDI_SUSPEND 284 */ 285 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 286 287 /* 288 * sd_label_mutex protects a static buffer used in the disk label 289 * component of the driver 290 */ 291 static kmutex_t sd_label_mutex; 292 293 /* 294 * sd_detach_mutex protects un_layer_count, un_detach_count, and 295 * un_opens_in_progress in the sd_lun structure. 296 */ 297 static kmutex_t sd_detach_mutex; 298 299 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 300 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 301 302 /* 303 * Global buffer and mutex for debug logging 304 */ 305 static char sd_log_buf[1024]; 306 static kmutex_t sd_log_mutex; 307 308 /* 309 * Structs and globals for recording attached lun information. 310 * This maintains a chain. Each node in the chain represents a SCSI controller. 311 * The structure records the number of luns attached to each target connected 312 * with the controller. 313 * For parallel scsi device only. 314 */ 315 struct sd_scsi_hba_tgt_lun { 316 struct sd_scsi_hba_tgt_lun *next; 317 dev_info_t *pdip; 318 int nlun[NTARGETS_WIDE]; 319 }; 320 321 /* 322 * Flag to indicate the lun is attached or detached 323 */ 324 #define SD_SCSI_LUN_ATTACH 0 325 #define SD_SCSI_LUN_DETACH 1 326 327 static kmutex_t sd_scsi_target_lun_mutex; 328 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 329 330 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 331 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 332 333 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 334 sd_scsi_target_lun_head)) 335 336 /* 337 * "Smart" Probe Caching structs, globals, #defines, etc. 338 * For parallel scsi and non-self-identify device only. 339 */ 340 341 /* 342 * The following resources and routines are implemented to support 343 * "smart" probing, which caches the scsi_probe() results in an array, 344 * in order to help avoid long probe times. 345 */ 346 struct sd_scsi_probe_cache { 347 struct sd_scsi_probe_cache *next; 348 dev_info_t *pdip; 349 int cache[NTARGETS_WIDE]; 350 }; 351 352 static kmutex_t sd_scsi_probe_cache_mutex; 353 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 354 355 /* 356 * Really we only need protection on the head of the linked list, but 357 * better safe than sorry. 

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache	*next;
	dev_info_t			*pdip;
	int				cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))

/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
	{ "NAME=spindle-motor", "0=off", "1=on", NULL },
	{0, 100},
	{30, 0},
	{20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
	{ "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
	    "3=active", NULL },
	{0, 0, 0, 100},
	{90, 90, 20, 0},
	{15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
	SD_TARGET_START_VALID,
	SD_TARGET_STANDBY,
	SD_TARGET_IDLE,
	SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
	    | SD_CONF_BSET_CTYPE
	    | SD_CONF_BSET_NRR_COUNT
	    | SD_CONF_BSET_FAB_DEVID
	    | SD_CONF_BSET_NOCACHE
	    | SD_CONF_BSET_BSY_RETRY_COUNT
	    | SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_TRK_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4
	    | SD_CONF_BSET_RST_RETRIES
	    | SD_CONF_BSET_RSV_REL_TIME
	    | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
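
/*
 * Illustrative sketch (hypothetical helper, not the driver's actual
 * sd_blank_cmp()): one way to implement the rule described in the table
 * comment above, where runs of consecutive blanks compare equal to a
 * single blank. Collapse blank runs in both the table entry and the
 * device's inquiry string, then compare the results:
 *
 *	static void
 *	squeeze_blanks(char *s)
 *	{
 *		char *d = s;
 *
 *		while (*s != '\0') {
 *			*d++ = *s;
 *			if (*s++ == ' ') {
 *				while (*s == ' ')
 *					s++;
 *			}
 *		}
 *		*d = '\0';
 *	}
 *
 * With this, "NEC     CDROM" squeezes to "NEC CDROM" and matches a table
 * entry of " NEC CDROM " when compared over the entry's squeezed length.
 */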

/*
 * Emulation mode disk drive VID/PID table
 */
static char sd_flash_dev_table[][25] = {
	"ATA     MARVELL SD88SA02",
	"MARVELL SD88SA02",
	"TOSHIBA THNSNV05",
};

static const int sd_flash_dev_table_size =
	sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_INTERCONNECT_SAS		5

#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
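
/*
 * Illustrative sketch (hypothetical, not the driver's actual selection
 * logic): sd_cdbtab[] is ordered from smallest to largest CDB group, so
 * a read/write setup routine can pick the smallest group that addresses
 * both the starting LBA and the block count. The sc_maxlba/sc_maxlen
 * field names and the un_mincdb/un_maxcdb limits are assumptions taken
 * from the sd_cdbinfo/sd_lun definitions in sddef.h:
 *
 *	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
 *		struct sd_cdbinfo *cp = &sd_cdbtab[i];
 *
 *		if ((lba + blockcount - 1 <= cp->sc_maxlba) &&
 *		    (blockcount <= cp->sc_maxlen))
 *			break;	(smallest CDB group that fits)
 *	}
 */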

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_check_bdc_vpd		ssd_check_bdc_vpd
#define	sd_check_emulation_mode		ssd_check_emulation_mode

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_pm_state_change		ssd_pm_state_change
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_write_cache_changeable	ssd_get_write_cache_changeable
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_dev_status_event		ssd_log_dev_status_event
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_log_eject_request_event	ssd_log_eject_request_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION	\
				ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define	sd_gesn_media_data_valid	ssd_gesn_media_data_valid
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info_com		ssd_get_media_info_com
#define	sd_get_media_info		ssd_get_media_info
#define	sd_get_media_info_ext		ssd_get_media_info_ext
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_watch_request_submit		ssd_watch_request_submit
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo
#define	sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */

typedef struct unmap_param_hdr_s {
	uint16_t	uph_data_len;
	uint16_t	uph_descr_data_len;
	uint32_t	uph_reserved;
} unmap_param_hdr_t;

typedef struct unmap_blk_descr_s {
	uint64_t	ubd_lba;
	uint32_t	ubd_lba_cnt;
	uint32_t	ubd_reserved;
} unmap_blk_descr_t;

/* Max number of block descriptors in UNMAP command */
#define	SD_UNMAP_MAX_DESCR \
	((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
/* Max size of the UNMAP parameter list in bytes */
#define	SD_UNMAP_PARAM_LIST_MAXSZ	(sizeof (unmap_param_hdr_t) + \
	SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
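
/*
 * Worked example of the sizing above, assuming the structures pack with
 * no padding (2 + 2 + 4 = 8 bytes for the parameter header, and
 * 8 + 4 + 4 = 16 bytes per block descriptor):
 *
 *	SD_UNMAP_MAX_DESCR        = (65535 - 8) / 16 = 4095 descriptors
 *	SD_UNMAP_PARAM_LIST_MAXSZ = 8 + 4095 * 16    = 65528 bytes
 *
 * i.e. the largest parameter list always fits within the 16-bit
 * allocation length of the UNMAP CDB.
 */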

int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Using sd_ssc_init to establish sd_ssc_t struct
 * Using sd_ssc_send to send uscsi internal command
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *	 sd_ssc_post will call sd_ssc_print to print system log
 *	 sd_ssc_post will call sd_ssc_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);
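
/*
 * Hypothetical usage sketch of the pattern described above (the function
 * names are the ones declared here; the uscsi setup and the
 * SD_FMT_STANDARD/SD_FMT_IGNORE assessment values are assumptions taken
 * from sddef.h):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *
 *	(build a struct uscsi_cmd ucmd for the internal command)
 *	if (sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT) == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */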

/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_bdc_vpd(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
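
/*
 * Each iostart/iodone function above is one layer in a chain of
 * processing steps: a layer does its own work on the buf and then
 * dispatches to the next index in the chain. A hypothetical layer might
 * look like the sketch below; the sd_iostart_chain array name is the one
 * renamed earlier in this file, but the exact dispatch form is an
 * assumption, not the driver's literal code:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(layer-specific processing of bp goes here)
 *		(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
 *	}
 */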

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
sd_taskq_create(void); 1530 static void sd_taskq_delete(void); 1531 static void sd_target_change_task(void *arg); 1532 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1533 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1534 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1535 static void sd_media_change_task(void *arg); 1536 1537 static int sd_handle_mchange(struct sd_lun *un); 1538 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1539 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1540 uint32_t *lbap, int path_flag); 1541 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1542 uint32_t *lbap, uint32_t *psp, int path_flag); 1543 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1544 int flag, int path_flag); 1545 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1546 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1547 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1548 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1549 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1550 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1551 uchar_t usr_cmd, uchar_t *usr_bufp); 1552 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1553 struct dk_callback *dkc); 1554 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1555 static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, 1556 int flag); 1557 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1558 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1559 uchar_t *bufaddr, uint_t buflen, int path_flag); 1560 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1561 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1562 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1563 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1564 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1565 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1566 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1567 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1568 size_t buflen, daddr_t start_block, int path_flag); 1569 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1570 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1571 path_flag) 1572 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1573 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1574 path_flag) 1575 1576 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1577 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1578 uint16_t param_ptr, int path_flag); 1579 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1580 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1581 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1582 1583 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1584 static void sd_free_rqs(struct sd_lun *un); 1585 1586 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1587 uchar_t *data, int len, int fmt); 1588 static void sd_panic_for_res_conflict(struct sd_lun *un); 1589 1590 /* 1591 * Disk Ioctl Function Prototypes 1592 */ 1593 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 
1594 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1595 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1596 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1597 1598 /* 1599 * Multi-host Ioctl Prototypes 1600 */ 1601 static int sd_check_mhd(dev_t dev, int interval); 1602 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1603 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1604 static char *sd_sname(uchar_t status); 1605 static void sd_mhd_resvd_recover(void *arg); 1606 static void sd_resv_reclaim_thread(); 1607 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1608 static int sd_reserve_release(dev_t dev, int cmd); 1609 static void sd_rmv_resv_reclaim_req(dev_t dev); 1610 static void sd_mhd_reset_notify_cb(caddr_t arg); 1611 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1612 mhioc_inkeys_t *usrp, int flag); 1613 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1614 mhioc_inresvs_t *usrp, int flag); 1615 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1616 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1617 static int sd_mhdioc_release(dev_t dev); 1618 static int sd_mhdioc_register_devid(dev_t dev); 1619 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1620 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1621 1622 /* 1623 * SCSI removable prototypes 1624 */ 1625 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1626 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1627 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1628 static int sr_pause_resume(dev_t dev, int mode); 1629 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1630 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1631 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1632 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1633 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1634 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1635 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1636 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1637 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1638 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1639 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1640 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1641 static int sr_eject(dev_t dev); 1642 static void sr_ejected(register struct sd_lun *un); 1643 static int sr_check_wp(dev_t dev); 1644 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1645 static int sd_check_media(dev_t dev, enum dkio_state state); 1646 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1647 static void sd_delayed_cv_broadcast(void *arg); 1648 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1649 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1650 1651 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1652 1653 /* 1654 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1655 */ 1656 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1657 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1658 static void sd_wm_cache_destructor(void *wm, void *un); 1659 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1660 daddr_t endb, ushort_t typ); 1661 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1662 daddr_t endb); 1663 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1664 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1665 static void sd_read_modify_write_task(void * arg); 1666 static int 1667 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1668 struct buf **bpp); 1669 1670 1671 /* 1672 * Function prototypes for failfast support. 1673 */ 1674 static void sd_failfast_flushq(struct sd_lun *un); 1675 static int sd_failfast_flushq_callback(struct buf *bp); 1676 1677 /* 1678 * Function prototypes to check for lsi devices 1679 */ 1680 static void sd_is_lsi(struct sd_lun *un); 1681 1682 /* 1683 * Function prototypes for partial DMA support 1684 */ 1685 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1686 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1687 1688 1689 /* Function prototypes for cmlb */ 1690 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1691 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1692 1693 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1694 1695 /* 1696 * For printing RMW warning message timely 1697 */ 1698 static void sd_rmw_msg_print_handler(void *arg); 1699 1700 /* 1701 * Constants for failfast support: 1702 * 1703 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1704 * failfast processing being performed. 1705 * 1706 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1707 * failfast processing on all bufs with B_FAILFAST set. 1708 */ 1709 1710 #define SD_FAILFAST_INACTIVE 0 1711 #define SD_FAILFAST_ACTIVE 1 1712 1713 /* 1714 * Bitmask to control behavior of buf(9S) flushes when a transition to 1715 * the failfast state occurs. Optional bits include: 1716 * 1717 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1718 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1719 * be flushed. 1720 * 1721 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1722 * driver, in addition to the regular wait queue. This includes the xbuf 1723 * queues. When clear, only the driver's wait queue will be flushed. 1724 */ 1725 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1726 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1727 1728 /* 1729 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1730 * to flush all queues within the driver. 
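 *
 * For example (an illustrative tuning, not a recommendation), setting
 * sd_failfast_flushctl to
 * (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES), e.g. with
 * an /etc/system line such as "set sd:sd_failfast_flushctl = 0x3", would
 * flush every queued buf, B_FAILFAST or not, from all queues in the
 * driver when an instance enters the failfast state.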
1731 */ 1732 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1733 1734 1735 /* 1736 * SD Testing Fault Injection 1737 */ 1738 #ifdef SD_FAULT_INJECTION 1739 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1740 static void sd_faultinjection(struct scsi_pkt *pktp); 1741 static void sd_injection_log(char *buf, struct sd_lun *un); 1742 #endif 1743 1744 /* 1745 * Device driver ops vector 1746 */ 1747 static struct cb_ops sd_cb_ops = { 1748 sdopen, /* open */ 1749 sdclose, /* close */ 1750 sdstrategy, /* strategy */ 1751 nodev, /* print */ 1752 sddump, /* dump */ 1753 sdread, /* read */ 1754 sdwrite, /* write */ 1755 sdioctl, /* ioctl */ 1756 nodev, /* devmap */ 1757 nodev, /* mmap */ 1758 nodev, /* segmap */ 1759 nochpoll, /* poll */ 1760 sd_prop_op, /* cb_prop_op */ 1761 0, /* streamtab */ 1762 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1763 CB_REV, /* cb_rev */ 1764 sdaread, /* async I/O read entry point */ 1765 sdawrite /* async I/O write entry point */ 1766 }; 1767 1768 struct dev_ops sd_ops = { 1769 DEVO_REV, /* devo_rev, */ 1770 0, /* refcnt */ 1771 sdinfo, /* info */ 1772 nulldev, /* identify */ 1773 sdprobe, /* probe */ 1774 sdattach, /* attach */ 1775 sddetach, /* detach */ 1776 nodev, /* reset */ 1777 &sd_cb_ops, /* driver operations */ 1778 NULL, /* bus operations */ 1779 sdpower, /* power */ 1780 ddi_quiesce_not_needed, /* quiesce */ 1781 }; 1782 1783 /* 1784 * This is the loadable module wrapper. 1785 */ 1786 #include <sys/modctl.h> 1787 1788 static struct modldrv modldrv = { 1789 &mod_driverops, /* Type of module. This one is a driver */ 1790 SD_MODULE_NAME, /* Module name. */ 1791 &sd_ops /* driver ops */ 1792 }; 1793 1794 static struct modlinkage modlinkage = { 1795 MODREV_1, &modldrv, NULL 1796 }; 1797 1798 static cmlb_tg_ops_t sd_tgops = { 1799 TG_DK_OPS_VERSION_1, 1800 sd_tg_rdwr, 1801 sd_tg_getinfo 1802 }; 1803 1804 static struct scsi_asq_key_strings sd_additional_codes[] = { 1805 0x81, 0, "Logical Unit is Reserved", 1806 0x85, 0, "Audio Address Not Valid", 1807 0xb6, 0, "Media Load Mechanism Failed", 1808 0xB9, 0, "Audio Play Operation Aborted", 1809 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1810 0x53, 2, "Medium removal prevented", 1811 0x6f, 0, "Authentication failed during key exchange", 1812 0x6f, 1, "Key not present", 1813 0x6f, 2, "Key not established", 1814 0x6f, 3, "Read without proper authentication", 1815 0x6f, 4, "Mismatched region to this logical unit", 1816 0x6f, 5, "Region reset count error", 1817 0xffff, 0x0, NULL 1818 }; 1819 1820 1821 /* 1822 * Struct for passing printing information for sense data messages 1823 */ 1824 struct sd_sense_info { 1825 int ssi_severity; 1826 int ssi_pfa_flag; 1827 }; 1828 1829 /* 1830 * Table of function pointers for iostart-side routines. Separate "chains" 1831 * of layered function calls are formed by placing the function pointers 1832 * sequentially in the desired order. Functions are called according to an 1833 * incrementing table index ordering. The last function in each chain must 1834 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1835 * in the sd_iodone_chain[] array. 1836 * 1837 * Note: It may seem more natural to organize both the iostart and iodone 1838 * functions together, into an array of structures (or some similar 1839 * organization) with a common index, rather than two separate arrays which 1840 * must be maintained in synchronization. 
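 * (As a purely hypothetical sketch, such a combined organization might
 * have looked like:
 *
 *	struct sd_chain_pair {
 *		sd_chain_t	scp_iostart;
 *		sd_chain_t	scp_iodone;
 *	} sd_chain_pairs[];
 *
 * indexed once per chain; the names above are illustrative only.)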
The purpose of this division is
1841 * to achieve improved performance: individual arrays allow for more
1842 * effective cache line utilization on certain platforms.
1843 */
1844
1845 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1846
1847
1848 static sd_chain_t sd_iostart_chain[] = {
1849
1850 /* Chain for buf IO for disk drive targets (PM enabled) */
1851 sd_mapblockaddr_iostart, /* Index: 0 */
1852 sd_pm_iostart, /* Index: 1 */
1853 sd_core_iostart, /* Index: 2 */
1854
1855 /* Chain for buf IO for disk drive targets (PM disabled) */
1856 sd_mapblockaddr_iostart, /* Index: 3 */
1857 sd_core_iostart, /* Index: 4 */
1858
1859 /*
1860 * Chain for buf IO for removable-media or large sector size
1861 * disk drive targets with RMW needed (PM enabled)
1862 */
1863 sd_mapblockaddr_iostart, /* Index: 5 */
1864 sd_mapblocksize_iostart, /* Index: 6 */
1865 sd_pm_iostart, /* Index: 7 */
1866 sd_core_iostart, /* Index: 8 */
1867
1868 /*
1869 * Chain for buf IO for removable-media or large sector size
1870 * disk drive targets with RMW needed (PM disabled)
1871 */
1872 sd_mapblockaddr_iostart, /* Index: 9 */
1873 sd_mapblocksize_iostart, /* Index: 10 */
1874 sd_core_iostart, /* Index: 11 */
1875
1876 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1877 sd_mapblockaddr_iostart, /* Index: 12 */
1878 sd_checksum_iostart, /* Index: 13 */
1879 sd_pm_iostart, /* Index: 14 */
1880 sd_core_iostart, /* Index: 15 */
1881
1882 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1883 sd_mapblockaddr_iostart, /* Index: 16 */
1884 sd_checksum_iostart, /* Index: 17 */
1885 sd_core_iostart, /* Index: 18 */
1886
1887 /* Chain for USCSI commands (all targets) */
1888 sd_pm_iostart, /* Index: 19 */
1889 sd_core_iostart, /* Index: 20 */
1890
1891 /* Chain for checksumming USCSI commands (all targets) */
1892 sd_checksum_uscsi_iostart, /* Index: 21 */
1893 sd_pm_iostart, /* Index: 22 */
1894 sd_core_iostart, /* Index: 23 */
1895
1896 /* Chain for "direct" USCSI commands (all targets) */
1897 sd_core_iostart, /* Index: 24 */
1898
1899 /* Chain for "direct priority" USCSI commands (all targets) */
1900 sd_core_iostart, /* Index: 25 */
1901
1902 /*
1903 * Chain for buf IO for large sector size disk drive targets
1904 * with RMW needed with checksumming (PM enabled)
1905 */
1906 sd_mapblockaddr_iostart, /* Index: 26 */
1907 sd_mapblocksize_iostart, /* Index: 27 */
1908 sd_checksum_iostart, /* Index: 28 */
1909 sd_pm_iostart, /* Index: 29 */
1910 sd_core_iostart, /* Index: 30 */
1911
1912 /*
1913 * Chain for buf IO for large sector size disk drive targets
1914 * with RMW needed with checksumming (PM disabled)
1915 */
1916 sd_mapblockaddr_iostart, /* Index: 31 */
1917 sd_mapblocksize_iostart, /* Index: 32 */
1918 sd_checksum_iostart, /* Index: 33 */
1919 sd_core_iostart, /* Index: 34 */
1920
1921 };
1922
1923 /*
1924 * Macros to locate the first function of each iostart chain in the
1925 * sd_iostart_chain[] array. These are located by the index in the array.
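 *
 * For example, a buf IO on the default disk chain enters at
 * SD_CHAIN_DISK_IOSTART (index 0) and flows through
 * sd_mapblockaddr_iostart (Index: 0) and sd_pm_iostart (Index: 1) before
 * terminating in sd_core_iostart (Index: 2).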
1926 */
1927 #define SD_CHAIN_DISK_IOSTART 0
1928 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1929 #define SD_CHAIN_MSS_DISK_IOSTART 5
1930 #define SD_CHAIN_RMMEDIA_IOSTART 5
1931 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1932 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1933 #define SD_CHAIN_CHKSUM_IOSTART 12
1934 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1935 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1936 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1937 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1938 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1939 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1940 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1941
1942
1943 /*
1944 * Table of function pointers for the iodone-side routines for the driver-
1945 * internal layering mechanism. The calling sequence for iodone routines
1946 * uses a decrementing table index, so the last routine called in a chain
1947 * must be at the lowest array index location for that chain. The last
1948 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1949 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1950 * of the functions in an iodone side chain must correspond to the ordering
1951 * of the iostart routines for that chain. Note that there is no iodone
1952 * side routine that corresponds to sd_core_iostart(), so there is no
1953 * entry in the table for this.
1954 */
1955
1956 static sd_chain_t sd_iodone_chain[] = {
1957
1958 /* Chain for buf IO for disk drive targets (PM enabled) */
1959 sd_buf_iodone, /* Index: 0 */
1960 sd_mapblockaddr_iodone, /* Index: 1 */
1961 sd_pm_iodone, /* Index: 2 */
1962
1963 /* Chain for buf IO for disk drive targets (PM disabled) */
1964 sd_buf_iodone, /* Index: 3 */
1965 sd_mapblockaddr_iodone, /* Index: 4 */
1966
1967 /*
1968 * Chain for buf IO for removable-media or large sector size
1969 * disk drive targets with RMW needed (PM enabled)
1970 */
1971 sd_buf_iodone, /* Index: 5 */
1972 sd_mapblockaddr_iodone, /* Index: 6 */
1973 sd_mapblocksize_iodone, /* Index: 7 */
1974 sd_pm_iodone, /* Index: 8 */
1975
1976 /*
1977 * Chain for buf IO for removable-media or large sector size
1978 * disk drive targets with RMW needed (PM disabled)
1979 */
1980 sd_buf_iodone, /* Index: 9 */
1981 sd_mapblockaddr_iodone, /* Index: 10 */
1982 sd_mapblocksize_iodone, /* Index: 11 */
1983
1984 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1985 sd_buf_iodone, /* Index: 12 */
1986 sd_mapblockaddr_iodone, /* Index: 13 */
1987 sd_checksum_iodone, /* Index: 14 */
1988 sd_pm_iodone, /* Index: 15 */
1989
1990 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1991 sd_buf_iodone, /* Index: 16 */
1992 sd_mapblockaddr_iodone, /* Index: 17 */
1993 sd_checksum_iodone, /* Index: 18 */
1994
1995 /* Chain for USCSI commands (non-checksum targets) */
1996 sd_uscsi_iodone, /* Index: 19 */
1997 sd_pm_iodone, /* Index: 20 */
1998
1999 /* Chain for USCSI commands (checksum targets) */
2000 sd_uscsi_iodone, /* Index: 21 */
2001 sd_checksum_uscsi_iodone, /* Index: 22 */
2002 sd_pm_iodone, /* Index: 23 */
2003
2004 /* Chain for "direct" USCSI commands (all targets) */
2005 sd_uscsi_iodone, /* Index: 24 */
2006
2007 /* Chain for "direct priority" USCSI commands (all targets) */
2008 sd_uscsi_iodone, /* Index: 25 */
2009
2010 /*
2011 * Chain for buf IO for large sector size disk drive targets
2012 * with checksumming (PM enabled)
2013 */
2014 sd_buf_iodone, /* Index: 26 */
2015 sd_mapblockaddr_iodone, /* Index: 27 */
2016 sd_mapblocksize_iodone, /* Index: 28 */
2017
sd_checksum_iodone, /* Index: 29 */
2018 sd_pm_iodone, /* Index: 30 */
2019
2020 /*
2021 * Chain for buf IO for large sector size disk drive targets
2022 * with checksumming (PM disabled)
2023 */
2024 sd_buf_iodone, /* Index: 31 */
2025 sd_mapblockaddr_iodone, /* Index: 32 */
2026 sd_mapblocksize_iodone, /* Index: 33 */
2027 sd_checksum_iodone, /* Index: 34 */
2028 };
2029
2030
2031 /*
2032 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2033 * each iodone-side chain. These are located by the array index, but as the
2034 * iodone side functions are called in a decrementing-index order, the
2035 * highest index number in each chain must be specified (as these correspond
2036 * to the first function in the iodone chain that will be called by the core
2037 * at IO completion time).
2038 */
2039
2040 #define SD_CHAIN_DISK_IODONE 2
2041 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2042 #define SD_CHAIN_RMMEDIA_IODONE 8
2043 #define SD_CHAIN_MSS_DISK_IODONE 8
2044 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2045 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2046 #define SD_CHAIN_CHKSUM_IODONE 15
2047 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2048 #define SD_CHAIN_USCSI_CMD_IODONE 20
2049 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
2050 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2051 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2052 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2053 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2054
2055
2056
2057 /*
2058 * Array to map a layering chain index to the appropriate initpkt routine.
2059 * The redundant entries are present so that the index used for accessing
2060 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2061 * with this table as well.
2062 */
2063 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2064
2065 static sd_initpkt_t sd_initpkt_map[] = {
2066
2067 /* Chain for buf IO for disk drive targets (PM enabled) */
2068 sd_initpkt_for_buf, /* Index: 0 */
2069 sd_initpkt_for_buf, /* Index: 1 */
2070 sd_initpkt_for_buf, /* Index: 2 */
2071
2072 /* Chain for buf IO for disk drive targets (PM disabled) */
2073 sd_initpkt_for_buf, /* Index: 3 */
2074 sd_initpkt_for_buf, /* Index: 4 */
2075
2076 /*
2077 * Chain for buf IO for removable-media or large sector size
2078 * disk drive targets (PM enabled)
2079 */
2080 sd_initpkt_for_buf, /* Index: 5 */
2081 sd_initpkt_for_buf, /* Index: 6 */
2082 sd_initpkt_for_buf, /* Index: 7 */
2083 sd_initpkt_for_buf, /* Index: 8 */
2084
2085 /*
2086 * Chain for buf IO for removable-media or large sector size
2087 * disk drive targets (PM disabled)
2088 */
2089 sd_initpkt_for_buf, /* Index: 9 */
2090 sd_initpkt_for_buf, /* Index: 10 */
2091 sd_initpkt_for_buf, /* Index: 11 */
2092
2093 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2094 sd_initpkt_for_buf, /* Index: 12 */
2095 sd_initpkt_for_buf, /* Index: 13 */
2096 sd_initpkt_for_buf, /* Index: 14 */
2097 sd_initpkt_for_buf, /* Index: 15 */
2098
2099 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2100 sd_initpkt_for_buf, /* Index: 16 */
2101 sd_initpkt_for_buf, /* Index: 17 */
2102 sd_initpkt_for_buf, /* Index: 18 */
2103
2104 /* Chain for USCSI commands (non-checksum targets) */
2105 sd_initpkt_for_uscsi, /* Index: 19 */
2106 sd_initpkt_for_uscsi, /* Index: 20 */
2107
2108 /* Chain for USCSI commands (checksum targets) */
2109 sd_initpkt_for_uscsi, /* Index: 21 */
2110 sd_initpkt_for_uscsi, /* Index: 22 */
2111 sd_initpkt_for_uscsi, /* Index: 23 */
2112
2113 /* Chain for "direct" USCSI
commands (all targets) */
2114 sd_initpkt_for_uscsi, /* Index: 24 */
2115
2116 /* Chain for "direct priority" USCSI commands (all targets) */
2117 sd_initpkt_for_uscsi, /* Index: 25 */
2118
2119 /*
2120 * Chain for buf IO for large sector size disk drive targets
2121 * with checksumming (PM enabled)
2122 */
2123 sd_initpkt_for_buf, /* Index: 26 */
2124 sd_initpkt_for_buf, /* Index: 27 */
2125 sd_initpkt_for_buf, /* Index: 28 */
2126 sd_initpkt_for_buf, /* Index: 29 */
2127 sd_initpkt_for_buf, /* Index: 30 */
2128
2129 /*
2130 * Chain for buf IO for large sector size disk drive targets
2131 * with checksumming (PM disabled)
2132 */
2133 sd_initpkt_for_buf, /* Index: 31 */
2134 sd_initpkt_for_buf, /* Index: 32 */
2135 sd_initpkt_for_buf, /* Index: 33 */
2136 sd_initpkt_for_buf, /* Index: 34 */
2137 };
2138
2139
2140 /*
2141 * Array to map a layering chain index to the appropriate destroypkt routine.
2142 * The redundant entries are present so that the index used for accessing
2143 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2144 * with this table as well.
2145 */
2146 typedef void (*sd_destroypkt_t)(struct buf *);
2147
2148 static sd_destroypkt_t sd_destroypkt_map[] = {
2149
2150 /* Chain for buf IO for disk drive targets (PM enabled) */
2151 sd_destroypkt_for_buf, /* Index: 0 */
2152 sd_destroypkt_for_buf, /* Index: 1 */
2153 sd_destroypkt_for_buf, /* Index: 2 */
2154
2155 /* Chain for buf IO for disk drive targets (PM disabled) */
2156 sd_destroypkt_for_buf, /* Index: 3 */
2157 sd_destroypkt_for_buf, /* Index: 4 */
2158
2159 /*
2160 * Chain for buf IO for removable-media or large sector size
2161 * disk drive targets (PM enabled)
2162 */
2163 sd_destroypkt_for_buf, /* Index: 5 */
2164 sd_destroypkt_for_buf, /* Index: 6 */
2165 sd_destroypkt_for_buf, /* Index: 7 */
2166 sd_destroypkt_for_buf, /* Index: 8 */
2167
2168 /*
2169 * Chain for buf IO for removable-media or large sector size
2170 * disk drive targets (PM disabled)
2171 */
2172 sd_destroypkt_for_buf, /* Index: 9 */
2173 sd_destroypkt_for_buf, /* Index: 10 */
2174 sd_destroypkt_for_buf, /* Index: 11 */
2175
2176 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2177 sd_destroypkt_for_buf, /* Index: 12 */
2178 sd_destroypkt_for_buf, /* Index: 13 */
2179 sd_destroypkt_for_buf, /* Index: 14 */
2180 sd_destroypkt_for_buf, /* Index: 15 */
2181
2182 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2183 sd_destroypkt_for_buf, /* Index: 16 */
2184 sd_destroypkt_for_buf, /* Index: 17 */
2185 sd_destroypkt_for_buf, /* Index: 18 */
2186
2187 /* Chain for USCSI commands (non-checksum targets) */
2188 sd_destroypkt_for_uscsi, /* Index: 19 */
2189 sd_destroypkt_for_uscsi, /* Index: 20 */
2190
2191 /* Chain for USCSI commands (checksum targets) */
2192 sd_destroypkt_for_uscsi, /* Index: 21 */
2193 sd_destroypkt_for_uscsi, /* Index: 22 */
2194 sd_destroypkt_for_uscsi, /* Index: 23 */
2195
2196 /* Chain for "direct" USCSI commands (all targets) */
2197 sd_destroypkt_for_uscsi, /* Index: 24 */
2198
2199 /* Chain for "direct priority" USCSI commands (all targets) */
2200 sd_destroypkt_for_uscsi, /* Index: 25 */
2201
2202 /*
2203 * Chain for buf IO for large sector size disk drive targets
2204 * with checksumming (PM enabled)
2205 */
2206 sd_destroypkt_for_buf, /* Index: 26 */
2207 sd_destroypkt_for_buf, /* Index: 27 */
2208 sd_destroypkt_for_buf, /* Index: 28 */
2209 sd_destroypkt_for_buf, /* Index: 29 */
2210 sd_destroypkt_for_buf, /* Index: 30 */
2211
2212 /*
2213 * Chain
for buf IO for large sector size disk drive targets
2214 * with checksumming (PM disabled)
2215 */
2216 sd_destroypkt_for_buf, /* Index: 31 */
2217 sd_destroypkt_for_buf, /* Index: 32 */
2218 sd_destroypkt_for_buf, /* Index: 33 */
2219 sd_destroypkt_for_buf, /* Index: 34 */
2220 };
2221
2222
2223
2224 /*
2225 * Array to map a layering chain index to the appropriate chain "type".
2226 * The chain type indicates a specific property/usage of the chain.
2227 * The redundant entries are present so that the index used for accessing
2228 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2229 * with this table as well.
2230 */
2231
2232 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2233 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2234 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2235 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2236 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2237 /* (for error recovery) */
2238
2239 static int sd_chain_type_map[] = {
2240
2241 /* Chain for buf IO for disk drive targets (PM enabled) */
2242 SD_CHAIN_BUFIO, /* Index: 0 */
2243 SD_CHAIN_BUFIO, /* Index: 1 */
2244 SD_CHAIN_BUFIO, /* Index: 2 */
2245
2246 /* Chain for buf IO for disk drive targets (PM disabled) */
2247 SD_CHAIN_BUFIO, /* Index: 3 */
2248 SD_CHAIN_BUFIO, /* Index: 4 */
2249
2250 /*
2251 * Chain for buf IO for removable-media or large sector size
2252 * disk drive targets (PM enabled)
2253 */
2254 SD_CHAIN_BUFIO, /* Index: 5 */
2255 SD_CHAIN_BUFIO, /* Index: 6 */
2256 SD_CHAIN_BUFIO, /* Index: 7 */
2257 SD_CHAIN_BUFIO, /* Index: 8 */
2258
2259 /*
2260 * Chain for buf IO for removable-media or large sector size
2261 * disk drive targets (PM disabled)
2262 */
2263 SD_CHAIN_BUFIO, /* Index: 9 */
2264 SD_CHAIN_BUFIO, /* Index: 10 */
2265 SD_CHAIN_BUFIO, /* Index: 11 */
2266
2267 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2268 SD_CHAIN_BUFIO, /* Index: 12 */
2269 SD_CHAIN_BUFIO, /* Index: 13 */
2270 SD_CHAIN_BUFIO, /* Index: 14 */
2271 SD_CHAIN_BUFIO, /* Index: 15 */
2272
2273 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2274 SD_CHAIN_BUFIO, /* Index: 16 */
2275 SD_CHAIN_BUFIO, /* Index: 17 */
2276 SD_CHAIN_BUFIO, /* Index: 18 */
2277
2278 /* Chain for USCSI commands (non-checksum targets) */
2279 SD_CHAIN_USCSI, /* Index: 19 */
2280 SD_CHAIN_USCSI, /* Index: 20 */
2281
2282 /* Chain for USCSI commands (checksum targets) */
2283 SD_CHAIN_USCSI, /* Index: 21 */
2284 SD_CHAIN_USCSI, /* Index: 22 */
2285 SD_CHAIN_USCSI, /* Index: 23 */
2286
2287 /* Chain for "direct" USCSI commands (all targets) */
2288 SD_CHAIN_DIRECT, /* Index: 24 */
2289
2290 /* Chain for "direct priority" USCSI commands (all targets) */
2291 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2292
2293 /*
2294 * Chain for buf IO for large sector size disk drive targets
2295 * with checksumming (PM enabled)
2296 */
2297 SD_CHAIN_BUFIO, /* Index: 26 */
2298 SD_CHAIN_BUFIO, /* Index: 27 */
2299 SD_CHAIN_BUFIO, /* Index: 28 */
2300 SD_CHAIN_BUFIO, /* Index: 29 */
2301 SD_CHAIN_BUFIO, /* Index: 30 */
2302
2303 /*
2304 * Chain for buf IO for large sector size disk drive targets
2305 * with checksumming (PM disabled)
2306 */
2307 SD_CHAIN_BUFIO, /* Index: 31 */
2308 SD_CHAIN_BUFIO, /* Index: 32 */
2309 SD_CHAIN_BUFIO, /* Index: 33 */
2310 SD_CHAIN_BUFIO, /* Index: 34 */
2311 };
2312
2313
2314 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
*/
2315 #define SD_IS_BUFIO(xp) \
2316 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2317
2318 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2319 #define SD_IS_DIRECT_PRIORITY(xp) \
2320 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2321
2322
2323
2324 /*
2325 * Struct, array, and macros to map a specific chain to the appropriate
2326 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2327 *
2328 * The sd_chain_index_map[] array is used at attach time to set the various
2329 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2330 * chain to be used with the instance. This allows different instances to use
2331 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2332 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2333 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2334 * dynamically and without the use of locking; and (2) a layer to update the
2335 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2336 * allowing deferred processing of an IO within the same chain from a
2337 * different execution context.
2338 */
2339
2340 struct sd_chain_index {
2341 int sci_iostart_index;
2342 int sci_iodone_index;
2343 };
2344
2345 static struct sd_chain_index sd_chain_index_map[] = {
2346 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2347 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2348 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2349 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2350 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2351 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2352 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2353 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2354 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2355 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2356 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2357 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2358
2359 };
2360
2361
2362 /*
2363 * The following are indexes into the sd_chain_index_map[] array.
2364 */
2365
2366 /* un->un_buf_chain_type must be set to one of these */
2367 #define SD_CHAIN_INFO_DISK 0
2368 #define SD_CHAIN_INFO_DISK_NO_PM 1
2369 #define SD_CHAIN_INFO_RMMEDIA 2
2370 #define SD_CHAIN_INFO_MSS_DISK 2
2371 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2372 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2373 #define SD_CHAIN_INFO_CHKSUM 4
2374 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2375 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2376 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2377
2378 /* un->un_uscsi_chain_type must be set to one of these */
2379 #define SD_CHAIN_INFO_USCSI_CMD 6
2380 /* USCSI with PM disabled is the same as DIRECT */
2381 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2382 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2383
2384 /* un->un_direct_chain_type must be set to one of these */
2385 #define SD_CHAIN_INFO_DIRECT_CMD 8
2386
2387 /* un->un_priority_chain_type must be set to one of these */
2388 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2389
2390 /* size for devid inquiries */
2391 #define MAX_INQUIRY_SIZE 0xF0
2392
2393 /*
2394 * Macros used by functions to pass a given buf(9S) struct along to the
2395 * next function in the layering chain for further processing.
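 *
 * For example, an iostart routine running at chain index i typically
 * hands the buf to the next layer via SD_NEXT_IOSTART(i, un, bp), and
 * the corresponding iodone routine unwinds the layers via
 * SD_NEXT_IODONE(i, un, bp).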
2396 * 2397 * In the following macros, passing more than three arguments to the called 2398 * routines causes the optimizer for the SPARC compiler to stop doing tail 2399 * call elimination which results in significant performance degradation. 2400 */ 2401 #define SD_BEGIN_IOSTART(index, un, bp) \ 2402 ((*(sd_iostart_chain[index]))(index, un, bp)) 2403 2404 #define SD_BEGIN_IODONE(index, un, bp) \ 2405 ((*(sd_iodone_chain[index]))(index, un, bp)) 2406 2407 #define SD_NEXT_IOSTART(index, un, bp) \ 2408 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2409 2410 #define SD_NEXT_IODONE(index, un, bp) \ 2411 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2412 2413 /* 2414 * Function: _init 2415 * 2416 * Description: This is the driver _init(9E) entry point. 2417 * 2418 * Return Code: Returns the value from mod_install(9F) or 2419 * ddi_soft_state_init(9F) as appropriate. 2420 * 2421 * Context: Called when driver module loaded. 2422 */ 2423 2424 int 2425 _init(void) 2426 { 2427 int err; 2428 2429 /* establish driver name from module name */ 2430 sd_label = (char *)mod_modname(&modlinkage); 2431 2432 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2433 SD_MAXUNIT); 2434 if (err != 0) { 2435 return (err); 2436 } 2437 2438 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2439 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2440 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2441 2442 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2443 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2444 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2445 2446 /* 2447 * it's ok to init here even for fibre device 2448 */ 2449 sd_scsi_probe_cache_init(); 2450 2451 sd_scsi_target_lun_init(); 2452 2453 /* 2454 * Creating taskq before mod_install ensures that all callers (threads) 2455 * that enter the module after a successful mod_install encounter 2456 * a valid taskq. 2457 */ 2458 sd_taskq_create(); 2459 2460 err = mod_install(&modlinkage); 2461 if (err != 0) { 2462 /* delete taskq if install fails */ 2463 sd_taskq_delete(); 2464 2465 mutex_destroy(&sd_detach_mutex); 2466 mutex_destroy(&sd_log_mutex); 2467 mutex_destroy(&sd_label_mutex); 2468 2469 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2470 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2471 cv_destroy(&sd_tr.srq_inprocess_cv); 2472 2473 sd_scsi_probe_cache_fini(); 2474 2475 sd_scsi_target_lun_fini(); 2476 2477 ddi_soft_state_fini(&sd_state); 2478 2479 return (err); 2480 } 2481 2482 return (err); 2483 } 2484 2485 2486 /* 2487 * Function: _fini 2488 * 2489 * Description: This is the driver _fini(9E) entry point. 2490 * 2491 * Return Code: Returns the value from mod_remove(9F) 2492 * 2493 * Context: Called when driver module is unloaded. 2494 */ 2495 2496 int 2497 _fini(void) 2498 { 2499 int err; 2500 2501 if ((err = mod_remove(&modlinkage)) != 0) { 2502 return (err); 2503 } 2504 2505 sd_taskq_delete(); 2506 2507 mutex_destroy(&sd_detach_mutex); 2508 mutex_destroy(&sd_log_mutex); 2509 mutex_destroy(&sd_label_mutex); 2510 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2511 2512 sd_scsi_probe_cache_fini(); 2513 2514 sd_scsi_target_lun_fini(); 2515 2516 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2517 cv_destroy(&sd_tr.srq_inprocess_cv); 2518 2519 ddi_soft_state_fini(&sd_state); 2520 2521 return (err); 2522 } 2523 2524 2525 /* 2526 * Function: _info 2527 * 2528 * Description: This is the driver _info(9E) entry point. 
2529 *
2530 * Arguments: modinfop - pointer to the driver modinfo structure
2531 *
2532 * Return Code: Returns the value from mod_info(9F).
2533 *
2534 * Context: Kernel thread context
2535 */
2536
2537 int
2538 _info(struct modinfo *modinfop)
2539 {
2540 return (mod_info(&modlinkage, modinfop));
2541 }
2542
2543
2544 /*
2545 * The following routines implement the driver message logging facility.
2546 * They provide component- and level-based debug output filtering.
2547 * Output may also be restricted to messages for a single instance by
2548 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2549 * to NULL, then messages for all instances are printed.
2550 *
2551 * These routines have been cloned from each other due to the language
2552 * constraints of macros and variable argument list processing.
2553 */
2554
2555
2556 /*
2557 * Function: sd_log_err
2558 *
2559 * Description: This routine is called by the SD_ERROR macro for debug
2560 * logging of error conditions.
2561 *
2562 * Arguments: comp - driver component being logged
2563 * un - pointer to driver soft state (unit) structure
2564 * fmt - error string and format to be logged
2565 */
2566
2567 static void
2568 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2569 {
2570 va_list ap;
2571 dev_info_t *dev;
2572
2573 ASSERT(un != NULL);
2574 dev = SD_DEVINFO(un);
2575 ASSERT(dev != NULL);
2576
2577 /*
2578 * Filter messages based on the global component and level masks.
2579 * Also print if un matches the value of sd_debug_un, or if
2580 * sd_debug_un is set to NULL.
2581 */
2582 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2583 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2584 mutex_enter(&sd_log_mutex);
2585 va_start(ap, fmt);
2586 (void) vsprintf(sd_log_buf, fmt, ap);
2587 va_end(ap);
2588 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2589 mutex_exit(&sd_log_mutex);
2590 }
2591 #ifdef SD_FAULT_INJECTION
2592 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2593 if (un->sd_injection_mask & comp) {
2594 mutex_enter(&sd_log_mutex);
2595 va_start(ap, fmt);
2596 (void) vsprintf(sd_log_buf, fmt, ap);
2597 va_end(ap);
2598 sd_injection_log(sd_log_buf, un);
2599 mutex_exit(&sd_log_mutex);
2600 }
2601 #endif
2602 }
2603
2604
2605 /*
2606 * Function: sd_log_info
2607 *
2608 * Description: This routine is called by the SD_INFO macro for debug
2609 * logging of general purpose informational conditions.
2610 *
2611 * Arguments: comp - driver component being logged
2612 * un - pointer to driver soft state (unit) structure
2613 * fmt - info string and format to be logged
2614 */
2615
2616 static void
2617 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2618 {
2619 va_list ap;
2620 dev_info_t *dev;
2621
2622 ASSERT(un != NULL);
2623 dev = SD_DEVINFO(un);
2624 ASSERT(dev != NULL);
2625
2626 /*
2627 * Filter messages based on the global component and level masks.
2628 * Also print if un matches the value of sd_debug_un, or if
2629 * sd_debug_un is set to NULL.
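 *
 * For example, to watch a single troublesome disk, a developer might set
 * sd_debug_un to that instance's soft state pointer from a debugger;
 * with the default of NULL, messages for all instances are printed.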
2630 */
2631 if ((sd_component_mask & component) &&
2632 (sd_level_mask & SD_LOGMASK_INFO) &&
2633 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2634 mutex_enter(&sd_log_mutex);
2635 va_start(ap, fmt);
2636 (void) vsprintf(sd_log_buf, fmt, ap);
2637 va_end(ap);
2638 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2639 mutex_exit(&sd_log_mutex);
2640 }
2641 #ifdef SD_FAULT_INJECTION
2642 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2643 if (un->sd_injection_mask & component) {
2644 mutex_enter(&sd_log_mutex);
2645 va_start(ap, fmt);
2646 (void) vsprintf(sd_log_buf, fmt, ap);
2647 va_end(ap);
2648 sd_injection_log(sd_log_buf, un);
2649 mutex_exit(&sd_log_mutex);
2650 }
2651 #endif
2652 }
2653
2654
2655 /*
2656 * Function: sd_log_trace
2657 *
2658 * Description: This routine is called by the SD_TRACE macro for debug
2659 * logging of trace conditions (i.e. function entry/exit).
2660 *
2661 * Arguments: comp - driver component being logged
2662 * un - pointer to driver soft state (unit) structure
2663 * fmt - trace string and format to be logged
2664 */
2665
2666 static void
2667 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2668 {
2669 va_list ap;
2670 dev_info_t *dev;
2671
2672 ASSERT(un != NULL);
2673 dev = SD_DEVINFO(un);
2674 ASSERT(dev != NULL);
2675
2676 /*
2677 * Filter messages based on the global component and level masks.
2678 * Also print if un matches the value of sd_debug_un, or if
2679 * sd_debug_un is set to NULL.
2680 */
2681 if ((sd_component_mask & component) &&
2682 (sd_level_mask & SD_LOGMASK_TRACE) &&
2683 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2684 mutex_enter(&sd_log_mutex);
2685 va_start(ap, fmt);
2686 (void) vsprintf(sd_log_buf, fmt, ap);
2687 va_end(ap);
2688 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2689 mutex_exit(&sd_log_mutex);
2690 }
2691 #ifdef SD_FAULT_INJECTION
2692 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2693 if (un->sd_injection_mask & component) {
2694 mutex_enter(&sd_log_mutex);
2695 va_start(ap, fmt);
2696 (void) vsprintf(sd_log_buf, fmt, ap);
2697 va_end(ap);
2698 sd_injection_log(sd_log_buf, un);
2699 mutex_exit(&sd_log_mutex);
2700 }
2701 #endif
2702 }
2703
2704
2705 /*
2706 * Function: sdprobe
2707 *
2708 * Description: This is the driver probe(9e) entry point function.
2709 *
2710 * Arguments: devi - opaque device info handle
2711 *
2712 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2713 * DDI_PROBE_FAILURE: If the probe failed.
2714 * DDI_PROBE_PARTIAL: If the instance is not present now,
2715 * but may be present in the future.
2716 */
2717
2718 static int
2719 sdprobe(dev_info_t *devi)
2720 {
2721 struct scsi_device *devp;
2722 int rval;
2723 int instance = ddi_get_instance(devi);
2724
2725 /*
2726 * if it wasn't for pln, sdprobe could actually be nulldev
2727 * in the "__fibre" case.
2728 */
2729 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2730 return (DDI_PROBE_DONTCARE);
2731 }
2732
2733 devp = ddi_get_driver_private(devi);
2734
2735 if (devp == NULL) {
2736 /* Oops... nexus driver is mis-configured... */
2737 return (DDI_PROBE_FAILURE);
2738 }
2739
2740 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2741 return (DDI_PROBE_PARTIAL);
2742 }
2743
2744 /*
2745 * Call the SCSA utility probe routine to see if we actually
2746 * have a target at this SCSI nexus.
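 * (NULL_FUNC as the wait function indicates that the allocator routines
 * called by scsi_probe(9F) should not block when resources are
 * unavailable.)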
2747 */
2748 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2749 case SCSIPROBE_EXISTS:
2750 switch (devp->sd_inq->inq_dtype) {
2751 case DTYPE_DIRECT:
2752 rval = DDI_PROBE_SUCCESS;
2753 break;
2754 case DTYPE_RODIRECT:
2755 /* CDs etc. Can be removable media */
2756 rval = DDI_PROBE_SUCCESS;
2757 break;
2758 case DTYPE_OPTICAL:
2759 /*
2760 * Rewritable optical drive HP115AA
2761 * Can also be removable media
2762 */
2763
2764 /*
2765 * Do not attempt to bind to DTYPE_OPTICAL if
2766 * pre-Solaris 9 SPARC sd behavior is required
2767 *
2768 * If first time through and sd_dtype_optical_bind
2769 * has not been set in /etc/system, check properties
2770 */
2771
2772 if (sd_dtype_optical_bind < 0) {
2773 sd_dtype_optical_bind = ddi_prop_get_int
2774 (DDI_DEV_T_ANY, devi, 0,
2775 "optical-device-bind", 1);
2776 }
2777
2778 if (sd_dtype_optical_bind == 0) {
2779 rval = DDI_PROBE_FAILURE;
2780 } else {
2781 rval = DDI_PROBE_SUCCESS;
2782 }
2783 break;
2784
2785 case DTYPE_NOTPRESENT:
2786 default:
2787 rval = DDI_PROBE_FAILURE;
2788 break;
2789 }
2790 break;
2791 default:
2792 rval = DDI_PROBE_PARTIAL;
2793 break;
2794 }
2795
2796 /*
2797 * This routine checks for resource allocation prior to freeing,
2798 * so it will take care of the "smart probing" case where a
2799 * scsi_probe() may or may not have been issued and will *not*
2800 * free previously-freed resources.
2801 */
2802 scsi_unprobe(devp);
2803 return (rval);
2804 }
2805
2806
2807 /*
2808 * Function: sdinfo
2809 *
2810 * Description: This is the driver getinfo(9e) entry point function.
2811 * Given the device number, return the devinfo pointer from
2812 * the scsi_device structure or the instance number
2813 * associated with the dev_t.
2814 *
2815 * Arguments: dip - pointer to device info structure
2816 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2817 * DDI_INFO_DEVT2INSTANCE)
2818 * arg - driver dev_t
2819 * resultp - user buffer for request response
2820 *
2821 * Return Code: DDI_SUCCESS
2822 * DDI_FAILURE
2823 */
2824 /* ARGSUSED */
2825 static int
2826 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2827 {
2828 struct sd_lun *un;
2829 dev_t dev;
2830 int instance;
2831 int error;
2832
2833 switch (infocmd) {
2834 case DDI_INFO_DEVT2DEVINFO:
2835 dev = (dev_t)arg;
2836 instance = SDUNIT(dev);
2837 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2838 return (DDI_FAILURE);
2839 }
2840 *result = (void *) SD_DEVINFO(un);
2841 error = DDI_SUCCESS;
2842 break;
2843 case DDI_INFO_DEVT2INSTANCE:
2844 dev = (dev_t)arg;
2845 instance = SDUNIT(dev);
2846 *result = (void *)(uintptr_t)instance;
2847 error = DDI_SUCCESS;
2848 break;
2849 default:
2850 error = DDI_FAILURE;
2851 }
2852 return (error);
2853 }
2854
2855 /*
2856 * Function: sd_prop_op
2857 *
2858 * Description: This is the driver prop_op(9e) entry point function.
2859 * Return the number of blocks for the partition in question
2860 * or forward the request to the property facilities.
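 * Dynamic partition-size properties (e.g. "Nblocks", "Size") are
 * expected to be served by cmlb_prop_op(); properties that cmlb
 * does not recognize fall through to ddi_prop_op(9F).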
2861 * 2862 * Arguments: dev - device number 2863 * dip - pointer to device info structure 2864 * prop_op - property operator 2865 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2866 * name - pointer to property name 2867 * valuep - pointer or address of the user buffer 2868 * lengthp - property length 2869 * 2870 * Return Code: DDI_PROP_SUCCESS 2871 * DDI_PROP_NOT_FOUND 2872 * DDI_PROP_UNDEFINED 2873 * DDI_PROP_NO_MEMORY 2874 * DDI_PROP_BUF_TOO_SMALL 2875 */ 2876 2877 static int 2878 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2879 char *name, caddr_t valuep, int *lengthp) 2880 { 2881 struct sd_lun *un; 2882 2883 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2884 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2885 name, valuep, lengthp)); 2886 2887 return (cmlb_prop_op(un->un_cmlbhandle, 2888 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2889 SDPART(dev), (void *)SD_PATH_DIRECT)); 2890 } 2891 2892 /* 2893 * The following functions are for smart probing: 2894 * sd_scsi_probe_cache_init() 2895 * sd_scsi_probe_cache_fini() 2896 * sd_scsi_clear_probe_cache() 2897 * sd_scsi_probe_with_cache() 2898 */ 2899 2900 /* 2901 * Function: sd_scsi_probe_cache_init 2902 * 2903 * Description: Initializes the probe response cache mutex and head pointer. 2904 * 2905 * Context: Kernel thread context 2906 */ 2907 2908 static void 2909 sd_scsi_probe_cache_init(void) 2910 { 2911 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2912 sd_scsi_probe_cache_head = NULL; 2913 } 2914 2915 2916 /* 2917 * Function: sd_scsi_probe_cache_fini 2918 * 2919 * Description: Frees all resources associated with the probe response cache. 2920 * 2921 * Context: Kernel thread context 2922 */ 2923 2924 static void 2925 sd_scsi_probe_cache_fini(void) 2926 { 2927 struct sd_scsi_probe_cache *cp; 2928 struct sd_scsi_probe_cache *ncp; 2929 2930 /* Clean up our smart probing linked list */ 2931 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2932 ncp = cp->next; 2933 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2934 } 2935 sd_scsi_probe_cache_head = NULL; 2936 mutex_destroy(&sd_scsi_probe_cache_mutex); 2937 } 2938 2939 2940 /* 2941 * Function: sd_scsi_clear_probe_cache 2942 * 2943 * Description: This routine clears the probe response cache. This is 2944 * done when open() returns ENXIO so that when deferred 2945 * attach is attempted (possibly after a device has been 2946 * turned on) we will retry the probe. Since we don't know 2947 * which target we failed to open, we just clear the 2948 * entire cache. 2949 * 2950 * Context: Kernel thread context 2951 */ 2952 2953 static void 2954 sd_scsi_clear_probe_cache(void) 2955 { 2956 struct sd_scsi_probe_cache *cp; 2957 int i; 2958 2959 mutex_enter(&sd_scsi_probe_cache_mutex); 2960 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2961 /* 2962 * Reset all entries to SCSIPROBE_EXISTS. This will 2963 * force probing to be performed the next time 2964 * sd_scsi_probe_with_cache is called. 2965 */ 2966 for (i = 0; i < NTARGETS_WIDE; i++) { 2967 cp->cache[i] = SCSIPROBE_EXISTS; 2968 } 2969 } 2970 mutex_exit(&sd_scsi_probe_cache_mutex); 2971 } 2972 2973 2974 /* 2975 * Function: sd_scsi_probe_with_cache 2976 * 2977 * Description: This routine implements support for a scsi device probe 2978 * with cache. The driver maintains a cache of the target 2979 * responses to scsi probes. 
If we get no response from a 2980 * target during a probe inquiry, we remember that, and we 2981 * avoid additional calls to scsi_probe on non-zero LUNs 2982 * on the same target until the cache is cleared. By doing 2983 * so we avoid the 1/4 sec selection timeout for nonzero 2984 * LUNs. lun0 of a target is always probed. 2985 * 2986 * Arguments: devp - Pointer to a scsi_device(9S) structure 2987 * waitfunc - indicates what the allocator routines should 2988 * do when resources are not available. This value 2989 * is passed on to scsi_probe() when that routine 2990 * is called. 2991 * 2992 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2993 * otherwise the value returned by scsi_probe(9F). 2994 * 2995 * Context: Kernel thread context 2996 */ 2997 2998 static int 2999 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 3000 { 3001 struct sd_scsi_probe_cache *cp; 3002 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 3003 int lun, tgt; 3004 3005 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3006 SCSI_ADDR_PROP_LUN, 0); 3007 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3008 SCSI_ADDR_PROP_TARGET, -1); 3009 3010 /* Make sure caching enabled and target in range */ 3011 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 3012 /* do it the old way (no cache) */ 3013 return (scsi_probe(devp, waitfn)); 3014 } 3015 3016 mutex_enter(&sd_scsi_probe_cache_mutex); 3017 3018 /* Find the cache for this scsi bus instance */ 3019 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 3020 if (cp->pdip == pdip) { 3021 break; 3022 } 3023 } 3024 3025 /* If we can't find a cache for this pdip, create one */ 3026 if (cp == NULL) { 3027 int i; 3028 3029 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 3030 KM_SLEEP); 3031 cp->pdip = pdip; 3032 cp->next = sd_scsi_probe_cache_head; 3033 sd_scsi_probe_cache_head = cp; 3034 for (i = 0; i < NTARGETS_WIDE; i++) { 3035 cp->cache[i] = SCSIPROBE_EXISTS; 3036 } 3037 } 3038 3039 mutex_exit(&sd_scsi_probe_cache_mutex); 3040 3041 /* Recompute the cache for this target if LUN zero */ 3042 if (lun == 0) { 3043 cp->cache[tgt] = SCSIPROBE_EXISTS; 3044 } 3045 3046 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 3047 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3048 return (SCSIPROBE_NORESP); 3049 } 3050 3051 /* Do the actual probe; save & return the result */ 3052 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3053 } 3054 3055 3056 /* 3057 * Function: sd_scsi_target_lun_init 3058 * 3059 * Description: Initializes the attached lun chain mutex and head pointer. 
3060 *
3061 * Context: Kernel thread context
3062 */
3063
3064 static void
3065 sd_scsi_target_lun_init(void)
3066 {
3067 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3068 sd_scsi_target_lun_head = NULL;
3069 }
3070
3071
3072 /*
3073 * Function: sd_scsi_target_lun_fini
3074 *
3075 * Description: Frees all resources associated with the attached lun
3076 * chain.
3077 *
3078 * Context: Kernel thread context
3079 */
3080
3081 static void
3082 sd_scsi_target_lun_fini(void)
3083 {
3084 struct sd_scsi_hba_tgt_lun *cp;
3085 struct sd_scsi_hba_tgt_lun *ncp;
3086
3087 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3088 ncp = cp->next;
3089 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3090 }
3091 sd_scsi_target_lun_head = NULL;
3092 mutex_destroy(&sd_scsi_target_lun_mutex);
3093 }
3094
3095
3096 /*
3097 * Function: sd_scsi_get_target_lun_count
3098 *
3099 * Description: This routine checks the attached lun chain to see
3100 * how many luns are attached on the required SCSI controller
3101 * and target. Currently, some capabilities, such as tagged
3102 * queueing, are supported per target by the HBA, so all luns in a
3103 * target have the same capabilities. Based on this assumption,
3104 * sd should only set these capabilities once per target. This
3105 * function is called when sd needs to decide how many luns
3106 * are already attached on a target.
3107 *
3108 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3109 * controller device.
3110 * target - The target ID on the controller's SCSI bus.
3111 *
3112 * Return Code: The number of luns attached on the required target and
3113 * controller.
3114 * -1 if target ID is not in parallel SCSI scope or the given
3115 * dip is not in the chain.
3116 *
3117 * Context: Kernel thread context
3118 */
3119
3120 static int
3121 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3122 {
3123 struct sd_scsi_hba_tgt_lun *cp;
3124
3125 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3126 return (-1);
3127 }
3128
3129 mutex_enter(&sd_scsi_target_lun_mutex);
3130
3131 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3132 if (cp->pdip == dip) {
3133 break;
3134 }
3135 }
3136
3137 mutex_exit(&sd_scsi_target_lun_mutex);
3138
3139 if (cp == NULL) {
3140 return (-1);
3141 }
3142
3143 return (cp->nlun[target]);
3144 }
3145
3146
3147 /*
3148 * Function: sd_scsi_update_lun_on_target
3149 *
3150 * Description: This routine is used to update the attached lun chain when a
3151 * lun is attached or detached on a target.
3152 *
3153 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3154 * controller device.
3155 * target - The target ID on the controller's SCSI bus.
3156 * flag - Indicates whether the lun is being attached or detached.
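 * (SD_SCSI_LUN_ATTACH increments the count for the target;
 * any other value decrements it.)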
3157 *
3158 * Context: Kernel thread context
3159 */
3160
3161 static void
3162 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3163 {
3164 struct sd_scsi_hba_tgt_lun *cp;
3165
3166 mutex_enter(&sd_scsi_target_lun_mutex);
3167
3168 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3169 if (cp->pdip == dip) {
3170 break;
3171 }
3172 }
3173
3174 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3175 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3176 KM_SLEEP);
3177 cp->pdip = dip;
3178 cp->next = sd_scsi_target_lun_head;
3179 sd_scsi_target_lun_head = cp;
3180 }
3181
3182 mutex_exit(&sd_scsi_target_lun_mutex);
3183
3184 if (cp != NULL) {
3185 if (flag == SD_SCSI_LUN_ATTACH) {
3186 cp->nlun[target]++;
3187 } else {
3188 cp->nlun[target]--;
3189 }
3190 }
3191 }
3192
3193
3194 /*
3195 * Function: sd_spin_up_unit
3196 *
3197 * Description: Issues the following commands to spin up the device:
3198 * START STOP UNIT and INQUIRY.
3199 *
3200 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3201 * structure for this target.
3202 *
3203 * Return Code: 0 - success
3204 * EIO - failure
3205 * EACCES - reservation conflict
3206 *
3207 * Context: Kernel thread context
3208 */
3209
3210 static int
3211 sd_spin_up_unit(sd_ssc_t *ssc)
3212 {
3213 size_t resid = 0;
3214 int has_conflict = FALSE;
3215 uchar_t *bufaddr;
3216 int status;
3217 struct sd_lun *un;
3218
3219 ASSERT(ssc != NULL);
3220 un = ssc->ssc_un;
3221 ASSERT(un != NULL);
3222
3223 /*
3224 * Send a throwaway START UNIT command.
3225 *
3226 * If we fail on this, we don't care presently what precisely
3227 * is wrong. EMC's arrays will also fail this with a check
3228 * condition (0x2/0x4/0x3) if the device is "inactive," but
3229 * we don't want to fail the attach because it may become
3230 * "active" later.
3231 * We don't know whether the power condition is supported at
3232 * this stage, so use the START STOP bit.
3233 */
3234 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3235 SD_TARGET_START, SD_PATH_DIRECT);
3236
3237 if (status != 0) {
3238 if (status == EACCES)
3239 has_conflict = TRUE;
3240 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3241 }
3242
3243 /*
3244 * Send another INQUIRY command to the target. This is necessary for
3245 * non-removable media direct access devices because their INQUIRY data
3246 * may not be fully qualified until they are spun up (perhaps via the
3247 * START command above). Note: This seems to be needed for some
3248 * legacy devices only. The INQUIRY command should succeed even if a
3249 * Reservation Conflict is present.
3250 */
3251 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3252
3253 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3254 != 0) {
3255 kmem_free(bufaddr, SUN_INQSIZE);
3256 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3257 return (EIO);
3258 }
3259
3260 /*
3261 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3262 * Note that this routine does not return a failure here even if the
3263 * INQUIRY command did not return any data. This is a legacy behavior.
3264 */
3265 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3266 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3267 }
3268
3269 kmem_free(bufaddr, SUN_INQSIZE);
3270
3271 /* If we hit a reservation conflict above, tell the caller.
*/ 3272 if (has_conflict == TRUE) { 3273 return (EACCES); 3274 } 3275 3276 return (0); 3277 } 3278 3279 #ifdef _LP64 3280 /* 3281 * Function: sd_enable_descr_sense 3282 * 3283 * Description: This routine attempts to select descriptor sense format 3284 * using the Control mode page. Devices that support 64 bit 3285 * LBAs (for >2TB luns) should also implement descriptor 3286 * sense data so we will call this function whenever we see 3287 * a lun larger than 2TB. If for some reason the device 3288 * supports 64 bit LBAs but doesn't support descriptor sense 3289 * presumably the mode select will fail. Everything will 3290 * continue to work normally except that we will not get 3291 * complete sense data for commands that fail with an LBA 3292 * larger than 32 bits. 3293 * 3294 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3295 * structure for this target. 3296 * 3297 * Context: Kernel thread context only 3298 */ 3299 3300 static void 3301 sd_enable_descr_sense(sd_ssc_t *ssc) 3302 { 3303 uchar_t *header; 3304 struct mode_control_scsi3 *ctrl_bufp; 3305 size_t buflen; 3306 size_t bd_len; 3307 int status; 3308 struct sd_lun *un; 3309 3310 ASSERT(ssc != NULL); 3311 un = ssc->ssc_un; 3312 ASSERT(un != NULL); 3313 3314 /* 3315 * Read MODE SENSE page 0xA, Control Mode Page 3316 */ 3317 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3318 sizeof (struct mode_control_scsi3); 3319 header = kmem_zalloc(buflen, KM_SLEEP); 3320 3321 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3322 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3323 3324 if (status != 0) { 3325 SD_ERROR(SD_LOG_COMMON, un, 3326 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3327 goto eds_exit; 3328 } 3329 3330 /* 3331 * Determine size of Block Descriptors in order to locate 3332 * the mode page data. ATAPI devices return 0, SCSI devices 3333 * should return MODE_BLK_DESC_LENGTH. 3334 */ 3335 bd_len = ((struct mode_header *)header)->bdesc_length; 3336 3337 /* Clear the mode data length field for MODE SELECT */ 3338 ((struct mode_header *)header)->length = 0; 3339 3340 ctrl_bufp = (struct mode_control_scsi3 *) 3341 (header + MODE_HEADER_LENGTH + bd_len); 3342 3343 /* 3344 * If the page length is smaller than the expected value, 3345 * the target device doesn't support D_SENSE. Bail out here. 3346 */ 3347 if (ctrl_bufp->mode_page.length < 3348 sizeof (struct mode_control_scsi3) - 2) { 3349 SD_ERROR(SD_LOG_COMMON, un, 3350 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3351 goto eds_exit; 3352 } 3353 3354 /* 3355 * Clear PS bit for MODE SELECT 3356 */ 3357 ctrl_bufp->mode_page.ps = 0; 3358 3359 /* 3360 * Set D_SENSE to enable descriptor sense format. 
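 * Unlike fixed format sense data, whose INFORMATION field is only
 * four bytes wide, descriptor format sense data can return a full
 * eight-byte LBA, which is what allows complete sense data for
 * commands that fail beyond the 32-bit LBA range.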
3361 */ 3362 ctrl_bufp->d_sense = 1; 3363 3364 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3365 3366 /* 3367 * Use MODE SELECT to commit the change to the D_SENSE bit 3368 */ 3369 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3370 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3371 3372 if (status != 0) { 3373 SD_INFO(SD_LOG_COMMON, un, 3374 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3375 } else { 3376 kmem_free(header, buflen); 3377 return; 3378 } 3379 3380 eds_exit: 3381 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3382 kmem_free(header, buflen); 3383 } 3384 3385 /* 3386 * Function: sd_reenable_dsense_task 3387 * 3388 * Description: Re-enable descriptor sense after device or bus reset 3389 * 3390 * Context: Executes in a taskq() thread context 3391 */ 3392 static void 3393 sd_reenable_dsense_task(void *arg) 3394 { 3395 struct sd_lun *un = arg; 3396 sd_ssc_t *ssc; 3397 3398 ASSERT(un != NULL); 3399 3400 ssc = sd_ssc_init(un); 3401 sd_enable_descr_sense(ssc); 3402 sd_ssc_fini(ssc); 3403 } 3404 #endif /* _LP64 */ 3405 3406 /* 3407 * Function: sd_set_mmc_caps 3408 * 3409 * Description: This routine determines if the device is MMC compliant and if 3410 * the device supports CDDA via a mode sense of the CDVD 3411 * capabilities mode page. Also checks if the device is a 3412 * dvdram writable device. 3413 * 3414 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3415 * structure for this target. 3416 * 3417 * Context: Kernel thread context only 3418 */ 3419 3420 static void 3421 sd_set_mmc_caps(sd_ssc_t *ssc) 3422 { 3423 struct mode_header_grp2 *sense_mhp; 3424 uchar_t *sense_page; 3425 caddr_t buf; 3426 int bd_len; 3427 int status; 3428 struct uscsi_cmd com; 3429 int rtn; 3430 uchar_t *out_data_rw, *out_data_hd; 3431 uchar_t *rqbuf_rw, *rqbuf_hd; 3432 uchar_t *out_data_gesn; 3433 int gesn_len; 3434 struct sd_lun *un; 3435 3436 ASSERT(ssc != NULL); 3437 un = ssc->ssc_un; 3438 ASSERT(un != NULL); 3439 3440 /* 3441 * The flags which will be set in this function are - mmc compliant, 3442 * dvdram writable device, cdda support. Initialize them to FALSE 3443 * and if a capability is detected - it will be set to TRUE. 3444 */ 3445 un->un_f_mmc_cap = FALSE; 3446 un->un_f_dvdram_writable_device = FALSE; 3447 un->un_f_cfg_cdda = FALSE; 3448 3449 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3450 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3451 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3452 3453 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3454 3455 if (status != 0) { 3456 /* command failed; just return */ 3457 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3458 return; 3459 } 3460 /* 3461 * If the mode sense request for the CDROM CAPABILITIES 3462 * page (0x2A) succeeds the device is assumed to be MMC. 
3463 */ 3464 un->un_f_mmc_cap = TRUE; 3465 3466 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3467 if (un->un_f_mmc_gesn_polling) { 3468 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3469 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3470 3471 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3472 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3473 3474 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3475 3476 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3477 un->un_f_mmc_gesn_polling = FALSE; 3478 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3479 "sd_set_mmc_caps: gesn not supported " 3480 "%d %x %x %x %x\n", rtn, 3481 out_data_gesn[0], out_data_gesn[1], 3482 out_data_gesn[2], out_data_gesn[3]); 3483 } 3484 3485 kmem_free(out_data_gesn, gesn_len); 3486 } 3487 3488 /* Get to the page data */ 3489 sense_mhp = (struct mode_header_grp2 *)buf; 3490 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3491 sense_mhp->bdesc_length_lo; 3492 if (bd_len > MODE_BLK_DESC_LENGTH) { 3493 /* 3494 * We did not get back the expected block descriptor 3495 * length so we cannot determine if the device supports 3496 * CDDA. However, we still indicate the device is MMC 3497 * according to the successful response to the page 3498 * 0x2A mode sense request. 3499 */ 3500 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3501 "sd_set_mmc_caps: Mode Sense returned " 3502 "invalid block descriptor length\n"); 3503 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3504 return; 3505 } 3506 3507 /* See if read CDDA is supported */ 3508 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3509 bd_len); 3510 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3511 3512 /* See if writing DVD RAM is supported. */ 3513 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3514 if (un->un_f_dvdram_writable_device == TRUE) { 3515 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3516 return; 3517 } 3518 3519 /* 3520 * If the device presents DVD or CD capabilities in the mode 3521 * page, we can return here since a RRD will not have 3522 * these capabilities. 3523 */ 3524 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3525 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3526 return; 3527 } 3528 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3529 3530 /* 3531 * If un->un_f_dvdram_writable_device is still FALSE, 3532 * check for a Removable Rigid Disk (RRD). A RRD 3533 * device is identified by the features RANDOM_WRITABLE and 3534 * HARDWARE_DEFECT_MANAGEMENT. 3535 */ 3536 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3537 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3538 3539 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3540 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3541 RANDOM_WRITABLE, SD_PATH_STANDARD); 3542 3543 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3544 3545 if (rtn != 0) { 3546 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3547 kmem_free(rqbuf_rw, SENSE_LENGTH); 3548 return; 3549 } 3550 3551 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3552 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3553 3554 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3555 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3556 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3557 3558 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3559 3560 if (rtn == 0) { 3561 /* 3562 * We have good information, check for random writable 3563 * and hardware defect features. 
3564 */ 3565 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3566 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3567 un->un_f_dvdram_writable_device = TRUE; 3568 } 3569 } 3570 3571 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3572 kmem_free(rqbuf_rw, SENSE_LENGTH); 3573 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3574 kmem_free(rqbuf_hd, SENSE_LENGTH); 3575 } 3576 3577 /* 3578 * Function: sd_check_for_writable_cd 3579 * 3580 * Description: This routine determines if the media in the device is 3581 * writable or not. It uses the get configuration command (0x46) 3582 * to determine if the media is writable 3583 * 3584 * Arguments: un - driver soft state (unit) structure 3585 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3586 * chain and the normal command waitq, or 3587 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3588 * "direct" chain and bypass the normal command 3589 * waitq. 3590 * 3591 * Context: Never called at interrupt context. 3592 */ 3593 3594 static void 3595 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3596 { 3597 struct uscsi_cmd com; 3598 uchar_t *out_data; 3599 uchar_t *rqbuf; 3600 int rtn; 3601 uchar_t *out_data_rw, *out_data_hd; 3602 uchar_t *rqbuf_rw, *rqbuf_hd; 3603 struct mode_header_grp2 *sense_mhp; 3604 uchar_t *sense_page; 3605 caddr_t buf; 3606 int bd_len; 3607 int status; 3608 struct sd_lun *un; 3609 3610 ASSERT(ssc != NULL); 3611 un = ssc->ssc_un; 3612 ASSERT(un != NULL); 3613 ASSERT(mutex_owned(SD_MUTEX(un))); 3614 3615 /* 3616 * Initialize the writable media to false, if configuration info. 3617 * tells us otherwise then only we will set it. 3618 */ 3619 un->un_f_mmc_writable_media = FALSE; 3620 mutex_exit(SD_MUTEX(un)); 3621 3622 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3623 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3624 3625 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3626 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3627 3628 if (rtn != 0) 3629 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3630 3631 mutex_enter(SD_MUTEX(un)); 3632 if (rtn == 0) { 3633 /* 3634 * We have good information, check for writable DVD. 3635 */ 3636 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3637 un->un_f_mmc_writable_media = TRUE; 3638 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3639 kmem_free(rqbuf, SENSE_LENGTH); 3640 return; 3641 } 3642 } 3643 3644 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3645 kmem_free(rqbuf, SENSE_LENGTH); 3646 3647 /* 3648 * Determine if this is a RRD type device. 3649 */ 3650 mutex_exit(SD_MUTEX(un)); 3651 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3652 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3653 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3654 3655 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3656 3657 mutex_enter(SD_MUTEX(un)); 3658 if (status != 0) { 3659 /* command failed; just return */ 3660 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3661 return; 3662 } 3663 3664 /* Get to the page data */ 3665 sense_mhp = (struct mode_header_grp2 *)buf; 3666 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3667 if (bd_len > MODE_BLK_DESC_LENGTH) { 3668 /* 3669 * We did not get back the expected block descriptor length so 3670 * we cannot check the mode page. 
3671 */ 3672 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3673 "sd_check_for_writable_cd: Mode Sense returned " 3674 "invalid block descriptor length\n"); 3675 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3676 return; 3677 } 3678 3679 /* 3680 * If the device presents DVD or CD capabilities in the mode 3681 * page, we can return here since a RRD device will not have 3682 * these capabilities. 3683 */ 3684 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3685 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3686 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3687 return; 3688 } 3689 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3690 3691 /* 3692 * If un->un_f_mmc_writable_media is still FALSE, 3693 * check for RRD type media. A RRD device is identified 3694 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3695 */ 3696 mutex_exit(SD_MUTEX(un)); 3697 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3698 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3699 3700 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3701 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3702 RANDOM_WRITABLE, path_flag); 3703 3704 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3705 if (rtn != 0) { 3706 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3707 kmem_free(rqbuf_rw, SENSE_LENGTH); 3708 mutex_enter(SD_MUTEX(un)); 3709 return; 3710 } 3711 3712 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3713 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3714 3715 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3716 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3717 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3718 3719 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3720 mutex_enter(SD_MUTEX(un)); 3721 if (rtn == 0) { 3722 /* 3723 * We have good information, check for random writable 3724 * and hardware defect features as current. 3725 */ 3726 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3727 (out_data_rw[10] & 0x1) && 3728 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3729 (out_data_hd[10] & 0x1)) { 3730 un->un_f_mmc_writable_media = TRUE; 3731 } 3732 } 3733 3734 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3735 kmem_free(rqbuf_rw, SENSE_LENGTH); 3736 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3737 kmem_free(rqbuf_hd, SENSE_LENGTH); 3738 } 3739 3740 /* 3741 * Function: sd_read_unit_properties 3742 * 3743 * Description: The following implements a property lookup mechanism. 3744 * Properties for particular disks (keyed on vendor, model 3745 * and rev numbers) are sought in the sd.conf file via 3746 * sd_process_sdconf_file(), and if not found there, are 3747 * looked for in a list hardcoded in this driver via 3748 * sd_process_sdconf_table() Once located the properties 3749 * are used to update the driver unit structure. 3750 * 3751 * Arguments: un - driver soft state (unit) structure 3752 */ 3753 3754 static void 3755 sd_read_unit_properties(struct sd_lun *un) 3756 { 3757 /* 3758 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3759 * the "sd-config-list" property (from the sd.conf file) or if 3760 * there was not a match for the inquiry vid/pid. If this event 3761 * occurs the static driver configuration table is searched for 3762 * a match. 
3763 */ 3764 ASSERT(un != NULL); 3765 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3766 sd_process_sdconf_table(un); 3767 } 3768 3769 /* check for LSI device */ 3770 sd_is_lsi(un); 3771 3772 3773 } 3774 3775 3776 /* 3777 * Function: sd_process_sdconf_file 3778 * 3779 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3780 * driver's config file (ie, sd.conf) and update the driver 3781 * soft state structure accordingly. 3782 * 3783 * Arguments: un - driver soft state (unit) structure 3784 * 3785 * Return Code: SD_SUCCESS - The properties were successfully set according 3786 * to the driver configuration file. 3787 * SD_FAILURE - The driver config list was not obtained or 3788 * there was no vid/pid match. This indicates that 3789 * the static config table should be used. 3790 * 3791 * The config file has a property, "sd-config-list". Currently we support 3792 * two kinds of formats. For both formats, the value of this property 3793 * is a list of duplets: 3794 * 3795 * sd-config-list= 3796 * <duplet>, 3797 * [,<duplet>]*; 3798 * 3799 * For the improved format, where 3800 * 3801 * <duplet>:= "<vid+pid>","<tunable-list>" 3802 * 3803 * and 3804 * 3805 * <tunable-list>:= <tunable> [, <tunable> ]*; 3806 * <tunable> = <name> : <value> 3807 * 3808 * The <vid+pid> is the string that is returned by the target device on a 3809 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3810 * to apply to all target devices with the specified <vid+pid>. 3811 * 3812 * Each <tunable> is a "<name> : <value>" pair. 3813 * 3814 * For the old format, the structure of each duplet is as follows: 3815 * 3816 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3817 * 3818 * The first entry of the duplet is the device ID string (the concatenated 3819 * vid & pid; not to be confused with a device_id). This is defined in 3820 * the same way as in the sd_disk_table. 3821 * 3822 * The second part of the duplet is a string that identifies a 3823 * data-property-name-list. The data-property-name-list is defined as 3824 * follows: 3825 * 3826 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3827 * 3828 * The syntax of <data-property-name> depends on the <version> field. 3829 * 3830 * If version = SD_CONF_VERSION_1 we have the following syntax: 3831 * 3832 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3833 * 3834 * where the prop0 value will be used to set prop0 if bit0 set in the 3835 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3836 * 3837 */ 3838 3839 static int 3840 sd_process_sdconf_file(struct sd_lun *un) 3841 { 3842 char **config_list = NULL; 3843 uint_t nelements; 3844 char *vidptr; 3845 int vidlen; 3846 char *dnlist_ptr; 3847 char *dataname_ptr; 3848 char *dataname_lasts; 3849 int *data_list = NULL; 3850 uint_t data_list_len; 3851 int rval = SD_FAILURE; 3852 int i; 3853 3854 ASSERT(un != NULL); 3855 3856 /* Obtain the configuration list associated with the .conf file */ 3857 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3858 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3859 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3860 return (SD_FAILURE); 3861 } 3862 3863 /* 3864 * Compare vids in each duplet to the inquiry vid - if a match is 3865 * made, get the data value and update the soft state structure 3866 * accordingly. 3867 * 3868 * Each duplet should show as a pair of strings, return SD_FAILURE 3869 * otherwise. 
3870 */ 3871 if (nelements & 1) { 3872 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3873 "sd-config-list should show as pairs of strings.\n"); 3874 if (config_list) 3875 ddi_prop_free(config_list); 3876 return (SD_FAILURE); 3877 } 3878 3879 for (i = 0; i < nelements; i += 2) { 3880 /* 3881 * Note: The assumption here is that each vid entry is on 3882 * a unique line from its associated duplet. 3883 */ 3884 vidptr = config_list[i]; 3885 vidlen = (int)strlen(vidptr); 3886 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) { 3887 continue; 3888 } 3889 3890 /* 3891 * dnlist contains 1 or more blank separated 3892 * data-property-name entries 3893 */ 3894 dnlist_ptr = config_list[i + 1]; 3895 3896 if (strchr(dnlist_ptr, ':') != NULL) { 3897 /* 3898 * Decode the improved format sd-config-list. 3899 */ 3900 sd_nvpair_str_decode(un, dnlist_ptr); 3901 } else { 3902 /* 3903 * The old format sd-config-list, loop through all 3904 * data-property-name entries in the 3905 * data-property-name-list 3906 * setting the properties for each. 3907 */ 3908 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3909 &dataname_lasts); dataname_ptr != NULL; 3910 dataname_ptr = sd_strtok_r(NULL, " \t", 3911 &dataname_lasts)) { 3912 int version; 3913 3914 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3915 "sd_process_sdconf_file: disk:%s, " 3916 "data:%s\n", vidptr, dataname_ptr); 3917 3918 /* Get the data list */ 3919 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3920 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3921 &data_list_len) != DDI_PROP_SUCCESS) { 3922 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3923 "sd_process_sdconf_file: data " 3924 "property (%s) has no value\n", 3925 dataname_ptr); 3926 continue; 3927 } 3928 3929 version = data_list[0]; 3930 3931 if (version == SD_CONF_VERSION_1) { 3932 sd_tunables values; 3933 3934 /* Set the properties */ 3935 if (sd_chk_vers1_data(un, data_list[1], 3936 &data_list[2], data_list_len, 3937 dataname_ptr) == SD_SUCCESS) { 3938 sd_get_tunables_from_conf(un, 3939 data_list[1], &data_list[2], 3940 &values); 3941 sd_set_vers1_properties(un, 3942 data_list[1], &values); 3943 rval = SD_SUCCESS; 3944 } else { 3945 rval = SD_FAILURE; 3946 } 3947 } else { 3948 scsi_log(SD_DEVINFO(un), sd_label, 3949 CE_WARN, "data property %s version " 3950 "0x%x is invalid.", 3951 dataname_ptr, version); 3952 rval = SD_FAILURE; 3953 } 3954 if (data_list) 3955 ddi_prop_free(data_list); 3956 } 3957 } 3958 } 3959 3960 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3961 if (config_list) { 3962 ddi_prop_free(config_list); 3963 } 3964 3965 return (rval); 3966 } 3967 3968 /* 3969 * Function: sd_nvpair_str_decode() 3970 * 3971 * Description: Parse the improved format sd-config-list to get 3972 * each entry of tunable, which includes a name-value pair. 3973 * Then call sd_set_properties() to set the property. 
3974 * 3975 * Arguments: un - driver soft state (unit) structure 3976 * nvpair_str - the tunable list 3977 */ 3978 static void 3979 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3980 { 3981 char *nv, *name, *value, *token; 3982 char *nv_lasts, *v_lasts, *x_lasts; 3983 3984 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3985 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3986 token = sd_strtok_r(nv, ":", &v_lasts); 3987 name = sd_strtok_r(token, " \t", &x_lasts); 3988 token = sd_strtok_r(NULL, ":", &v_lasts); 3989 value = sd_strtok_r(token, " \t", &x_lasts); 3990 if (name == NULL || value == NULL) { 3991 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3992 "sd_nvpair_str_decode: " 3993 "name or value is not valid!\n"); 3994 } else { 3995 sd_set_properties(un, name, value); 3996 } 3997 } 3998 } 3999 4000 /* 4001 * Function: sd_strtok_r() 4002 * 4003 * Description: This function uses strpbrk and strspn to break 4004 * string into tokens on sequentially subsequent calls. Return 4005 * NULL when no non-separator characters remain. The first 4006 * argument is NULL for subsequent calls. 4007 */ 4008 static char * 4009 sd_strtok_r(char *string, const char *sepset, char **lasts) 4010 { 4011 char *q, *r; 4012 4013 /* First or subsequent call */ 4014 if (string == NULL) 4015 string = *lasts; 4016 4017 if (string == NULL) 4018 return (NULL); 4019 4020 /* Skip leading separators */ 4021 q = string + strspn(string, sepset); 4022 4023 if (*q == '\0') 4024 return (NULL); 4025 4026 if ((r = strpbrk(q, sepset)) == NULL) { 4027 *lasts = NULL; 4028 } else { 4029 *r = '\0'; 4030 *lasts = r + 1; 4031 } 4032 return (q); 4033 } 4034 4035 /* 4036 * Function: sd_set_properties() 4037 * 4038 * Description: Set device properties based on the improved 4039 * format sd-config-list. 
4040 * 4041 * Arguments: un - driver soft state (unit) structure 4042 * name - supported tunable name 4043 * value - tunable value 4044 */ 4045 static void 4046 sd_set_properties(struct sd_lun *un, char *name, char *value) 4047 { 4048 char *endptr = NULL; 4049 long val = 0; 4050 4051 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4052 if (strcasecmp(value, "true") == 0) { 4053 un->un_f_suppress_cache_flush = TRUE; 4054 } else if (strcasecmp(value, "false") == 0) { 4055 un->un_f_suppress_cache_flush = FALSE; 4056 } else { 4057 goto value_invalid; 4058 } 4059 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4060 "suppress_cache_flush flag set to %d\n", 4061 un->un_f_suppress_cache_flush); 4062 return; 4063 } 4064 4065 if (strcasecmp(name, "controller-type") == 0) { 4066 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4067 un->un_ctype = val; 4068 } else { 4069 goto value_invalid; 4070 } 4071 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4072 "ctype set to %d\n", un->un_ctype); 4073 return; 4074 } 4075 4076 if (strcasecmp(name, "delay-busy") == 0) { 4077 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4078 un->un_busy_timeout = drv_usectohz(val / 1000); 4079 } else { 4080 goto value_invalid; 4081 } 4082 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4083 "busy_timeout set to %d\n", un->un_busy_timeout); 4084 return; 4085 } 4086 4087 if (strcasecmp(name, "disksort") == 0) { 4088 if (strcasecmp(value, "true") == 0) { 4089 un->un_f_disksort_disabled = FALSE; 4090 } else if (strcasecmp(value, "false") == 0) { 4091 un->un_f_disksort_disabled = TRUE; 4092 } else { 4093 goto value_invalid; 4094 } 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4096 "disksort disabled flag set to %d\n", 4097 un->un_f_disksort_disabled); 4098 return; 4099 } 4100 4101 if (strcasecmp(name, "power-condition") == 0) { 4102 if (strcasecmp(value, "true") == 0) { 4103 un->un_f_power_condition_disabled = FALSE; 4104 } else if (strcasecmp(value, "false") == 0) { 4105 un->un_f_power_condition_disabled = TRUE; 4106 } else { 4107 goto value_invalid; 4108 } 4109 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4110 "power condition disabled flag set to %d\n", 4111 un->un_f_power_condition_disabled); 4112 return; 4113 } 4114 4115 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4116 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4117 un->un_reserve_release_time = val; 4118 } else { 4119 goto value_invalid; 4120 } 4121 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4122 "reservation release timeout set to %d\n", 4123 un->un_reserve_release_time); 4124 return; 4125 } 4126 4127 if (strcasecmp(name, "reset-lun") == 0) { 4128 if (strcasecmp(value, "true") == 0) { 4129 un->un_f_lun_reset_enabled = TRUE; 4130 } else if (strcasecmp(value, "false") == 0) { 4131 un->un_f_lun_reset_enabled = FALSE; 4132 } else { 4133 goto value_invalid; 4134 } 4135 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4136 "lun reset enabled flag set to %d\n", 4137 un->un_f_lun_reset_enabled); 4138 return; 4139 } 4140 4141 if (strcasecmp(name, "retries-busy") == 0) { 4142 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4143 un->un_busy_retry_count = val; 4144 } else { 4145 goto value_invalid; 4146 } 4147 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4148 "busy retry count set to %d\n", un->un_busy_retry_count); 4149 return; 4150 } 4151 4152 if (strcasecmp(name, "retries-timeout") == 0) { 4153 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4154 un->un_retry_count = val; 4155 } 
else { 4156 goto value_invalid; 4157 } 4158 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4159 "timeout retry count set to %d\n", un->un_retry_count); 4160 return; 4161 } 4162 4163 if (strcasecmp(name, "retries-notready") == 0) { 4164 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4165 un->un_notready_retry_count = val; 4166 } else { 4167 goto value_invalid; 4168 } 4169 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4170 "notready retry count set to %d\n", 4171 un->un_notready_retry_count); 4172 return; 4173 } 4174 4175 if (strcasecmp(name, "retries-reset") == 0) { 4176 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4177 un->un_reset_retry_count = val; 4178 } else { 4179 goto value_invalid; 4180 } 4181 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4182 "reset retry count set to %d\n", 4183 un->un_reset_retry_count); 4184 return; 4185 } 4186 4187 if (strcasecmp(name, "throttle-max") == 0) { 4188 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4189 un->un_saved_throttle = un->un_throttle = val; 4190 } else { 4191 goto value_invalid; 4192 } 4193 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4194 "throttle set to %d\n", un->un_throttle); 4195 } 4196 4197 if (strcasecmp(name, "throttle-min") == 0) { 4198 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4199 un->un_min_throttle = val; 4200 } else { 4201 goto value_invalid; 4202 } 4203 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4204 "min throttle set to %d\n", un->un_min_throttle); 4205 } 4206 4207 if (strcasecmp(name, "rmw-type") == 0) { 4208 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4209 un->un_f_rmw_type = val; 4210 } else { 4211 goto value_invalid; 4212 } 4213 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4214 "RMW type set to %d\n", un->un_f_rmw_type); 4215 } 4216 4217 if (strcasecmp(name, "physical-block-size") == 0) { 4218 if (ddi_strtol(value, &endptr, 0, &val) == 0 && 4219 ISP2(val) && val >= un->un_tgt_blocksize && 4220 val >= un->un_sys_blocksize) { 4221 un->un_phy_blocksize = val; 4222 } else { 4223 goto value_invalid; 4224 } 4225 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4226 "physical block size set to %d\n", un->un_phy_blocksize); 4227 } 4228 4229 if (strcasecmp(name, "retries-victim") == 0) { 4230 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4231 un->un_victim_retry_count = val; 4232 } else { 4233 goto value_invalid; 4234 } 4235 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4236 "victim retry count set to %d\n", 4237 un->un_victim_retry_count); 4238 return; 4239 } 4240 4241 /* 4242 * Validate the throttle values. 4243 * If any of the numbers are invalid, set everything to defaults. 
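     * ("Invalid" here means a value below SD_LOWEST_VALID_THROTTLE or a
     * minimum that exceeds the maximum; the defaults are sd_max_throttle
     * and sd_min_throttle.)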
     */
4245     if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4246         (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4247         (un->un_min_throttle > un->un_throttle)) {
4248         un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4249         un->un_min_throttle = sd_min_throttle;
4250     }
4251
4252     if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4253         if (strcasecmp(value, "true") == 0) {
4254             un->un_f_mmc_gesn_polling = TRUE;
4255         } else if (strcasecmp(value, "false") == 0) {
4256             un->un_f_mmc_gesn_polling = FALSE;
4257         } else {
4258             goto value_invalid;
4259         }
4260         SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4261             "mmc-gesn-polling set to %d\n",
4262             un->un_f_mmc_gesn_polling);
4263     }
4264
4265     return;
4266
4267 value_invalid:
4268     SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4269         "value of prop %s is invalid\n", name);
4270 }
4271
4272 /*
4273  * Function: sd_get_tunables_from_conf()
4274  *
4275  * Description: This function reads the data list from the sd.conf file
4276  *              and pulls the values that can have numeric values as
4277  *              arguments, placing each value in the appropriate
4278  *              sd_tunables member. Because the order of the data list
4279  *              members varies across platforms, this function reads
4280  *              them from the data list in a platform-specific order and
4281  *              places them into the correct sd_tunables member, which
4282  *              is consistent across all platforms.
4283  */
4284 static void
4285 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4286     sd_tunables *values)
4287 {
4288     int i;
4289     int mask;
4290
4291     bzero(values, sizeof (sd_tunables));
4292
4293     for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4294
4295         mask = 1 << i;
4296         if (mask > flags) {
4297             break;
4298         }
4299
4300         switch (mask & flags) {
4301         case 0:	/* This mask bit not set in flags */
4302             continue;
4303         case SD_CONF_BSET_THROTTLE:
4304             values->sdt_throttle = data_list[i];
4305             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4306                 "sd_get_tunables_from_conf: throttle = %d\n",
4307                 values->sdt_throttle);
4308             break;
4309         case SD_CONF_BSET_CTYPE:
4310             values->sdt_ctype = data_list[i];
4311             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4312                 "sd_get_tunables_from_conf: ctype = %d\n",
4313                 values->sdt_ctype);
4314             break;
4315         case SD_CONF_BSET_NRR_COUNT:
4316             values->sdt_not_rdy_retries = data_list[i];
4317             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4318                 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4319                 values->sdt_not_rdy_retries);
4320             break;
4321         case SD_CONF_BSET_BSY_RETRY_COUNT:
4322             values->sdt_busy_retries = data_list[i];
4323             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4324                 "sd_get_tunables_from_conf: busy_retries = %d\n",
4325                 values->sdt_busy_retries);
4326             break;
4327         case SD_CONF_BSET_RST_RETRIES:
4328             values->sdt_reset_retries = data_list[i];
4329             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4330                 "sd_get_tunables_from_conf: reset_retries = %d\n",
4331                 values->sdt_reset_retries);
4332             break;
4333         case SD_CONF_BSET_RSV_REL_TIME:
4334             values->sdt_reserv_rel_time = data_list[i];
4335             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4336                 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4337                 values->sdt_reserv_rel_time);
4338             break;
4339         case SD_CONF_BSET_MIN_THROTTLE:
4340             values->sdt_min_throttle = data_list[i];
4341             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4342                 "sd_get_tunables_from_conf: min_throttle = %d\n",
4343                 values->sdt_min_throttle);
4344             break;
4345         case SD_CONF_BSET_DISKSORT_DISABLED:
4346             values->sdt_disk_sort_dis = data_list[i];
4347             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4348                 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4349                 values->sdt_disk_sort_dis);
4350             break;
4351         case SD_CONF_BSET_LUN_RESET_ENABLED:
4352             values->sdt_lun_reset_enable = data_list[i];
4353             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4354                 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4355                 "\n", values->sdt_lun_reset_enable);
4356             break;
4357         case SD_CONF_BSET_CACHE_IS_NV:
4358             values->sdt_suppress_cache_flush = data_list[i];
4359             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4360                 "sd_get_tunables_from_conf: "
4361                 "suppress_cache_flush = %d"
4362                 "\n", values->sdt_suppress_cache_flush);
4363             break;
4364         case SD_CONF_BSET_PC_DISABLED:
4365             values->sdt_power_condition_dis = data_list[i];
4366             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4367                 "sd_get_tunables_from_conf: power_condition_dis = "
4368                 "%d\n", values->sdt_power_condition_dis);
4369             break;
4370         }
4371     }
4372 }
4373
4374 /*
4375  * Function: sd_process_sdconf_table
4376  *
4377  * Description: Search the static configuration table for a match on the
4378  *              inquiry vid/pid and update the driver soft state structure
4379  *              according to the table property values for the device.
4380  *
4381  *              The form of a configuration table entry is:
4382  *              <vid+pid>,<flags>,<property-data>
4383  *              "SEAGATE ST42400N",1,0x40000,
4384  *              0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4385  *
4386  * Arguments: un - driver soft state (unit) structure
4387  */
4388
4389 static void
4390 sd_process_sdconf_table(struct sd_lun *un)
4391 {
4392     char *id = NULL;
4393     int table_index;
4394     int idlen;
4395
4396     ASSERT(un != NULL);
4397     for (table_index = 0; table_index < sd_disk_table_size;
4398         table_index++) {
4399         id = sd_disk_table[table_index].device_id;
4400         idlen = strlen(id);
4401
4402         /*
4403          * The static configuration table currently does not
4404          * implement version 10 properties. Additionally,
4405          * multiple data-property-name entries are not
4406          * implemented in the static configuration table.
4407          */
4408         if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4409             SD_INFO(SD_LOG_ATTACH_DETACH, un,
4410                 "sd_process_sdconf_table: disk %s\n", id);
4411             sd_set_vers1_properties(un,
4412                 sd_disk_table[table_index].flags,
4413                 sd_disk_table[table_index].properties);
4414             break;
4415         }
4416     }
4417 }
4418
4419
4420 /*
4421  * Function: sd_sdconf_id_match
4422  *
4423  * Description: This local function implements a case insensitive vid/pid
4424  *              comparison as well as the boundary cases of wild card and
4425  *              multiple blanks.
4426  *
4427  *              Note: An implicit assumption made here is that the scsi
4428  *              inquiry structure will always keep the vid, pid and
4429  *              revision strings in consecutive sequence, so they can be
4430  *              read as a single string. If this assumption is not the
4431  *              case, a separate string, to be used for the check, needs
4432  *              to be built with these strings concatenated.
4433  *
4434  * Arguments: un - driver soft state (unit) structure
4435  *            id - table or config file vid/pid
4436  *            idlen - length of the vid/pid (bytes)
4437  *
4438  * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4439  *              SD_FAILURE - Indicates no match with the inquiry vid/pid
4440  */
4441
4442 static int
4443 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4444 {
4445     struct scsi_inquiry *sd_inq;
4446     int rval = SD_SUCCESS;
4447
4448     ASSERT(un != NULL);
4449     sd_inq = un->un_sd->sd_inq;
4450     ASSERT(id != NULL);
4451
4452     /*
4453      * We use the inq_vid as a pointer to a buffer containing the
4454      * vid and pid and use the entire vid/pid length of the table
4455      * entry for the comparison.
This works because the inq_pid 4456 * data member follows inq_vid in the scsi_inquiry structure. 4457 */ 4458 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4459 /* 4460 * The user id string is compared to the inquiry vid/pid 4461 * using a case insensitive comparison and ignoring 4462 * multiple spaces. 4463 */ 4464 rval = sd_blank_cmp(un, id, idlen); 4465 if (rval != SD_SUCCESS) { 4466 /* 4467 * User id strings that start and end with a "*" 4468 * are a special case. These do not have a 4469 * specific vendor, and the product string can 4470 * appear anywhere in the 16 byte PID portion of 4471 * the inquiry data. This is a simple strstr() 4472 * type search for the user id in the inquiry data. 4473 */ 4474 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4475 char *pidptr = &id[1]; 4476 int i; 4477 int j; 4478 int pidstrlen = idlen - 2; 4479 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4480 pidstrlen; 4481 4482 if (j < 0) { 4483 return (SD_FAILURE); 4484 } 4485 for (i = 0; i < j; i++) { 4486 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4487 pidptr, pidstrlen) == 0) { 4488 rval = SD_SUCCESS; 4489 break; 4490 } 4491 } 4492 } 4493 } 4494 } 4495 return (rval); 4496 } 4497 4498 4499 /* 4500 * Function: sd_blank_cmp 4501 * 4502 * Description: If the id string starts and ends with a space, treat 4503 * multiple consecutive spaces as equivalent to a single 4504 * space. For example, this causes a sd_disk_table entry 4505 * of " NEC CDROM " to match a device's id string of 4506 * "NEC CDROM". 4507 * 4508 * Note: The success exit condition for this routine is if 4509 * the pointer to the table entry is '\0' and the cnt of 4510 * the inquiry length is zero. This will happen if the inquiry 4511 * string returned by the device is padded with spaces to be 4512 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4513 * SCSI spec states that the inquiry string is to be padded with 4514 * spaces. 4515 * 4516 * Arguments: un - driver soft state (unit) structure 4517 * id - table or config file vid/pid 4518 * idlen - length of the vid/pid (bytes) 4519 * 4520 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4521 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4522 */ 4523 4524 static int 4525 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4526 { 4527 char *p1; 4528 char *p2; 4529 int cnt; 4530 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4531 sizeof (SD_INQUIRY(un)->inq_pid); 4532 4533 ASSERT(un != NULL); 4534 p2 = un->un_sd->sd_inq->inq_vid; 4535 ASSERT(id != NULL); 4536 p1 = id; 4537 4538 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4539 /* 4540 * Note: string p1 is terminated by a NUL but string p2 4541 * isn't. The end of p2 is determined by cnt. 4542 */ 4543 for (;;) { 4544 /* skip over any extra blanks in both strings */ 4545 while ((*p1 != '\0') && (*p1 == ' ')) { 4546 p1++; 4547 } 4548 while ((cnt != 0) && (*p2 == ' ')) { 4549 p2++; 4550 cnt--; 4551 } 4552 4553 /* compare the two strings */ 4554 if ((cnt == 0) || 4555 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4556 break; 4557 } 4558 while ((cnt > 0) && 4559 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4560 p1++; 4561 p2++; 4562 cnt--; 4563 } 4564 } 4565 } 4566 4567 /* return SD_SUCCESS if both strings match */ 4568 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
4569 }
4570
4571
4572 /*
4573  * Function: sd_chk_vers1_data
4574  *
4575  * Description: Verify the version 1 device properties provided by the
4576  *              user via the configuration file
4577  *
4578  * Arguments: un - driver soft state (unit) structure
4579  *            flags - integer mask indicating properties to be set
4580  *            prop_list - integer list of property values
4581  *            list_len - number of elements in the list
 *            dataname_ptr - name of the data property (for error logging)
4582  *
4583  * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4584  *              SD_FAILURE - Indicates the user provided data is invalid
4585  */
4586
4587 static int
4588 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4589     int list_len, char *dataname_ptr)
4590 {
4591     int i;
4592     int mask = 1;
4593     int index = 0;
4594
4595     ASSERT(un != NULL);
4596
4597     /* Check for a NULL property name and list */
4598     if (dataname_ptr == NULL) {
4599         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4600             "sd_chk_vers1_data: NULL data property name.");
4601         return (SD_FAILURE);
4602     }
4603     if (prop_list == NULL) {
4604         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4605             "sd_chk_vers1_data: %s NULL data property list.",
4606             dataname_ptr);
4607         return (SD_FAILURE);
4608     }
4609
4610     /* Display a warning if undefined bits are set in the flags */
4611     if (flags & ~SD_CONF_BIT_MASK) {
4612         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4613             "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4614             "Properties not set.",
4615             (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4616         return (SD_FAILURE);
4617     }
4618
4619     /*
4620      * Verify the length of the list by identifying the highest bit set
4621      * in the flags and validating that the property list has a length
4622      * up to the index of this bit.
4623      */
4624     for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4625         if (flags & mask) {
4626             index = i + 1;
4627         }
4628         mask <<= 1;
4629     }
4630     if (list_len < (index + 2)) {
4631         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4632             "sd_chk_vers1_data: "
4633             "Data property list %s size is incorrect. "
4634             "Properties not set.", dataname_ptr);
4635         scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4636             "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4637         return (SD_FAILURE);
4638     }
4639     return (SD_SUCCESS);
4640 }
4641
4642
4643 /*
4644  * Function: sd_set_vers1_properties
4645  *
4646  * Description: Set version 1 device properties based on a property list
4647  *              retrieved from the driver configuration file or static
4648  *              configuration table. Version 1 properties have the format:
4649  *
4650  *              <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4651  *
4652  *              where the prop0 value will be used to set prop0 if bit0
4653  *              is set in the flags
4654  *
4655  * Arguments: un - driver soft state (unit) structure
4656  *            flags - integer mask indicating properties to be set
4657  *            prop_list - integer list of property values
4658  */
4659
4660 static void
4661 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4662 {
4663     ASSERT(un != NULL);
4664
4665     /*
4666      * Set the flag to indicate cache is to be disabled. An attempt
4667      * to disable the cache via sd_cache_control() will be made
4668      * later during attach once the basic initialization is complete.
4669 */ 4670 if (flags & SD_CONF_BSET_NOCACHE) { 4671 un->un_f_opt_disable_cache = TRUE; 4672 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4673 "sd_set_vers1_properties: caching disabled flag set\n"); 4674 } 4675 4676 /* CD-specific configuration parameters */ 4677 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4678 un->un_f_cfg_playmsf_bcd = TRUE; 4679 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4680 "sd_set_vers1_properties: playmsf_bcd set\n"); 4681 } 4682 if (flags & SD_CONF_BSET_READSUB_BCD) { 4683 un->un_f_cfg_readsub_bcd = TRUE; 4684 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4685 "sd_set_vers1_properties: readsub_bcd set\n"); 4686 } 4687 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4688 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4689 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4690 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4691 } 4692 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4693 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4695 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4696 } 4697 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4698 un->un_f_cfg_no_read_header = TRUE; 4699 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4700 "sd_set_vers1_properties: no_read_header set\n"); 4701 } 4702 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4703 un->un_f_cfg_read_cd_xd4 = TRUE; 4704 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4705 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4706 } 4707 4708 /* Support for devices which do not have valid/unique serial numbers */ 4709 if (flags & SD_CONF_BSET_FAB_DEVID) { 4710 un->un_f_opt_fab_devid = TRUE; 4711 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4712 "sd_set_vers1_properties: fab_devid bit set\n"); 4713 } 4714 4715 /* Support for user throttle configuration */ 4716 if (flags & SD_CONF_BSET_THROTTLE) { 4717 ASSERT(prop_list != NULL); 4718 un->un_saved_throttle = un->un_throttle = 4719 prop_list->sdt_throttle; 4720 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4721 "sd_set_vers1_properties: throttle set to %d\n", 4722 prop_list->sdt_throttle); 4723 } 4724 4725 /* Set the per disk retry count according to the conf file or table. 
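     * (A zero sdt_not_rdy_retries value in the conf data is ignored,
     * leaving the default not-ready retry count in place.)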
*/ 4726 if (flags & SD_CONF_BSET_NRR_COUNT) { 4727 ASSERT(prop_list != NULL); 4728 if (prop_list->sdt_not_rdy_retries) { 4729 un->un_notready_retry_count = 4730 prop_list->sdt_not_rdy_retries; 4731 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4732 "sd_set_vers1_properties: not ready retry count" 4733 " set to %d\n", un->un_notready_retry_count); 4734 } 4735 } 4736 4737 /* The controller type is reported for generic disk driver ioctls */ 4738 if (flags & SD_CONF_BSET_CTYPE) { 4739 ASSERT(prop_list != NULL); 4740 switch (prop_list->sdt_ctype) { 4741 case CTYPE_CDROM: 4742 un->un_ctype = prop_list->sdt_ctype; 4743 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4744 "sd_set_vers1_properties: ctype set to " 4745 "CTYPE_CDROM\n"); 4746 break; 4747 case CTYPE_CCS: 4748 un->un_ctype = prop_list->sdt_ctype; 4749 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4750 "sd_set_vers1_properties: ctype set to " 4751 "CTYPE_CCS\n"); 4752 break; 4753 case CTYPE_ROD: /* RW optical */ 4754 un->un_ctype = prop_list->sdt_ctype; 4755 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4756 "sd_set_vers1_properties: ctype set to " 4757 "CTYPE_ROD\n"); 4758 break; 4759 default: 4760 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4761 "sd_set_vers1_properties: Could not set " 4762 "invalid ctype value (%d)", 4763 prop_list->sdt_ctype); 4764 } 4765 } 4766 4767 /* Purple failover timeout */ 4768 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4769 ASSERT(prop_list != NULL); 4770 un->un_busy_retry_count = 4771 prop_list->sdt_busy_retries; 4772 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4773 "sd_set_vers1_properties: " 4774 "busy retry count set to %d\n", 4775 un->un_busy_retry_count); 4776 } 4777 4778 /* Purple reset retry count */ 4779 if (flags & SD_CONF_BSET_RST_RETRIES) { 4780 ASSERT(prop_list != NULL); 4781 un->un_reset_retry_count = 4782 prop_list->sdt_reset_retries; 4783 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4784 "sd_set_vers1_properties: " 4785 "reset retry count set to %d\n", 4786 un->un_reset_retry_count); 4787 } 4788 4789 /* Purple reservation release timeout */ 4790 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4791 ASSERT(prop_list != NULL); 4792 un->un_reserve_release_time = 4793 prop_list->sdt_reserv_rel_time; 4794 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4795 "sd_set_vers1_properties: " 4796 "reservation release timeout set to %d\n", 4797 un->un_reserve_release_time); 4798 } 4799 4800 /* 4801 * Driver flag telling the driver to verify that no commands are pending 4802 * for a device before issuing a Test Unit Ready. This is a workaround 4803 * for a firmware bug in some Seagate eliteI drives. 4804 */ 4805 if (flags & SD_CONF_BSET_TUR_CHECK) { 4806 un->un_f_cfg_tur_check = TRUE; 4807 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4808 "sd_set_vers1_properties: tur queue check set\n"); 4809 } 4810 4811 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4812 un->un_min_throttle = prop_list->sdt_min_throttle; 4813 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4814 "sd_set_vers1_properties: min throttle set to %d\n", 4815 un->un_min_throttle); 4816 } 4817 4818 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4819 un->un_f_disksort_disabled = 4820 (prop_list->sdt_disk_sort_dis != 0) ? 4821 TRUE : FALSE; 4822 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4823 "sd_set_vers1_properties: disksort disabled " 4824 "flag set to %d\n", 4825 prop_list->sdt_disk_sort_dis); 4826 } 4827 4828 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4829 un->un_f_lun_reset_enabled = 4830 (prop_list->sdt_lun_reset_enable != 0) ? 
4831             TRUE : FALSE;
4832         SD_INFO(SD_LOG_ATTACH_DETACH, un,
4833             "sd_set_vers1_properties: lun reset enabled "
4834             "flag set to %d\n",
4835             prop_list->sdt_lun_reset_enable);
4836     }
4837
4838     if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4839         un->un_f_suppress_cache_flush =
4840             (prop_list->sdt_suppress_cache_flush != 0) ?
4841             TRUE : FALSE;
4842         SD_INFO(SD_LOG_ATTACH_DETACH, un,
4843             "sd_set_vers1_properties: suppress_cache_flush "
4844             "flag set to %d\n",
4845             prop_list->sdt_suppress_cache_flush);
4846     }
4847
4848     if (flags & SD_CONF_BSET_PC_DISABLED) {
4849         un->un_f_power_condition_disabled =
4850             (prop_list->sdt_power_condition_dis != 0) ?
4851             TRUE : FALSE;
4852         SD_INFO(SD_LOG_ATTACH_DETACH, un,
4853             "sd_set_vers1_properties: power_condition_disabled "
4854             "flag set to %d\n",
4855             prop_list->sdt_power_condition_dis);
4856     }
4857
4858     /*
4859      * Validate the throttle values.
4860      * If any of the numbers are invalid, set everything to defaults.
4861      */
4862     if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4863         (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4864         (un->un_min_throttle > un->un_throttle)) {
4865         un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4866         un->un_min_throttle = sd_min_throttle;
4867     }
4868 }
4869
4870 /*
4871  * Function: sd_is_lsi()
4872  *
4873  * Description: Check for LSI devices by stepping through the static
4874  *              device table to match the vid/pid.
4875  *
4876  * Args: un - ptr to sd_lun
4877  *
4878  * Notes: When creating a new LSI property, add the new property to this
4879  *        function as well.
4880  */
4881 static void
4882 sd_is_lsi(struct sd_lun *un)
4883 {
4884     char *id = NULL;
4885     int table_index;
4886     int idlen;
4887     void *prop;
4888
4889     ASSERT(un != NULL);
4890     for (table_index = 0; table_index < sd_disk_table_size;
4891         table_index++) {
4892         id = sd_disk_table[table_index].device_id;
4893         idlen = strlen(id);
4894         if (idlen == 0) {
4895             continue;
4896         }
4897
4898         if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4899             prop = sd_disk_table[table_index].properties;
4900             if (prop == &lsi_properties ||
4901                 prop == &lsi_oem_properties ||
4902                 prop == &lsi_properties_scsi ||
4903                 prop == &symbios_properties) {
4904                 un->un_f_cfg_is_lsi = TRUE;
4905             }
4906             break;
4907         }
4908     }
4909 }
4910
4911 /*
4912  * Function: sd_get_physical_geometry
4913  *
4914  * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4915  *              MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4916  *              target, and use this information to initialize the physical
4917  *              geometry cache specified by pgeom_p.
4918  *
4919  *              MODE SENSE is an optional command, so failure in this case
4920  *              does not necessarily denote an error. We want to use the
4921  *              MODE SENSE commands to derive the physical geometry of the
4922  *              device, but if either command fails, the logical geometry is
4923  *              used as the fallback for disk label geometry in cmlb.
4924  *
4925  *              This requires that un->un_blockcount and un->un_tgt_blocksize
4926  *              have already been initialized for the current target and
4927  *              that the current values be passed as args so that we don't
4928  *              end up ever trying to use -1 as a valid value. This could
4929  *              happen if either value is reset while we're not holding
4930  *              the mutex.
4931  *
4932  * Arguments: un - driver soft state (unit) structure
 *            pgeom_p - pointer to the physical geometry cache to update
 *            capacity - disk capacity in #blocks
 *            lbasize - disk block size in bytes
4933  *            path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4934  *                the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4935  *                to use the USCSI "direct" chain and bypass the normal
4936  *                command waitq.
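 *
 * Return Code: 0 - success
 *              EIO - failure (the device class does not support mode
 *              pages 3 and 4, a MODE SENSE command failed, or the
 *              returned mode data could not be parsed)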
4937 * 4938 * Context: Kernel thread only (can sleep). 4939 */ 4940 4941 static int 4942 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4943 diskaddr_t capacity, int lbasize, int path_flag) 4944 { 4945 struct mode_format *page3p; 4946 struct mode_geometry *page4p; 4947 struct mode_header *headerp; 4948 int sector_size; 4949 int nsect; 4950 int nhead; 4951 int ncyl; 4952 int intrlv; 4953 int spc; 4954 diskaddr_t modesense_capacity; 4955 int rpm; 4956 int bd_len; 4957 int mode_header_length; 4958 uchar_t *p3bufp; 4959 uchar_t *p4bufp; 4960 int cdbsize; 4961 int ret = EIO; 4962 sd_ssc_t *ssc; 4963 int status; 4964 4965 ASSERT(un != NULL); 4966 4967 if (lbasize == 0) { 4968 if (ISCD(un)) { 4969 lbasize = 2048; 4970 } else { 4971 lbasize = un->un_sys_blocksize; 4972 } 4973 } 4974 pgeom_p->g_secsize = (unsigned short)lbasize; 4975 4976 /* 4977 * If the unit is a cd/dvd drive MODE SENSE page three 4978 * and MODE SENSE page four are reserved (see SBC spec 4979 * and MMC spec). To prevent soft errors just return 4980 * using the default LBA size. 4981 * 4982 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not 4983 * implement support for mode pages 3 and 4 return here to prevent 4984 * illegal requests on SATA drives. 4985 * 4986 * These pages are also reserved in SBC-2 and later. We assume SBC-2 4987 * or later for a direct-attached block device if the SCSI version is 4988 * at least SPC-3. 4989 */ 4990 4991 if (ISCD(un) || 4992 un->un_interconnect_type == SD_INTERCONNECT_SATA || 4993 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5)) 4994 return (ret); 4995 4996 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4997 4998 /* 4999 * Retrieve MODE SENSE page 3 - Format Device Page 5000 */ 5001 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 5002 ssc = sd_ssc_init(un); 5003 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 5004 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 5005 if (status != 0) { 5006 SD_ERROR(SD_LOG_COMMON, un, 5007 "sd_get_physical_geometry: mode sense page 3 failed\n"); 5008 goto page3_exit; 5009 } 5010 5011 /* 5012 * Determine size of Block Descriptors in order to locate the mode 5013 * page data. ATAPI devices return 0, SCSI devices should return 5014 * MODE_BLK_DESC_LENGTH. 5015 */ 5016 headerp = (struct mode_header *)p3bufp; 5017 if (un->un_f_cfg_is_atapi == TRUE) { 5018 struct mode_header_grp2 *mhp = 5019 (struct mode_header_grp2 *)headerp; 5020 mode_header_length = MODE_HEADER_LENGTH_GRP2; 5021 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5022 } else { 5023 mode_header_length = MODE_HEADER_LENGTH; 5024 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5025 } 5026 5027 if (bd_len > MODE_BLK_DESC_LENGTH) { 5028 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5029 "sd_get_physical_geometry: received unexpected bd_len " 5030 "of %d, page3\n", bd_len); 5031 status = EIO; 5032 goto page3_exit; 5033 } 5034 5035 page3p = (struct mode_format *) 5036 ((caddr_t)headerp + mode_header_length + bd_len); 5037 5038 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 5039 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5040 "sd_get_physical_geometry: mode sense pg3 code mismatch " 5041 "%d\n", page3p->mode_page.code); 5042 status = EIO; 5043 goto page3_exit; 5044 } 5045 5046 /* 5047 * Use this physical geometry data only if BOTH MODE SENSE commands 5048 * complete successfully; otherwise, revert to the logical geometry. 
5049 * So, we need to save everything in temporary variables. 5050 */ 5051 sector_size = BE_16(page3p->data_bytes_sect); 5052 5053 /* 5054 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 5055 */ 5056 if (sector_size == 0) { 5057 sector_size = un->un_sys_blocksize; 5058 } else { 5059 sector_size &= ~(un->un_sys_blocksize - 1); 5060 } 5061 5062 nsect = BE_16(page3p->sect_track); 5063 intrlv = BE_16(page3p->interleave); 5064 5065 SD_INFO(SD_LOG_COMMON, un, 5066 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 5067 SD_INFO(SD_LOG_COMMON, un, 5068 " mode page: %d; nsect: %d; sector size: %d;\n", 5069 page3p->mode_page.code, nsect, sector_size); 5070 SD_INFO(SD_LOG_COMMON, un, 5071 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 5072 BE_16(page3p->track_skew), 5073 BE_16(page3p->cylinder_skew)); 5074 5075 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5076 5077 /* 5078 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 5079 */ 5080 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5081 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5082 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5083 if (status != 0) { 5084 SD_ERROR(SD_LOG_COMMON, un, 5085 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5086 goto page4_exit; 5087 } 5088 5089 /* 5090 * Determine size of Block Descriptors in order to locate the mode 5091 * page data. ATAPI devices return 0, SCSI devices should return 5092 * MODE_BLK_DESC_LENGTH. 5093 */ 5094 headerp = (struct mode_header *)p4bufp; 5095 if (un->un_f_cfg_is_atapi == TRUE) { 5096 struct mode_header_grp2 *mhp = 5097 (struct mode_header_grp2 *)headerp; 5098 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5099 } else { 5100 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5101 } 5102 5103 if (bd_len > MODE_BLK_DESC_LENGTH) { 5104 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5105 "sd_get_physical_geometry: received unexpected bd_len of " 5106 "%d, page4\n", bd_len); 5107 status = EIO; 5108 goto page4_exit; 5109 } 5110 5111 page4p = (struct mode_geometry *) 5112 ((caddr_t)headerp + mode_header_length + bd_len); 5113 5114 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5115 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5116 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5117 "%d\n", page4p->mode_page.code); 5118 status = EIO; 5119 goto page4_exit; 5120 } 5121 5122 /* 5123 * Stash the data now, after we know that both commands completed. 5124 */ 5125 5126 5127 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5128 spc = nhead * nsect; 5129 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5130 rpm = BE_16(page4p->rpm); 5131 5132 modesense_capacity = spc * ncyl; 5133 5134 SD_INFO(SD_LOG_COMMON, un, 5135 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5136 SD_INFO(SD_LOG_COMMON, un, 5137 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5138 SD_INFO(SD_LOG_COMMON, un, 5139 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5140 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5141 (void *)pgeom_p, capacity); 5142 5143 /* 5144 * Compensate if the drive's geometry is not rectangular, i.e., 5145 * the product of C * H * S returned by MODE SENSE >= that returned 5146 * by read capacity. This is an idiosyncrasy of the original x86 5147 * disk subsystem. 
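     * For example, with spc = 1000, a MODE SENSE capacity of 1,050,000
     * blocks and a READ CAPACITY of 1,000,000 blocks, the 50,000 surplus
     * blocks become g_acyl = 50 alternate cylinders, and g_ncyl is
     * reduced by the same amount.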
5148 */ 5149 if (modesense_capacity >= capacity) { 5150 SD_INFO(SD_LOG_COMMON, un, 5151 "sd_get_physical_geometry: adjusting acyl; " 5152 "old: %d; new: %d\n", pgeom_p->g_acyl, 5153 (modesense_capacity - capacity + spc - 1) / spc); 5154 if (sector_size != 0) { 5155 /* 1243403: NEC D38x7 drives don't support sec size */ 5156 pgeom_p->g_secsize = (unsigned short)sector_size; 5157 } 5158 pgeom_p->g_nsect = (unsigned short)nsect; 5159 pgeom_p->g_nhead = (unsigned short)nhead; 5160 pgeom_p->g_capacity = capacity; 5161 pgeom_p->g_acyl = 5162 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5163 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5164 } 5165 5166 pgeom_p->g_rpm = (unsigned short)rpm; 5167 pgeom_p->g_intrlv = (unsigned short)intrlv; 5168 ret = 0; 5169 5170 SD_INFO(SD_LOG_COMMON, un, 5171 "sd_get_physical_geometry: mode sense geometry:\n"); 5172 SD_INFO(SD_LOG_COMMON, un, 5173 " nsect: %d; sector size: %d; interlv: %d\n", 5174 nsect, sector_size, intrlv); 5175 SD_INFO(SD_LOG_COMMON, un, 5176 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5177 nhead, ncyl, rpm, modesense_capacity); 5178 SD_INFO(SD_LOG_COMMON, un, 5179 "sd_get_physical_geometry: (cached)\n"); 5180 SD_INFO(SD_LOG_COMMON, un, 5181 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5182 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5183 pgeom_p->g_nhead, pgeom_p->g_nsect); 5184 SD_INFO(SD_LOG_COMMON, un, 5185 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5186 pgeom_p->g_secsize, pgeom_p->g_capacity, 5187 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5188 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5189 5190 page4_exit: 5191 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5192 5193 page3_exit: 5194 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5195 5196 if (status != 0) { 5197 if (status == EIO) { 5198 /* 5199 * Some disks do not support mode sense(6), we 5200 * should ignore this kind of error(sense key is 5201 * 0x5 - illegal request). 5202 */ 5203 uint8_t *sensep; 5204 int senlen; 5205 5206 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5207 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5208 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5209 5210 if (senlen > 0 && 5211 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5212 sd_ssc_assessment(ssc, 5213 SD_FMT_IGNORE_COMPROMISE); 5214 } else { 5215 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5216 } 5217 } else { 5218 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5219 } 5220 } 5221 sd_ssc_fini(ssc); 5222 return (ret); 5223 } 5224 5225 /* 5226 * Function: sd_get_virtual_geometry 5227 * 5228 * Description: Ask the controller to tell us about the target device. 5229 * 5230 * Arguments: un - pointer to softstate 5231 * capacity - disk capacity in #blocks 5232 * lbasize - disk block size in bytes 5233 * 5234 * Context: Kernel thread only 5235 */ 5236 5237 static int 5238 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5239 diskaddr_t capacity, int lbasize) 5240 { 5241 uint_t geombuf; 5242 int spc; 5243 5244 ASSERT(un != NULL); 5245 5246 /* Set sector size, and total number of sectors */ 5247 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5248 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5249 5250 /* Let the HBA tell us its geometry */ 5251 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5252 5253 /* A value of -1 indicates an undefined "geometry" property */ 5254 if (geombuf == (-1)) { 5255 return (EINVAL); 5256 } 5257 5258 /* Initialize the logical geometry cache. 
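 * The HBA's 'geometry' capability packs nhead into the upper 16 bits and
 * nsect into the lower 16 bits of the value decoded below.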
*/ 5259 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5260 lgeom_p->g_nsect = geombuf & 0xffff; 5261 lgeom_p->g_secsize = un->un_sys_blocksize; 5262 5263 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5264 5265 /* 5266 * Note: The driver originally converted the capacity value from 5267 * target blocks to system blocks. However, the capacity value passed 5268 * to this routine is already in terms of system blocks (this scaling 5269 * is done when the READ CAPACITY command is issued and processed). 5270 * This 'error' may have gone undetected because the usage of g_ncyl 5271 * (which is based upon g_capacity) is very limited within the driver 5272 */ 5273 lgeom_p->g_capacity = capacity; 5274 5275 /* 5276 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5277 * hba may return zero values if the device has been removed. 5278 */ 5279 if (spc == 0) { 5280 lgeom_p->g_ncyl = 0; 5281 } else { 5282 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5283 } 5284 lgeom_p->g_acyl = 0; 5285 5286 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5287 return (0); 5288 5289 } 5290 /* 5291 * Function: sd_update_block_info 5292 * 5293 * Description: Calculate a byte count to sector count bitshift value 5294 * from sector size. 5295 * 5296 * Arguments: un: unit struct. 5297 * lbasize: new target sector size 5298 * capacity: new target capacity, ie. block count 5299 * 5300 * Context: Kernel thread context 5301 */ 5302 5303 static void 5304 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5305 { 5306 if (lbasize != 0) { 5307 un->un_tgt_blocksize = lbasize; 5308 un->un_f_tgt_blocksize_is_valid = TRUE; 5309 if (!un->un_f_has_removable_media) { 5310 un->un_sys_blocksize = lbasize; 5311 } 5312 } 5313 5314 if (capacity != 0) { 5315 un->un_blockcount = capacity; 5316 un->un_f_blockcount_is_valid = TRUE; 5317 5318 /* 5319 * The capacity has changed so update the errstats. 5320 */ 5321 if (un->un_errstats != NULL) { 5322 struct sd_errstats *stp; 5323 5324 capacity *= un->un_sys_blocksize; 5325 stp = (struct sd_errstats *)un->un_errstats->ks_data; 5326 if (stp->sd_capacity.value.ui64 < capacity) 5327 stp->sd_capacity.value.ui64 = capacity; 5328 } 5329 } 5330 } 5331 5332 /* 5333 * Parses the SCSI Block Limits VPD page (0xB0). It's legal to pass NULL for 5334 * vpd_pg, in which case all the block limits will be reset to the defaults. 5335 */ 5336 static void 5337 sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg) 5338 { 5339 sd_blk_limits_t *lim = &un->un_blk_lim; 5340 unsigned pg_len; 5341 5342 if (vpd_pg != NULL) 5343 pg_len = BE_IN16(&vpd_pg[2]); 5344 else 5345 pg_len = 0; 5346 5347 /* Block Limits VPD can be 16 bytes or 64 bytes long - support both */ 5348 if (pg_len >= 0x10) { 5349 lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]); 5350 lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]); 5351 lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]); 5352 5353 /* Zero means not reported, so use "unlimited" */ 5354 if (lim->lim_max_xfer_len == 0) 5355 lim->lim_max_xfer_len = UINT32_MAX; 5356 if (lim->lim_opt_xfer_len == 0) 5357 lim->lim_opt_xfer_len = UINT32_MAX; 5358 } else { 5359 lim->lim_opt_xfer_len_gran = 0; 5360 lim->lim_max_xfer_len = UINT32_MAX; 5361 lim->lim_opt_xfer_len = UINT32_MAX; 5362 } 5363 if (pg_len >= 0x3c) { 5364 lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]); 5365 /* 5366 * A zero in either of the following two fields indicates lack 5367 * of UNMAP support. 
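     * (The two fields are MAXIMUM UNMAP LBA COUNT, bytes 20-23, and
     * MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT, bytes 24-27 of the B0 page,
     * decoded just below.)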
5368 */ 5369 lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]); 5370 lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]); 5371 lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]); 5372 if ((vpd_pg[32] >> 7) == 1) { 5373 lim->lim_unmap_gran_align = 5374 ((vpd_pg[32] & 0x7f) << 24) | (vpd_pg[33] << 16) | 5375 (vpd_pg[34] << 8) | vpd_pg[35]; 5376 } else { 5377 lim->lim_unmap_gran_align = 0; 5378 } 5379 lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]); 5380 } else { 5381 lim->lim_max_pfetch_len = UINT32_MAX; 5382 lim->lim_max_unmap_lba_cnt = UINT32_MAX; 5383 lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR; 5384 lim->lim_opt_unmap_gran = 0; 5385 lim->lim_unmap_gran_align = 0; 5386 lim->lim_max_write_same_len = UINT64_MAX; 5387 } 5388 } 5389 5390 /* 5391 * Collects VPD page B0 data if available (block limits). If the data is 5392 * not available or querying the device failed, we revert to the defaults. 5393 */ 5394 static void 5395 sd_setup_blk_limits(sd_ssc_t *ssc) 5396 { 5397 struct sd_lun *un = ssc->ssc_un; 5398 uchar_t *inqB0 = NULL; 5399 size_t inqB0_resid = 0; 5400 int rval; 5401 5402 if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) { 5403 inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP); 5404 rval = sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE, 0x01, 5405 0xB0, &inqB0_resid); 5406 if (rval != 0) { 5407 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5408 kmem_free(inqB0, MAX_INQUIRY_SIZE); 5409 inqB0 = NULL; 5410 } 5411 } 5412 /* passing NULL inqB0 will reset to defaults */ 5413 sd_parse_blk_limits_vpd(ssc->ssc_un, inqB0); 5414 if (inqB0) 5415 kmem_free(inqB0, MAX_INQUIRY_SIZE); 5416 } 5417 5418 /* 5419 * Function: sd_register_devid 5420 * 5421 * Description: This routine will obtain the device id information from the 5422 * target, obtain the serial number, and register the device 5423 * id with the ddi framework. 5424 * 5425 * Arguments: devi - the system's dev_info_t for the device. 5426 * un - driver soft state (unit) structure 5427 * reservation_flag - indicates if a reservation conflict 5428 * occurred during attach 5429 * 5430 * Context: Kernel Thread 5431 */ 5432 static void 5433 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5434 { 5435 int rval = 0; 5436 uchar_t *inq80 = NULL; 5437 size_t inq80_len = MAX_INQUIRY_SIZE; 5438 size_t inq80_resid = 0; 5439 uchar_t *inq83 = NULL; 5440 size_t inq83_len = MAX_INQUIRY_SIZE; 5441 size_t inq83_resid = 0; 5442 int dlen, len; 5443 char *sn; 5444 struct sd_lun *un; 5445 5446 ASSERT(ssc != NULL); 5447 un = ssc->ssc_un; 5448 ASSERT(un != NULL); 5449 ASSERT(mutex_owned(SD_MUTEX(un))); 5450 ASSERT((SD_DEVINFO(un)) == devi); 5451 5452 5453 /* 5454 * We check the availability of the World Wide Name (0x83) and Unit 5455 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5456 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5457 * 0x83 is available, that is the best choice. Our next choice is 5458 * 0x80. If neither are available, we munge the devid from the device 5459 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5460 * to fabricate a devid for non-Sun qualified disks. 
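     * (The page probing itself is done by sd_check_vpd_page_support(),
     * called below, which is what populates un_vpd_page_mask.)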
5461 */ 5462 if (sd_check_vpd_page_support(ssc) == 0) { 5463 /* collect page 80 data if available */ 5464 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5465 5466 mutex_exit(SD_MUTEX(un)); 5467 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5468 5469 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5470 0x01, 0x80, &inq80_resid); 5471 5472 if (rval != 0) { 5473 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5474 kmem_free(inq80, inq80_len); 5475 inq80 = NULL; 5476 inq80_len = 0; 5477 } else if (ddi_prop_exists( 5478 DDI_DEV_T_NONE, SD_DEVINFO(un), 5479 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5480 INQUIRY_SERIAL_NO) == 0) { 5481 /* 5482 * If we don't already have a serial number 5483 * property, do quick verify of data returned 5484 * and define property. 5485 */ 5486 dlen = inq80_len - inq80_resid; 5487 len = (size_t)inq80[3]; 5488 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5489 /* 5490 * Ensure sn termination, skip leading 5491 * blanks, and create property 5492 * 'inquiry-serial-no'. 5493 */ 5494 sn = (char *)&inq80[4]; 5495 sn[len] = 0; 5496 while (*sn && (*sn == ' ')) 5497 sn++; 5498 if (*sn) { 5499 (void) ddi_prop_update_string( 5500 DDI_DEV_T_NONE, 5501 SD_DEVINFO(un), 5502 INQUIRY_SERIAL_NO, sn); 5503 } 5504 } 5505 } 5506 mutex_enter(SD_MUTEX(un)); 5507 } 5508 5509 /* collect page 83 data if available */ 5510 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5511 mutex_exit(SD_MUTEX(un)); 5512 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5513 5514 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5515 0x01, 0x83, &inq83_resid); 5516 5517 if (rval != 0) { 5518 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5519 kmem_free(inq83, inq83_len); 5520 inq83 = NULL; 5521 inq83_len = 0; 5522 } 5523 mutex_enter(SD_MUTEX(un)); 5524 } 5525 } 5526 5527 /* 5528 * If transport has already registered a devid for this target 5529 * then that takes precedence over the driver's determination 5530 * of the devid. 5531 * 5532 * NOTE: The reason this check is done here instead of at the beginning 5533 * of the function is to allow the code above to create the 5534 * 'inquiry-serial-no' property. 5535 */ 5536 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5537 ASSERT(un->un_devid); 5538 un->un_f_devid_transport_defined = TRUE; 5539 goto cleanup; /* use devid registered by the transport */ 5540 } 5541 5542 /* 5543 * This is the case of antiquated Sun disk drives that have the 5544 * FAB_DEVID property set in the disk_table. These drives 5545 * manage the devid's by storing them in last 2 available sectors 5546 * on the drive and have them fabricated by the ddi layer by calling 5547 * ddi_devid_init and passing the DEVID_FAB flag. 5548 */ 5549 if (un->un_f_opt_fab_devid == TRUE) { 5550 /* 5551 * Depending on EINVAL isn't reliable, since a reserved disk 5552 * may result in invalid geometry, so check to make sure a 5553 * reservation conflict did not occur during attach. 5554 */ 5555 if ((sd_get_devid(ssc) == EINVAL) && 5556 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5557 /* 5558 * The devid is invalid AND there is no reservation 5559 * conflict. Fabricate a new devid. 
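         * (sd_create_devid() below both fabricates the devid and
         * persists it to the reserved sector via sd_write_deviceid().)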
5560          */
5561             (void) sd_create_devid(ssc);
5562         }
5563 
5564         /* Register the devid if it exists */
5565         if (un->un_devid != NULL) {
5566             (void) ddi_devid_register(SD_DEVINFO(un),
5567                 un->un_devid);
5568             SD_INFO(SD_LOG_ATTACH_DETACH, un,
5569                 "sd_register_devid: Devid Fabricated\n");
5570         }
5571         goto cleanup;
5572     }
5573 
5574     /* encode best devid possible based on data available */
5575     if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5576         (char *)ddi_driver_name(SD_DEVINFO(un)),
5577         (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5578         inq80, inq80_len - inq80_resid, inq83, inq83_len -
5579         inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5580 
5581         /* devid successfully encoded, register devid */
5582         (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5583 
5584     } else {
5585         /*
5586          * Unable to encode a devid based on data available.
5587          * This is not a Sun qualified disk. Older Sun disk
5588          * drives that have the SD_FAB_DEVID property
5589          * set in the disk_table and non-Sun qualified
5590          * disks are treated in the same manner. These
5591          * drives manage the devids by storing them in the
5592          * last 2 available sectors on the drive and
5593          * have them fabricated by the ddi layer by
5594          * calling ddi_devid_init and passing the
5595          * DEVID_FAB flag.
5596          * Create a fabricated devid only if a fabricated
5597          * devid does not already exist.
5598          */
5599         if (sd_get_devid(ssc) == EINVAL) {
5600             (void) sd_create_devid(ssc);
5601         }
5602         un->un_f_opt_fab_devid = TRUE;
5603 
5604         /* Register the devid if it exists */
5605         if (un->un_devid != NULL) {
5606             (void) ddi_devid_register(SD_DEVINFO(un),
5607                 un->un_devid);
5608             SD_INFO(SD_LOG_ATTACH_DETACH, un,
5609                 "sd_register_devid: devid fabricated using "
5610                 "ddi framework\n");
5611         }
5612     }
5613 
5614 cleanup:
5615     /* clean up resources */
5616     if (inq80 != NULL) {
5617         kmem_free(inq80, inq80_len);
5618     }
5619     if (inq83 != NULL) {
5620         kmem_free(inq83, inq83_len);
5621     }
5622 }
5623 
5624 
5625 
5626 /*
5627  * Function: sd_get_devid
5628  *
5629  * Description: This routine will return 0 if a valid device id has been
5630  *     obtained from the target and stored in the soft state. If a
5631  *     valid device id has not been previously read and stored, a
5632  *     read attempt will be made.
5633  *
5634  * Arguments: un - driver soft state (unit) structure
5635  *
5636  * Return Code: 0 if we successfully get the device id
5637  *
5638  * Context: Kernel Thread
5639  */
5640 
5641 static int
5642 sd_get_devid(sd_ssc_t *ssc)
5643 {
5644     struct dk_devid *dkdevid;
5645     ddi_devid_t tmpid;
5646     uint_t *ip;
5647     size_t sz;
5648     diskaddr_t blk;
5649     int status;
5650     int chksum;
5651     int i;
5652     size_t buffer_size;
5653     struct sd_lun *un;
5654 
5655     ASSERT(ssc != NULL);
5656     un = ssc->ssc_un;
5657     ASSERT(un != NULL);
5658     ASSERT(mutex_owned(SD_MUTEX(un)));
5659 
5660     SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5661         un);
5662 
5663     if (un->un_devid != NULL) {
5664         return (0);
5665     }
5666 
5667     mutex_exit(SD_MUTEX(un));
5668     if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5669         (void *)SD_PATH_DIRECT) != 0) {
5670         mutex_enter(SD_MUTEX(un));
5671         return (EINVAL);
5672     }
5673 
5674     /*
5675      * Read and verify the device id, stored in the reserved cylinders at
5676      * the end of the disk. The backup label is on the odd sectors of the
5677      * last track of the last cylinder. The device id will be on the track
5678      * of the next-to-last cylinder.
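     * (cmlb_get_devid_block(), called above, supplied the exact block
     * address used for the READ below.)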
5679 */ 5680 mutex_enter(SD_MUTEX(un)); 5681 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5682 mutex_exit(SD_MUTEX(un)); 5683 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5684 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5685 SD_PATH_DIRECT); 5686 5687 if (status != 0) { 5688 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5689 goto error; 5690 } 5691 5692 /* Validate the revision */ 5693 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5694 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5695 status = EINVAL; 5696 goto error; 5697 } 5698 5699 /* Calculate the checksum */ 5700 chksum = 0; 5701 ip = (uint_t *)dkdevid; 5702 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5703 i++) { 5704 chksum ^= ip[i]; 5705 } 5706 5707 /* Compare the checksums */ 5708 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5709 status = EINVAL; 5710 goto error; 5711 } 5712 5713 /* Validate the device id */ 5714 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5715 status = EINVAL; 5716 goto error; 5717 } 5718 5719 /* 5720 * Store the device id in the driver soft state 5721 */ 5722 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5723 tmpid = kmem_alloc(sz, KM_SLEEP); 5724 5725 mutex_enter(SD_MUTEX(un)); 5726 5727 un->un_devid = tmpid; 5728 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5729 5730 kmem_free(dkdevid, buffer_size); 5731 5732 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5733 5734 return (status); 5735 error: 5736 mutex_enter(SD_MUTEX(un)); 5737 kmem_free(dkdevid, buffer_size); 5738 return (status); 5739 } 5740 5741 5742 /* 5743 * Function: sd_create_devid 5744 * 5745 * Description: This routine will fabricate the device id and write it 5746 * to the disk. 5747 * 5748 * Arguments: un - driver soft state (unit) structure 5749 * 5750 * Return Code: value of the fabricated device id 5751 * 5752 * Context: Kernel Thread 5753 */ 5754 5755 static ddi_devid_t 5756 sd_create_devid(sd_ssc_t *ssc) 5757 { 5758 struct sd_lun *un; 5759 5760 ASSERT(ssc != NULL); 5761 un = ssc->ssc_un; 5762 ASSERT(un != NULL); 5763 5764 /* Fabricate the devid */ 5765 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5766 == DDI_FAILURE) { 5767 return (NULL); 5768 } 5769 5770 /* Write the devid to disk */ 5771 if (sd_write_deviceid(ssc) != 0) { 5772 ddi_devid_free(un->un_devid); 5773 un->un_devid = NULL; 5774 } 5775 5776 return (un->un_devid); 5777 } 5778 5779 5780 /* 5781 * Function: sd_write_deviceid 5782 * 5783 * Description: This routine will write the device id to the disk 5784 * reserved sector. 
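 *     The on-disk format written here is what sd_get_devid() validates
 *     above: a two-byte revision, the devid itself, and an XOR checksum
 *     over the rest of the sector (see DKD_FORMCHKSUM below).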
5785  *
5786  * Arguments: un - driver soft state (unit) structure
5787  *
5788  * Return Code: -1 if the devid block cannot be determined, otherwise
5789  *     the value returned by sd_send_scsi_WRITE()
5790  *
5791  * Context: Kernel Thread
5792  */
5793 
5794 static int
5795 sd_write_deviceid(sd_ssc_t *ssc)
5796 {
5797     struct dk_devid *dkdevid;
5798     uchar_t *buf;
5799     diskaddr_t blk;
5800     uint_t *ip, chksum;
5801     int status;
5802     int i;
5803     struct sd_lun *un;
5804 
5805     ASSERT(ssc != NULL);
5806     un = ssc->ssc_un;
5807     ASSERT(un != NULL);
5808     ASSERT(mutex_owned(SD_MUTEX(un)));
5809 
5810     mutex_exit(SD_MUTEX(un));
5811     if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5812         (void *)SD_PATH_DIRECT) != 0) {
5813         mutex_enter(SD_MUTEX(un));
5814         return (-1);
5815     }
5816 
5817 
5818     /* Allocate the buffer */
5819     buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5820     dkdevid = (struct dk_devid *)buf;
5821 
5822     /* Fill in the revision */
5823     dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5824     dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5825 
5826     /* Copy in the device id */
5827     mutex_enter(SD_MUTEX(un));
5828     bcopy(un->un_devid, &dkdevid->dkd_devid,
5829         ddi_devid_sizeof(un->un_devid));
5830     mutex_exit(SD_MUTEX(un));
5831 
5832     /* Calculate the checksum */
5833     chksum = 0;
5834     ip = (uint_t *)dkdevid;
5835     for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5836         i++) {
5837         chksum ^= ip[i];
5838     }
5839 
5840     /* Fill in the checksum */
5841     DKD_FORMCHKSUM(chksum, dkdevid);
5842 
5843     /* Write the reserved sector */
5844     status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5845         SD_PATH_DIRECT);
5846     if (status != 0)
5847         sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5848 
5849     kmem_free(buf, un->un_sys_blocksize);
5850 
5851     mutex_enter(SD_MUTEX(un));
5852     return (status);
5853 }
5854 
5855 
5856 /*
5857  * Function: sd_check_vpd_page_support
5858  *
5859  * Description: This routine sends an inquiry command with the EVPD bit set
5860  *     and a page code of 0x00 to the device. It is used to determine which
5861  *     vital product pages are available to find the devid. We are
5862  *     looking for pages 0x83, 0x80 or 0xB1. If we return -1, the
5863  *     device does not support that command.
5864  *
5865  * Arguments: un - driver soft state (unit) structure
5866  *
5867  * Return Code: 0 - success
5868  *     -1 - the device does not support VPD pages (or the INQUIRY failed)
5869  *
5870  * Context: This routine can sleep.
5871  */
5872 
5873 static int
5874 sd_check_vpd_page_support(sd_ssc_t *ssc)
5875 {
5876     uchar_t *page_list = NULL;
5877     uchar_t page_length = 0xff; /* Use max possible length */
5878     uchar_t evpd = 0x01; /* Set the EVPD bit */
5879     uchar_t page_code = 0x00; /* Supported VPD Pages */
5880     int rval = 0;
5881     int counter;
5882     struct sd_lun *un;
5883 
5884     ASSERT(ssc != NULL);
5885     un = ssc->ssc_un;
5886     ASSERT(un != NULL);
5887     ASSERT(mutex_owned(SD_MUTEX(un)));
5888 
5889     mutex_exit(SD_MUTEX(un));
5890 
5891     /*
5892      * We'll set the page length to the maximum to save figuring it out
5893      * with an additional call.
5894      */
5895     page_list = kmem_zalloc(page_length, KM_SLEEP);
5896 
5897     rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5898         page_code, NULL);
5899 
5900     if (rval != 0)
5901         sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5902 
5903     mutex_enter(SD_MUTEX(un));
5904 
5905     /*
5906      * Now we must validate that the device accepted the command, as some
5907      * drives do not support it. If the drive does support it, we will
5908      * return 0, and the supported pages will be in un_vpd_page_mask. If
5909      * not, we return -1.
5910 */ 5911 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5912 /* Loop to find one of the 2 pages we need */ 5913 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5914 5915 /* 5916 * Pages are returned in ascending order, and 0x83 is what we 5917 * are hoping for. 5918 */ 5919 while ((page_list[counter] <= 0xB1) && 5920 (counter <= (page_list[VPD_PAGE_LENGTH] + 5921 VPD_HEAD_OFFSET))) { 5922 /* 5923 * Add 3 because page_list[3] is the number of 5924 * pages minus 3 5925 */ 5926 5927 switch (page_list[counter]) { 5928 case 0x00: 5929 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5930 break; 5931 case 0x80: 5932 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5933 break; 5934 case 0x81: 5935 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5936 break; 5937 case 0x82: 5938 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5939 break; 5940 case 0x83: 5941 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5942 break; 5943 case 0x86: 5944 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5945 break; 5946 case 0xB0: 5947 un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG; 5948 break; 5949 case 0xB1: 5950 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG; 5951 break; 5952 } 5953 counter++; 5954 } 5955 5956 } else { 5957 rval = -1; 5958 5959 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5960 "sd_check_vpd_page_support: This drive does not implement " 5961 "VPD pages.\n"); 5962 } 5963 5964 kmem_free(page_list, page_length); 5965 5966 return (rval); 5967 } 5968 5969 5970 /* 5971 * Function: sd_setup_pm 5972 * 5973 * Description: Initialize Power Management on the device 5974 * 5975 * Context: Kernel Thread 5976 */ 5977 5978 static void 5979 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5980 { 5981 uint_t log_page_size; 5982 uchar_t *log_page_data; 5983 int rval = 0; 5984 struct sd_lun *un; 5985 5986 ASSERT(ssc != NULL); 5987 un = ssc->ssc_un; 5988 ASSERT(un != NULL); 5989 5990 /* 5991 * Since we are called from attach, holding a mutex for 5992 * un is unnecessary. Because some of the routines called 5993 * from here require SD_MUTEX to not be held, assert this 5994 * right up front. 5995 */ 5996 ASSERT(!mutex_owned(SD_MUTEX(un))); 5997 /* 5998 * Since the sd device does not have the 'reg' property, 5999 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6000 * The following code is to tell cpr that this device 6001 * DOES need to be suspended and resumed. 6002 */ 6003 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6004 "pm-hardware-state", "needs-suspend-resume"); 6005 6006 /* 6007 * This complies with the new power management framework 6008 * for certain desktop machines. Create the pm_components 6009 * property as a string array property. 6010 * If un_f_pm_supported is TRUE, that means the disk 6011 * attached HBA has set the "pm-capable" property and 6012 * the value of this property is bigger than 0. 6013 */ 6014 if (un->un_f_pm_supported) { 6015 /* 6016 * not all devices have a motor, try it first. 6017 * some devices may return ILLEGAL REQUEST, some 6018 * will hang 6019 * The following START_STOP_UNIT is used to check if target 6020 * device has a motor. 
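         * (A device without a motor will typically reject the start
         * request; such a failure is treated below as start/stop being
         * unsupported.)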
6021 */ 6022 un->un_f_start_stop_supported = TRUE; 6023 6024 if (un->un_f_power_condition_supported) { 6025 rval = sd_send_scsi_START_STOP_UNIT(ssc, 6026 SD_POWER_CONDITION, SD_TARGET_ACTIVE, 6027 SD_PATH_DIRECT); 6028 if (rval != 0) { 6029 un->un_f_power_condition_supported = FALSE; 6030 } 6031 } 6032 if (!un->un_f_power_condition_supported) { 6033 rval = sd_send_scsi_START_STOP_UNIT(ssc, 6034 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT); 6035 } 6036 if (rval != 0) { 6037 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6038 un->un_f_start_stop_supported = FALSE; 6039 } 6040 6041 /* 6042 * create pm properties anyways otherwise the parent can't 6043 * go to sleep 6044 */ 6045 un->un_f_pm_is_enabled = TRUE; 6046 (void) sd_create_pm_components(devi, un); 6047 6048 /* 6049 * If it claims that log sense is supported, check it out. 6050 */ 6051 if (un->un_f_log_sense_supported) { 6052 rval = sd_log_page_supported(ssc, 6053 START_STOP_CYCLE_PAGE); 6054 if (rval == 1) { 6055 /* Page found, use it. */ 6056 un->un_start_stop_cycle_page = 6057 START_STOP_CYCLE_PAGE; 6058 } else { 6059 /* 6060 * Page not found or log sense is not 6061 * supported. 6062 * Notice we do not check the old style 6063 * START_STOP_CYCLE_VU_PAGE because this 6064 * code path does not apply to old disks. 6065 */ 6066 un->un_f_log_sense_supported = FALSE; 6067 un->un_f_pm_log_sense_smart = FALSE; 6068 } 6069 } 6070 6071 return; 6072 } 6073 6074 /* 6075 * For the disk whose attached HBA has not set the "pm-capable" 6076 * property, check if it supports the power management. 6077 */ 6078 if (!un->un_f_log_sense_supported) { 6079 un->un_power_level = SD_SPINDLE_ON; 6080 un->un_f_pm_is_enabled = FALSE; 6081 return; 6082 } 6083 6084 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 6085 6086 #ifdef SDDEBUG 6087 if (sd_force_pm_supported) { 6088 /* Force a successful result */ 6089 rval = 1; 6090 } 6091 #endif 6092 6093 /* 6094 * If the start-stop cycle counter log page is not supported 6095 * or if the pm-capable property is set to be false (0), 6096 * then we should not create the pm_components property. 6097 */ 6098 if (rval == -1) { 6099 /* 6100 * Error. 6101 * Reading log sense failed, most likely this is 6102 * an older drive that does not support log sense. 6103 * If this fails auto-pm is not supported. 6104 */ 6105 un->un_power_level = SD_SPINDLE_ON; 6106 un->un_f_pm_is_enabled = FALSE; 6107 6108 } else if (rval == 0) { 6109 /* 6110 * Page not found. 6111 * The start stop cycle counter is implemented as page 6112 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 6113 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6114 */ 6115 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 6116 /* 6117 * Page found, use this one. 6118 */ 6119 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6120 un->un_f_pm_is_enabled = TRUE; 6121 } else { 6122 /* 6123 * Error or page not found. 6124 * auto-pm is not supported for this device. 6125 */ 6126 un->un_power_level = SD_SPINDLE_ON; 6127 un->un_f_pm_is_enabled = FALSE; 6128 } 6129 } else { 6130 /* 6131 * Page found, use it. 
6132 */ 6133 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6134 un->un_f_pm_is_enabled = TRUE; 6135 } 6136 6137 6138 if (un->un_f_pm_is_enabled == TRUE) { 6139 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6140 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6141 6142 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6143 log_page_size, un->un_start_stop_cycle_page, 6144 0x01, 0, SD_PATH_DIRECT); 6145 6146 if (rval != 0) { 6147 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6148 } 6149 6150 #ifdef SDDEBUG 6151 if (sd_force_pm_supported) { 6152 /* Force a successful result */ 6153 rval = 0; 6154 } 6155 #endif 6156 6157 /* 6158 * If the Log sense for Page( Start/stop cycle counter page) 6159 * succeeds, then power management is supported and we can 6160 * enable auto-pm. 6161 */ 6162 if (rval == 0) { 6163 (void) sd_create_pm_components(devi, un); 6164 } else { 6165 un->un_power_level = SD_SPINDLE_ON; 6166 un->un_f_pm_is_enabled = FALSE; 6167 } 6168 6169 kmem_free(log_page_data, log_page_size); 6170 } 6171 } 6172 6173 6174 /* 6175 * Function: sd_create_pm_components 6176 * 6177 * Description: Initialize PM property. 6178 * 6179 * Context: Kernel thread context 6180 */ 6181 6182 static void 6183 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6184 { 6185 ASSERT(!mutex_owned(SD_MUTEX(un))); 6186 6187 if (un->un_f_power_condition_supported) { 6188 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6189 "pm-components", sd_pwr_pc.pm_comp, 5) 6190 != DDI_PROP_SUCCESS) { 6191 un->un_power_level = SD_SPINDLE_ACTIVE; 6192 un->un_f_pm_is_enabled = FALSE; 6193 return; 6194 } 6195 } else { 6196 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6197 "pm-components", sd_pwr_ss.pm_comp, 3) 6198 != DDI_PROP_SUCCESS) { 6199 un->un_power_level = SD_SPINDLE_ON; 6200 un->un_f_pm_is_enabled = FALSE; 6201 return; 6202 } 6203 } 6204 /* 6205 * When components are initially created they are idle, 6206 * power up any non-removables. 6207 * Note: the return value of pm_raise_power can't be used 6208 * for determining if PM should be enabled for this device. 6209 * Even if you check the return values and remove this 6210 * property created above, the PM framework will not honor the 6211 * change after the first call to pm_raise_power. Hence, 6212 * removal of that property does not help if pm_raise_power 6213 * fails. In the case of removable media, the start/stop 6214 * will fail if the media is not present. 6215 */ 6216 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 6217 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) { 6218 mutex_enter(SD_MUTEX(un)); 6219 un->un_power_level = SD_PM_STATE_ACTIVE(un); 6220 mutex_enter(&un->un_pm_mutex); 6221 /* Set to on and not busy. */ 6222 un->un_pm_count = 0; 6223 } else { 6224 mutex_enter(SD_MUTEX(un)); 6225 un->un_power_level = SD_PM_STATE_STOPPED(un); 6226 mutex_enter(&un->un_pm_mutex); 6227 /* Set to off. */ 6228 un->un_pm_count = -1; 6229 } 6230 mutex_exit(&un->un_pm_mutex); 6231 mutex_exit(SD_MUTEX(un)); 6232 } 6233 6234 6235 /* 6236 * Function: sd_ddi_suspend 6237 * 6238 * Description: Performs system power-down operations. This includes 6239 * setting the drive state to indicate its suspended so 6240 * that no new commands will be accepted. Also, wait for 6241 * all commands that are in transport or queued to a timer 6242 * for retry to complete. All timeout threads are cancelled. 
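 *     (Specifically, the code below suspends the SCSI watch thread and
 *     untimeouts un_reset_throttle_timeid, un_dcvb_timeid, un_pm_timeid,
 *     un_rmw_msg_timeid, un_retry_timeid and un_direct_priority_timeid.)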
6243  *
6244  * Return Code: DDI_FAILURE or DDI_SUCCESS
6245  *
6246  * Context: Kernel thread context
6247  */
6248 
6249 static int
6250 sd_ddi_suspend(dev_info_t *devi)
6251 {
6252     struct sd_lun *un;
6253     clock_t wait_cmds_complete;
6254 
6255     un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6256     if (un == NULL) {
6257         return (DDI_FAILURE);
6258     }
6259 
6260     SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6261 
6262     mutex_enter(SD_MUTEX(un));
6263 
6264     /* Return success if the device is already suspended. */
6265     if (un->un_state == SD_STATE_SUSPENDED) {
6266         mutex_exit(SD_MUTEX(un));
6267         SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6268             "device already suspended, exiting\n");
6269         return (DDI_SUCCESS);
6270     }
6271 
6272     /* Return failure if the device is being used by HA */
6273     if (un->un_resvd_status &
6274         (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6275         mutex_exit(SD_MUTEX(un));
6276         SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6277             "device in use by HA, exiting\n");
6278         return (DDI_FAILURE);
6279     }
6280 
6281     /*
6282      * Return failure if the device is in a resource wait
6283      * or power changing state.
6284      */
6285     if ((un->un_state == SD_STATE_RWAIT) ||
6286         (un->un_state == SD_STATE_PM_CHANGING)) {
6287         mutex_exit(SD_MUTEX(un));
6288         SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6289             "device in resource wait state, exiting\n");
6290         return (DDI_FAILURE);
6291     }
6292 
6293 
6294     un->un_save_state = un->un_last_state;
6295     New_state(un, SD_STATE_SUSPENDED);
6296 
6297     /*
6298      * Wait for all commands that are in transport or queued to a timer
6299      * for retry to complete.
6300      *
6301      * While waiting, no new commands will be accepted or sent because of
6302      * the new state we set above.
6303      *
6304      * Wait till the current operation has completed. If we are in the
6305      * resource wait state (with an intr outstanding) then we need to wait
6306      * till the intr completes and starts the next cmd. We want to wait for
6307      * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6308      */
6309     wait_cmds_complete = ddi_get_lbolt() +
6310         (sd_wait_cmds_complete * drv_usectohz(1000000));
6311 
6312     while (un->un_ncmds_in_transport != 0) {
6313         /*
6314          * Fail if commands do not finish in the specified time.
6315          */
6316         if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6317             wait_cmds_complete) == -1) {
6318             /*
6319              * Undo the state changes made above. Everything
6320              * must go back to its original value.
6321              */
6322             Restore_state(un);
6323             un->un_last_state = un->un_save_state;
6324             /* Wake up any threads that might be waiting.
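             * on un_suspend_cv; the broadcast below does this before
             * SD_MUTEX is dropped and the suspend fails.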
*/ 6325 cv_broadcast(&un->un_suspend_cv); 6326 mutex_exit(SD_MUTEX(un)); 6327 SD_ERROR(SD_LOG_IO_PM, un, 6328 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6329 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6330 return (DDI_FAILURE); 6331 } 6332 } 6333 6334 /* 6335 * Cancel SCSI watch thread and timeouts, if any are active 6336 */ 6337 6338 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6339 opaque_t temp_token = un->un_swr_token; 6340 mutex_exit(SD_MUTEX(un)); 6341 scsi_watch_suspend(temp_token); 6342 mutex_enter(SD_MUTEX(un)); 6343 } 6344 6345 if (un->un_reset_throttle_timeid != NULL) { 6346 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6347 un->un_reset_throttle_timeid = NULL; 6348 mutex_exit(SD_MUTEX(un)); 6349 (void) untimeout(temp_id); 6350 mutex_enter(SD_MUTEX(un)); 6351 } 6352 6353 if (un->un_dcvb_timeid != NULL) { 6354 timeout_id_t temp_id = un->un_dcvb_timeid; 6355 un->un_dcvb_timeid = NULL; 6356 mutex_exit(SD_MUTEX(un)); 6357 (void) untimeout(temp_id); 6358 mutex_enter(SD_MUTEX(un)); 6359 } 6360 6361 mutex_enter(&un->un_pm_mutex); 6362 if (un->un_pm_timeid != NULL) { 6363 timeout_id_t temp_id = un->un_pm_timeid; 6364 un->un_pm_timeid = NULL; 6365 mutex_exit(&un->un_pm_mutex); 6366 mutex_exit(SD_MUTEX(un)); 6367 (void) untimeout(temp_id); 6368 mutex_enter(SD_MUTEX(un)); 6369 } else { 6370 mutex_exit(&un->un_pm_mutex); 6371 } 6372 6373 if (un->un_rmw_msg_timeid != NULL) { 6374 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6375 un->un_rmw_msg_timeid = NULL; 6376 mutex_exit(SD_MUTEX(un)); 6377 (void) untimeout(temp_id); 6378 mutex_enter(SD_MUTEX(un)); 6379 } 6380 6381 if (un->un_retry_timeid != NULL) { 6382 timeout_id_t temp_id = un->un_retry_timeid; 6383 un->un_retry_timeid = NULL; 6384 mutex_exit(SD_MUTEX(un)); 6385 (void) untimeout(temp_id); 6386 mutex_enter(SD_MUTEX(un)); 6387 6388 if (un->un_retry_bp != NULL) { 6389 un->un_retry_bp->av_forw = un->un_waitq_headp; 6390 un->un_waitq_headp = un->un_retry_bp; 6391 if (un->un_waitq_tailp == NULL) { 6392 un->un_waitq_tailp = un->un_retry_bp; 6393 } 6394 un->un_retry_bp = NULL; 6395 un->un_retry_statp = NULL; 6396 } 6397 } 6398 6399 if (un->un_direct_priority_timeid != NULL) { 6400 timeout_id_t temp_id = un->un_direct_priority_timeid; 6401 un->un_direct_priority_timeid = NULL; 6402 mutex_exit(SD_MUTEX(un)); 6403 (void) untimeout(temp_id); 6404 mutex_enter(SD_MUTEX(un)); 6405 } 6406 6407 if (un->un_f_is_fibre == TRUE) { 6408 /* 6409 * Remove callbacks for insert and remove events 6410 */ 6411 if (un->un_insert_event != NULL) { 6412 mutex_exit(SD_MUTEX(un)); 6413 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6414 mutex_enter(SD_MUTEX(un)); 6415 un->un_insert_event = NULL; 6416 } 6417 6418 if (un->un_remove_event != NULL) { 6419 mutex_exit(SD_MUTEX(un)); 6420 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6421 mutex_enter(SD_MUTEX(un)); 6422 un->un_remove_event = NULL; 6423 } 6424 } 6425 6426 mutex_exit(SD_MUTEX(un)); 6427 6428 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6429 6430 return (DDI_SUCCESS); 6431 } 6432 6433 6434 /* 6435 * Function: sd_ddi_resume 6436 * 6437 * Description: Performs system power-up operations.. 
6438  *
6439  * Return Code: DDI_SUCCESS
6440  *     DDI_FAILURE
6441  *
6442  * Context: Kernel thread context
6443  */
6444 
6445 static int
6446 sd_ddi_resume(dev_info_t *devi)
6447 {
6448     struct sd_lun *un;
6449 
6450     un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6451     if (un == NULL) {
6452         return (DDI_FAILURE);
6453     }
6454 
6455     SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6456 
6457     mutex_enter(SD_MUTEX(un));
6458     Restore_state(un);
6459 
6460     /*
6461      * Restore the state which was saved so that un_last_state
6462      * holds the right state.
6463      */
6464     un->un_last_state = un->un_save_state;
6465     /*
6466      * Note: throttle comes back at full.
6467      * Also note: this MUST be done before calling pm_raise_power
6468      * otherwise the system can get hung in biowait. The scenario where
6469      * this'll happen is under cpr suspend. Writing of the system
6470      * state goes through sddump, which writes 0 to un_throttle. If
6471      * writing the system state then fails, for example if the partition
6472      * is too small, then cpr attempts a resume. If throttle isn't
6473      * restored from the saved value until after calling pm_raise_power
6474      * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
6475      * hangs in biowait.
6476      */
6477     un->un_throttle = un->un_saved_throttle;
6478 
6479     /*
6480      * The chance of failure is very rare as the only command done in power
6481      * entry point is the START command when you transition from 0->1 or
6482      * unknown->1. Put it to SPINDLE ON state irrespective of the state at
6483      * which suspend was done. Ignore the return value as the resume should
6484      * not fail. In the case of removable media the media need not be
6485      * inserted and hence there is a chance that raise power will fail with
6486      * media not present.
6487      */
6488     if (un->un_f_attach_spinup) {
6489         mutex_exit(SD_MUTEX(un));
6490         (void) pm_raise_power(SD_DEVINFO(un), 0,
6491             SD_PM_STATE_ACTIVE(un));
6492         mutex_enter(SD_MUTEX(un));
6493     }
6494 
6495     /*
6496      * Don't broadcast to the suspend cv and therefore possibly
6497      * start I/O until after power has been restored.
6498      */
6499     cv_broadcast(&un->un_suspend_cv);
6500     cv_broadcast(&un->un_state_cv);
6501 
6502     /* restart thread */
6503     if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6504         scsi_watch_resume(un->un_swr_token);
6505     }
6506 
6507 #if (defined(__fibre))
6508     if (un->un_f_is_fibre == TRUE) {
6509         /*
6510          * Add callbacks for insert and remove events
6511          */
6512         if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6513             sd_init_event_callbacks(un);
6514         }
6515     }
6516 #endif
6517 
6518     /*
6519      * Transport any pending commands to the target.
6520      *
6521      * If this is a low-activity device, commands in the queue will have
6522      * to wait until new commands come in, which may take a while. Also,
6523      * we specifically don't check un_ncmds_in_transport because we know
6524      * that there really are no commands in progress after the unit was
6525      * suspended and we could have reached the throttle level, been
6526      * suspended, and have no new commands coming in for a while. Highly
6527      * unlikely, but so is the low-activity disk scenario.
6528      */
6529     ddi_xbuf_dispatch(un->un_xbuf_attr);
6530 
6531     sd_start_cmds(un, NULL);
6532     mutex_exit(SD_MUTEX(un));
6533 
6534     SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6535 
6536     return (DDI_SUCCESS);
6537 }
6538 
6539 
6540 /*
6541  * Function: sd_pm_state_change
6542  *
6543  * Description: Change the driver power state.
6544  *     Someone else is required to actually change the driver
6545  *     power level.
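 *     (That is, this routine only updates the driver's bookkeeping,
 *     un_power_level and un_pm_count; the actual START STOP UNIT or
 *     power-condition command is issued by sdpower().)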
6546 * 6547 * Arguments: un - driver soft state (unit) structure 6548 * level - the power level that is changed to 6549 * flag - to decide how to change the power state 6550 * 6551 * Return Code: DDI_SUCCESS 6552 * 6553 * Context: Kernel thread context 6554 */ 6555 static int 6556 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6557 { 6558 ASSERT(un != NULL); 6559 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6560 6561 ASSERT(!mutex_owned(SD_MUTEX(un))); 6562 mutex_enter(SD_MUTEX(un)); 6563 6564 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6565 un->un_power_level = level; 6566 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6567 mutex_enter(&un->un_pm_mutex); 6568 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6569 un->un_pm_count++; 6570 ASSERT(un->un_pm_count == 0); 6571 } 6572 mutex_exit(&un->un_pm_mutex); 6573 } else { 6574 /* 6575 * Exit if power management is not enabled for this device, 6576 * or if the device is being used by HA. 6577 */ 6578 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6579 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6580 mutex_exit(SD_MUTEX(un)); 6581 SD_TRACE(SD_LOG_POWER, un, 6582 "sd_pm_state_change: exiting\n"); 6583 return (DDI_FAILURE); 6584 } 6585 6586 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6587 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6588 6589 /* 6590 * See if the device is not busy, ie.: 6591 * - we have no commands in the driver for this device 6592 * - not waiting for resources 6593 */ 6594 if ((un->un_ncmds_in_driver == 0) && 6595 (un->un_state != SD_STATE_RWAIT)) { 6596 /* 6597 * The device is not busy, so it is OK to go to low 6598 * power state. Indicate low power, but rely on someone 6599 * else to actually change it. 6600 */ 6601 mutex_enter(&un->un_pm_mutex); 6602 un->un_pm_count = -1; 6603 mutex_exit(&un->un_pm_mutex); 6604 un->un_power_level = level; 6605 } 6606 } 6607 6608 mutex_exit(SD_MUTEX(un)); 6609 6610 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6611 6612 return (DDI_SUCCESS); 6613 } 6614 6615 6616 /* 6617 * Function: sd_pm_idletimeout_handler 6618 * 6619 * Description: A timer routine that's active only while a device is busy. 6620 * The purpose is to extend slightly the pm framework's busy 6621 * view of the device to prevent busy/idle thrashing for 6622 * back-to-back commands. Do this by comparing the current time 6623 * to the time at which the last command completed and when the 6624 * difference is greater than sd_pm_idletime, call 6625 * pm_idle_component. In addition to indicating idle to the pm 6626 * framework, update the chain type to again use the internal pm 6627 * layers of the driver. 6628 * 6629 * Arguments: arg - driver soft state (unit) structure 6630 * 6631 * Context: Executes in a timeout(9F) thread context 6632 */ 6633 6634 static void 6635 sd_pm_idletimeout_handler(void *arg) 6636 { 6637 const hrtime_t idletime = sd_pm_idletime * NANOSEC; 6638 struct sd_lun *un = arg; 6639 6640 mutex_enter(&sd_detach_mutex); 6641 if (un->un_detach_count != 0) { 6642 /* Abort if the instance is detaching */ 6643 mutex_exit(&sd_detach_mutex); 6644 return; 6645 } 6646 mutex_exit(&sd_detach_mutex); 6647 6648 /* 6649 * Grab both mutexes, in the proper order, since we're accessing 6650 * both PM and softstate variables. 
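     * (SD_MUTEX is taken before un_pm_mutex, matching the lock order
     * used elsewhere in this driver.)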
6651      */
6652     mutex_enter(SD_MUTEX(un));
6653     mutex_enter(&un->un_pm_mutex);
6654     if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6655         (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6656         /*
6657          * Update the chain types.
6658          * This takes effect on the next new command received.
6659          */
6660         if (un->un_f_non_devbsize_supported) {
6661             un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6662         } else {
6663             un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6664         }
6665         un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6666 
6667         SD_TRACE(SD_LOG_IO_PM, un,
6668             "sd_pm_idletimeout_handler: idling device\n");
6669         (void) pm_idle_component(SD_DEVINFO(un), 0);
6670         un->un_pm_idle_timeid = NULL;
6671     } else {
6672         un->un_pm_idle_timeid =
6673             timeout(sd_pm_idletimeout_handler, un,
6674             (drv_usectohz((clock_t)300000))); /* 300 ms. */
6675     }
6676     mutex_exit(&un->un_pm_mutex);
6677     mutex_exit(SD_MUTEX(un));
6678 }
6679 
6680 
6681 /*
6682  * Function: sd_pm_timeout_handler
6683  *
6684  * Description: Callback to tell the framework we are idle.
6685  *
6686  * Context: timeout(9f) thread context.
6687  */
6688 
6689 static void
6690 sd_pm_timeout_handler(void *arg)
6691 {
6692     struct sd_lun *un = arg;
6693 
6694     (void) pm_idle_component(SD_DEVINFO(un), 0);
6695     mutex_enter(&un->un_pm_mutex);
6696     un->un_pm_timeid = NULL;
6697     mutex_exit(&un->un_pm_mutex);
6698 }
6699 
6700 
6701 /*
6702  * Function: sdpower
6703  *
6704  * Description: PM entry point.
6705  *
6706  * Return Code: DDI_SUCCESS
6707  *     DDI_FAILURE
6708  *
6709  * Context: Kernel thread context
6710  */
6711 
6712 static int
6713 sdpower(dev_info_t *devi, int component, int level)
6714 {
6715     struct sd_lun *un;
6716     int instance;
6717     int rval = DDI_SUCCESS;
6718     uint_t i, log_page_size, maxcycles, ncycles;
6719     uchar_t *log_page_data;
6720     int log_sense_page;
6721     int medium_present;
6722     time_t intvlp;
6723     struct pm_trans_data sd_pm_tran_data;
6724     uchar_t save_state = SD_STATE_NORMAL;
6725     int sval;
6726     uchar_t state_before_pm;
6727     int got_semaphore_here;
6728     sd_ssc_t *ssc;
6729     int last_power_level = SD_SPINDLE_UNINIT;
6730 
6731     instance = ddi_get_instance(devi);
6732 
6733     if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6734         !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6735         return (DDI_FAILURE);
6736     }
6737 
6738     ssc = sd_ssc_init(un);
6739 
6740     SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6741 
6742     /*
6743      * Must synchronize power down with close.
6744      * Attempt to decrement/acquire the open/close semaphore,
6745      * but do NOT wait on it. If it's not greater than zero,
6746      * i.e. it can't be decremented without waiting, then
6747      * someone else, either open or close, already has it
6748      * and the try returns 0. Use that knowledge here to determine
6749      * if it's OK to change the device power level.
6750      * Also, only increment it on exit if it was decremented,
6751      * i.e. gotten, here.
6752      */
6753     got_semaphore_here = sema_tryp(&un->un_semoclose);
6754 
6755     mutex_enter(SD_MUTEX(un));
6756 
6757     SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6758         un->un_ncmds_in_driver);
6759 
6760     /*
6761      * If un_ncmds_in_driver is non-zero, commands are already being
6762      * processed in the driver; if the semaphore was not gotten here,
6763      * an open or close is being processed. At the same time somebody
6764      * is requesting to go to a power level lower than one that can
6765      * perform I/O, which cannot be allowed, so we need to
6766      * return failure.
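     * (The got_semaphore_here == 0 test below is what catches an
     * in-progress open or close.)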
6767      */
6768     if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6769         ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6770         mutex_exit(SD_MUTEX(un));
6771 
6772         if (got_semaphore_here != 0) {
6773             sema_v(&un->un_semoclose);
6774         }
6775         SD_TRACE(SD_LOG_IO_PM, un,
6776             "sdpower: exit, device has queued cmds.\n");
6777 
6778         goto sdpower_failed;
6779     }
6780 
6781     /*
6782      * If the device is OFFLINE, the disk is effectively dead; we would
6783      * have to power it on or off by sending commands, and those
6784      * commands would fail anyway, so just return here.
6785      *
6786      * Power changes to a device that's OFFLINE or SUSPENDED
6787      * are not allowed.
6788      */
6789     if ((un->un_state == SD_STATE_OFFLINE) ||
6790         (un->un_state == SD_STATE_SUSPENDED)) {
6791         mutex_exit(SD_MUTEX(un));
6792 
6793         if (got_semaphore_here != 0) {
6794             sema_v(&un->un_semoclose);
6795         }
6796         SD_TRACE(SD_LOG_IO_PM, un,
6797             "sdpower: exit, device is off-line.\n");
6798 
6799         goto sdpower_failed;
6800     }
6801 
6802     /*
6803      * Change the device's state to indicate that its power level
6804      * is being changed. Do this to prevent a power off in the
6805      * middle of commands, which is especially bad on devices
6806      * that are really powered off instead of just spun down.
6807      */
6808     state_before_pm = un->un_state;
6809     un->un_state = SD_STATE_PM_CHANGING;
6810 
6811     mutex_exit(SD_MUTEX(un));
6812 
6813     /*
6814      * If the log sense command is not supported, bypass the
6815      * following check; otherwise, check the log sense
6816      * information for this device.
6817      */
6818     if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6819         un->un_f_log_sense_supported) {
6820         /*
6821          * Get the log sense information to understand whether the
6822          * power-cycle counts have gone beyond the threshold.
6823          */
6824         log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6825         log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6826 
6827         mutex_enter(SD_MUTEX(un));
6828         log_sense_page = un->un_start_stop_cycle_page;
6829         mutex_exit(SD_MUTEX(un));
6830 
6831         rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6832             log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6833 
6834         if (rval != 0) {
6835             if (rval == EIO)
6836                 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6837             else
6838                 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6839         }
6840 
6841 #ifdef SDDEBUG
6842         if (sd_force_pm_supported) {
6843             /* Force a successful result */
6844             rval = 0;
6845         }
6846 #endif
6847         if (rval != 0) {
6848             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6849                 "Log Sense Failed\n");
6850 
6851             kmem_free(log_page_data, log_page_size);
6852             /* Cannot support power management on those drives */
6853 
6854             if (got_semaphore_here != 0) {
6855                 sema_v(&un->un_semoclose);
6856             }
6857             /*
6858              * On exit put the state back to its original value
6859              * and broadcast to anyone waiting for the power
6860              * change completion.
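             * (Such waiters block on un_suspend_cv while un_state is
             * SD_STATE_PM_CHANGING.)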
6861              */
6862             mutex_enter(SD_MUTEX(un));
6863             un->un_state = state_before_pm;
6864             cv_broadcast(&un->un_suspend_cv);
6865             mutex_exit(SD_MUTEX(un));
6866             SD_TRACE(SD_LOG_IO_PM, un,
6867                 "sdpower: exit, Log Sense Failed.\n");
6868 
6869             goto sdpower_failed;
6870         }
6871 
6872         /*
6873          * From the page data, convert the essential information to
6874          * pm_trans_data.
6875          */
6876         maxcycles =
6877             (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6878             (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6879 
6880         ncycles =
6881             (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6882             (log_page_data[0x26] << 8) | log_page_data[0x27];
6883 
6884         if (un->un_f_pm_log_sense_smart) {
6885             sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6886             sd_pm_tran_data.un.smart_count.consumed = ncycles;
6887             sd_pm_tran_data.un.smart_count.flag = 0;
6888             sd_pm_tran_data.format = DC_SMART_FORMAT;
6889         } else {
6890             sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6891             sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6892             for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6893                 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6894                     log_page_data[8+i];
6895             }
6896             sd_pm_tran_data.un.scsi_cycles.flag = 0;
6897             sd_pm_tran_data.format = DC_SCSI_FORMAT;
6898         }
6899 
6900         kmem_free(log_page_data, log_page_size);
6901 
6902         /*
6903          * Call the pm_trans_check routine to get the OK from
6904          * the global policy.
6905          */
6906         rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6907 #ifdef SDDEBUG
6908         if (sd_force_pm_supported) {
6909             /* Force a successful result */
6910             rval = 1;
6911         }
6912 #endif
6913         switch (rval) {
6914         case 0:
6915             /*
6916              * It is not OK to power cycle, or there was an error
6917              * in the parameters passed. pm_trans_check has set
6918              * intvlp to the advised time to wait before a power
6919              * cycle should next be considered. Based on that
6920              * intvlp parameter we are supposed to pretend we are
6921              * busy so that the pm framework will never call our
6922              * power entry point; so install a timeout handler and
6923              * wait for the recommended time to elapse so that
6924              * power management can become effective again.
6925              *
6926              * To effect this behavior, call pm_busy_component to
6927              * indicate to the framework this device is busy.
6928              * By not adjusting un_pm_count the rest of PM in
6929              * the driver will function normally, independent of
6930              * this; but because the framework is told the device
6931              * is busy it won't attempt powering down until it
6932              * gets a matching idle, which the timeout handler
6933              * sends. Note: sd_pm_entry can't be called here to do
6934              * this because sdpower may have been called as a
6935              * result of a call to pm_raise_power from within
6936              * sd_pm_entry. If a timeout handler is already
6937              * active then don't install another.
6938              */
6939             mutex_enter(&un->un_pm_mutex);
6940             if (un->un_pm_timeid == NULL) {
6941                 un->un_pm_timeid =
6942                     timeout(sd_pm_timeout_handler,
6943                     un, intvlp * drv_usectohz(1000000));
6944                 mutex_exit(&un->un_pm_mutex);
6945                 (void) pm_busy_component(SD_DEVINFO(un), 0);
6946             } else {
6947                 mutex_exit(&un->un_pm_mutex);
6948             }
6949             if (got_semaphore_here != 0) {
6950                 sema_v(&un->un_semoclose);
6951             }
6952             /*
6953              * On exit put the state back to its original value
6954              * and broadcast to anyone waiting for the power
6955              * change completion.
6956              */
6957             mutex_enter(SD_MUTEX(un));
6958             un->un_state = state_before_pm;
6959             cv_broadcast(&un->un_suspend_cv);
6960             mutex_exit(SD_MUTEX(un));
6961 
6962             SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6963                 "trans check Failed, not ok to power cycle.\n");
6964 
6965             goto sdpower_failed;
6966         case -1:
6967             if (got_semaphore_here != 0) {
6968                 sema_v(&un->un_semoclose);
6969             }
6970             /*
6971              * On exit put the state back to its original value
6972              * and broadcast to anyone waiting for the power
6973              * change completion.
6974              */
6975             mutex_enter(SD_MUTEX(un));
6976             un->un_state = state_before_pm;
6977             cv_broadcast(&un->un_suspend_cv);
6978             mutex_exit(SD_MUTEX(un));
6979             SD_TRACE(SD_LOG_IO_PM, un,
6980                 "sdpower: exit, trans check command Failed.\n");
6981 
6982             goto sdpower_failed;
6983         }
6984     }
6985 
6986     if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6987         /*
6988          * Save the last state; if the STOP fails we need it
6989          * for restoring.
6990          */
6991         mutex_enter(SD_MUTEX(un));
6992         save_state = un->un_last_state;
6993         last_power_level = un->un_power_level;
6994         /*
6995          * There must not be any commands being processed
6996          * in the driver when we get here. Power to the
6997          * device is potentially going off.
6998          */
6999         ASSERT(un->un_ncmds_in_driver == 0);
7000         mutex_exit(SD_MUTEX(un));
7001 
7002         /*
7003          * For now, PM suspend the device completely before the
7004          * spindle is turned off.
7005          */
7006         if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
7007             == DDI_FAILURE) {
7008             if (got_semaphore_here != 0) {
7009                 sema_v(&un->un_semoclose);
7010             }
7011             /*
7012              * On exit put the state back to its original value
7013              * and broadcast to anyone waiting for the power
7014              * change completion.
7015              */
7016             mutex_enter(SD_MUTEX(un));
7017             un->un_state = state_before_pm;
7018             un->un_power_level = last_power_level;
7019             cv_broadcast(&un->un_suspend_cv);
7020             mutex_exit(SD_MUTEX(un));
7021             SD_TRACE(SD_LOG_IO_PM, un,
7022                 "sdpower: exit, PM suspend Failed.\n");
7023 
7024             goto sdpower_failed;
7025         }
7026     }
7027 
7028     /*
7029      * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
7030      * close, or strategy. Dump no longer uses this routine; it uses its
7031      * own code so it can be done in polled mode.
7032      */
7033 
7034     medium_present = TRUE;
7035 
7036     /*
7037      * When powering up, issue a TUR in case the device is at unit
7038      * attention. Don't do retries. Bypass the PM layer, otherwise
7039      * a deadlock on un_pm_busy_cv will occur.
7040      */
7041     if (SD_PM_IS_IO_CAPABLE(un, level)) {
7042         sval = sd_send_scsi_TEST_UNIT_READY(ssc,
7043             SD_DONT_RETRY_TUR | SD_BYPASS_PM);
7044         if (sval != 0)
7045             sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7046     }
7047 
7048     if (un->un_f_power_condition_supported) {
7049         char *pm_condition_name[] = {"STOPPED", "STANDBY",
7050             "IDLE", "ACTIVE"};
7051         SD_TRACE(SD_LOG_IO_PM, un,
7052             "sdpower: sending \'%s\' power condition",
7053             pm_condition_name[level]);
7054         sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
7055             sd_pl2pc[level], SD_PATH_DIRECT);
7056     } else {
7057         SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
7058             ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
7059         sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
7060             ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
7061             SD_TARGET_STOP), SD_PATH_DIRECT);
7062     }
7063     if (sval != 0) {
7064         if (sval == EIO)
7065             sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
7066         else
7067             sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7068     }
7069 
7070     /* Command failed, check for media present.
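      * (Removable devices typically report ENXIO when no medium is
      * loaded.)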
*/ 7071 if ((sval == ENXIO) && un->un_f_has_removable_media) { 7072 medium_present = FALSE; 7073 } 7074 7075 /* 7076 * The conditions of interest here are: 7077 * if a spindle off with media present fails, 7078 * then restore the state and return an error. 7079 * else if a spindle on fails, 7080 * then return an error (there's no state to restore). 7081 * In all other cases we setup for the new state 7082 * and return success. 7083 */ 7084 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 7085 if ((medium_present == TRUE) && (sval != 0)) { 7086 /* The stop command from above failed */ 7087 rval = DDI_FAILURE; 7088 /* 7089 * The stop command failed, and we have media 7090 * present. Roll the power level back via 7091 * sd_pm_state_change() and set the state back to 7092 * its previous value. 7093 */ 7094 (void) sd_pm_state_change(un, last_power_level, 7095 SD_PM_STATE_ROLLBACK); 7096 mutex_enter(SD_MUTEX(un)); 7097 un->un_last_state = save_state; 7098 mutex_exit(SD_MUTEX(un)); 7099 } else if (un->un_f_monitor_media_state) { 7100 /* 7101 * The stop command from above succeeded. 7102 * Terminate the watch thread in case of removable 7103 * media devices going into low power state. This is 7104 * as per the requirements of the pm framework; 7105 * otherwise commands will be generated for the device 7106 * (through the watch thread), even when the device is 7107 * in low power state. 7108 */ 7109 mutex_enter(SD_MUTEX(un)); 7110 un->un_f_watcht_stopped = FALSE; 7111 if (un->un_swr_token != NULL) { 7112 opaque_t temp_token = un->un_swr_token; 7113 un->un_f_watcht_stopped = TRUE; 7114 un->un_swr_token = NULL; 7115 mutex_exit(SD_MUTEX(un)); 7116 (void) scsi_watch_request_terminate(temp_token, 7117 SCSI_WATCH_TERMINATE_ALL_WAIT); 7118 } else { 7119 mutex_exit(SD_MUTEX(un)); 7120 } 7121 } 7122 } else { 7123 /* 7124 * The level requested is I/O capable. 7125 * Legacy behavior: return success on a failed spinup 7126 * if there is no media in the drive. 7127 * Do this by looking at medium_present here. 7128 */ 7129 if ((sval != 0) && medium_present) { 7130 /* The start command from above failed */ 7131 rval = DDI_FAILURE; 7132 } else { 7133 /* 7134 * The start command from above succeeded. 7135 * PM-resume the devices now that we have 7136 * started the disks. 7137 */ 7138 (void) sd_pm_state_change(un, level, 7139 SD_PM_STATE_CHANGE); 7140 7141 /* 7142 * Resume the watch thread since it was suspended 7143 * when the device went into low power mode. 7144 */ 7145 if (un->un_f_monitor_media_state) { 7146 mutex_enter(SD_MUTEX(un)); 7147 if (un->un_f_watcht_stopped == TRUE) { 7148 opaque_t temp_token; 7149 7150 un->un_f_watcht_stopped = FALSE; 7151 mutex_exit(SD_MUTEX(un)); 7152 temp_token = 7153 sd_watch_request_submit(un); 7154 mutex_enter(SD_MUTEX(un)); 7155 un->un_swr_token = temp_token; 7156 } 7157 mutex_exit(SD_MUTEX(un)); 7158 } 7159 } 7160 } 7161 7162 if (got_semaphore_here != 0) { 7163 sema_v(&un->un_semoclose); 7164 } 7165 /* 7166 * On exit put the state back to its original value 7167 * and broadcast to anyone waiting for the power 7168 * change completion. 7169 */ 7170 mutex_enter(SD_MUTEX(un)); 7171 un->un_state = state_before_pm; 7172 cv_broadcast(&un->un_suspend_cv); 7173 mutex_exit(SD_MUTEX(un)); 7174 7175 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7176 7177 sd_ssc_fini(ssc); 7178 return (rval); 7179 7180 sdpower_failed: 7181 7182 sd_ssc_fini(ssc); 7183 return (DDI_FAILURE); 7184 } 7185 7186 7187 /* 7188 * Function: sdattach 7189 * 7190 * Description: Driver's attach(9e) entry point function.
7191 * 7192 * Arguments: devi - opaque device info handle 7193 * cmd - attach type 7194 * 7195 * Return Code: DDI_SUCCESS 7196 * DDI_FAILURE 7197 * 7198 * Context: Kernel thread context 7199 */ 7200 7201 static int 7202 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7203 { 7204 switch (cmd) { 7205 case DDI_ATTACH: 7206 return (sd_unit_attach(devi)); 7207 case DDI_RESUME: 7208 return (sd_ddi_resume(devi)); 7209 default: 7210 break; 7211 } 7212 return (DDI_FAILURE); 7213 } 7214 7215 7216 /* 7217 * Function: sddetach 7218 * 7219 * Description: Driver's detach(9E) entry point function. 7220 * 7221 * Arguments: devi - opaque device info handle 7222 * cmd - detach type 7223 * 7224 * Return Code: DDI_SUCCESS 7225 * DDI_FAILURE 7226 * 7227 * Context: Kernel thread context 7228 */ 7229 7230 static int 7231 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7232 { 7233 switch (cmd) { 7234 case DDI_DETACH: 7235 return (sd_unit_detach(devi)); 7236 case DDI_SUSPEND: 7237 return (sd_ddi_suspend(devi)); 7238 default: 7239 break; 7240 } 7241 return (DDI_FAILURE); 7242 } 7243 7244 7245 /* 7246 * Function: sd_sync_with_callback 7247 * 7248 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7249 * state while the callback routine is active. 7250 * 7251 * Arguments: un: softstate structure for the instance 7252 * 7253 * Context: Kernel thread context 7254 */ 7255 7256 static void 7257 sd_sync_with_callback(struct sd_lun *un) 7258 { 7259 ASSERT(un != NULL); 7260 7261 mutex_enter(SD_MUTEX(un)); 7262 7263 ASSERT(un->un_in_callback >= 0); 7264 7265 while (un->un_in_callback > 0) { 7266 mutex_exit(SD_MUTEX(un)); 7267 delay(2); 7268 mutex_enter(SD_MUTEX(un)); 7269 } 7270 7271 mutex_exit(SD_MUTEX(un)); 7272 } 7273 7274 /* 7275 * Function: sd_unit_attach 7276 * 7277 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7278 * the soft state structure for the device and performs 7279 * all necessary structure and device initializations. 7280 * 7281 * Arguments: devi: the system's dev_info_t for the device. 7282 * 7283 * Return Code: DDI_SUCCESS if attach is successful. 7284 * DDI_FAILURE if any part of the attach fails. 7285 * 7286 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7287 * Kernel thread context only. Can sleep. 7288 */ 7289 7290 static int 7291 sd_unit_attach(dev_info_t *devi) 7292 { 7293 struct scsi_device *devp; 7294 struct sd_lun *un; 7295 char *variantp; 7296 char name_str[48]; 7297 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7298 int instance; 7299 int rval; 7300 int wc_enabled; 7301 int wc_changeable; 7302 int tgt; 7303 uint64_t capacity; 7304 uint_t lbasize = 0; 7305 dev_info_t *pdip = ddi_get_parent(devi); 7306 int offbyone = 0; 7307 int geom_label_valid = 0; 7308 sd_ssc_t *ssc; 7309 int status; 7310 struct sd_fm_internal *sfip = NULL; 7311 int max_xfer_size; 7312 7313 /* 7314 * Retrieve the target driver's private data area. This was set 7315 * up by the HBA. 7316 */ 7317 devp = ddi_get_driver_private(devi); 7318 7319 /* 7320 * Retrieve the target ID of the device. 7321 */ 7322 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7323 SCSI_ADDR_PROP_TARGET, -1); 7324 7325 /* 7326 * Since we have no idea what state things were left in by the last 7327 * user of the device, set up some 'default' settings, ie. turn 'em 7328 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7329 * Do this before the scsi_probe, which sends an inquiry. 7330 * This is a fix for bug (4430280). 
* Of special importance is wide-xfer. The drive could have been left 7332 * in wide transfer mode by the last driver to communicate with it; 7333 * this includes us. If that's the case, and if the following is not 7334 * set up properly or we don't re-negotiate with the drive prior to 7335 * transferring data to/from the drive, it causes bus parity errors, 7336 * data overruns, and unexpected interrupts. This first occurred when 7337 * the fix for bug (4378686) was made. 7338 */ 7339 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7340 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7341 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7342 7343 /* 7344 * Currently, scsi_ifsetcap sets the tagged-qing capability for all LUNs 7345 * on a target. Setting it per lun instance actually sets the 7346 * capability of this target, which affects those luns already 7347 * attached on the same target. So during attach, we can disable 7348 * this capability only when no other lun has been attached on this 7349 * target. By doing this, we assume a target has the same tagged-qing 7350 * capability for every lun. The condition can be removed when the HBA 7351 * is changed to support per-lun based tagged-qing capability. 7352 */ 7353 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7354 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7355 } 7356 7357 /* 7358 * Use scsi_probe() to issue an INQUIRY command to the device. 7359 * This call will allocate and fill in the scsi_inquiry structure 7360 * and point the sd_inq member of the scsi_device structure to it. 7361 * If the attach succeeds, then this memory will not be de-allocated 7362 * (via scsi_unprobe()) until the instance is detached. 7363 */ 7364 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7365 goto probe_failed; 7366 } 7367 7368 /* 7369 * Check the device type as specified in the inquiry data and 7370 * claim it if it is of a type that we support. 7371 */ 7372 switch (devp->sd_inq->inq_dtype) { 7373 case DTYPE_DIRECT: 7374 break; 7375 case DTYPE_RODIRECT: 7376 break; 7377 case DTYPE_OPTICAL: 7378 break; 7379 case DTYPE_NOTPRESENT: 7380 default: 7381 /* Unsupported device type; fail the attach. */ 7382 goto probe_failed; 7383 } 7384 7385 /* 7386 * Allocate the soft state structure for this unit. 7387 * 7388 * We rely upon this memory being set to all zeroes by 7389 * ddi_soft_state_zalloc(). We assume that any member of the 7390 * soft state structure that is not explicitly initialized by 7391 * this routine will have a value of zero. 7392 */ 7393 instance = ddi_get_instance(devp->sd_dev); 7394 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7395 goto probe_failed; 7396 } 7397 7398 /* 7399 * Retrieve a pointer to the newly-allocated soft state. 7400 * 7401 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7402 * was successful, unless something has gone horribly wrong and the 7403 * ddi's soft state internals are corrupt (in which case it is 7404 * probably better to halt here than just fail the attach....) 7405 */ 7406 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7407 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7408 instance); 7409 /*NOTREACHED*/ 7410 } 7411 7412 /* 7413 * Link the back ptr of the driver soft state to the scsi_device 7414 * struct for this lun. 7415 * Save a pointer to the softstate in the driver-private area of 7416 * the scsi_device struct.
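* This two-way linkage lets any code that holds the scsi_device (for example, sd_unit_detach() below) recover the soft state through devp->sd_private.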
* Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7418 * we first set un->un_sd below. 7419 */ 7420 un->un_sd = devp; 7421 devp->sd_private = (opaque_t)un; 7422 7423 /* 7424 * The following must be after devp is stored in the soft state struct. 7425 */ 7426 #ifdef SDDEBUG 7427 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7428 "%s_unit_attach: un:0x%p instance:%d\n", 7429 ddi_driver_name(devi), un, instance); 7430 #endif 7431 7432 /* 7433 * Set up the device type and node type (for the minor nodes). 7434 * By default we assume that the device can at least support the 7435 * Common Command Set. Call it a CD-ROM if it reports itself 7436 * as a RODIRECT device. 7437 */ 7438 switch (devp->sd_inq->inq_dtype) { 7439 case DTYPE_RODIRECT: 7440 un->un_node_type = DDI_NT_CD_CHAN; 7441 un->un_ctype = CTYPE_CDROM; 7442 break; 7443 case DTYPE_OPTICAL: 7444 un->un_node_type = DDI_NT_BLOCK_CHAN; 7445 un->un_ctype = CTYPE_ROD; 7446 break; 7447 default: 7448 un->un_node_type = DDI_NT_BLOCK_CHAN; 7449 un->un_ctype = CTYPE_CCS; 7450 break; 7451 } 7452 7453 /* 7454 * Try to read the interconnect type from the HBA. 7455 * 7456 * Note: This driver is currently compiled as two binaries, a parallel 7457 * scsi version (sd) and a fibre channel version (ssd). All functional 7458 * differences are determined at compile time. In the future a single 7459 * binary will be provided and the interconnect type will be used to 7460 * differentiate between fibre and parallel scsi behaviors. At that time 7461 * it will be necessary for all fibre channel HBAs to support this 7462 * property. 7463 * 7464 * Set un_f_is_fibre to TRUE (default fibre). 7465 */ 7466 un->un_f_is_fibre = TRUE; 7467 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7468 case INTERCONNECT_SSA: 7469 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7470 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7471 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7472 break; 7473 case INTERCONNECT_PARALLEL: 7474 un->un_f_is_fibre = FALSE; 7475 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7476 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7477 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7478 break; 7479 case INTERCONNECT_SAS: 7480 un->un_f_is_fibre = FALSE; 7481 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7482 un->un_node_type = DDI_NT_BLOCK_SAS; 7483 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7484 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7485 break; 7486 case INTERCONNECT_SATA: 7487 un->un_f_is_fibre = FALSE; 7488 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7489 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7490 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7491 break; 7492 case INTERCONNECT_FIBRE: 7493 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7494 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7495 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7496 break; 7497 case INTERCONNECT_FABRIC: 7498 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7499 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7500 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7501 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7502 break; 7503 default: 7504 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7505 /* 7506 * The HBA does not support the "interconnect-type" property 7507 * (or did not provide a recognized type). 7508 * 7509 * Note: This will be obsoleted when a single fibre channel 7510 * and parallel scsi driver is delivered.
In the meantime the 7511 * interconnect type will be set to the platform default. If that 7512 * type is not parallel SCSI, it means that we should be 7513 * assuming "ssd" semantics. However, here this also means that 7514 * the FC HBA is not supporting the "interconnect-type" property 7515 * like we expect it to, so log this occurrence. 7516 */ 7517 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7518 if (!SD_IS_PARALLEL_SCSI(un)) { 7519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7520 "sd_unit_attach: un:0x%p Assuming " 7521 "INTERCONNECT_FIBRE\n", un); 7522 } else { 7523 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7524 "sd_unit_attach: un:0x%p Assuming " 7525 "INTERCONNECT_PARALLEL\n", un); 7526 un->un_f_is_fibre = FALSE; 7527 } 7528 #else 7529 /* 7530 * Note: This source will be implemented when a single fibre 7531 * channel and parallel scsi driver is delivered. The default 7532 * will be to assume that if a device does not support the 7533 * "interconnect-type" property it is a parallel SCSI HBA and 7534 * we will set the interconnect type for parallel scsi. 7535 */ 7536 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7537 un->un_f_is_fibre = FALSE; 7538 #endif 7539 break; 7540 } 7541 7542 if (un->un_f_is_fibre == TRUE) { 7543 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7544 SCSI_VERSION_3) { 7545 switch (un->un_interconnect_type) { 7546 case SD_INTERCONNECT_FIBRE: 7547 case SD_INTERCONNECT_SSA: 7548 un->un_node_type = DDI_NT_BLOCK_WWN; 7549 break; 7550 default: 7551 break; 7552 } 7553 } 7554 } 7555 7556 /* 7557 * Initialize the Request Sense command for the target. 7558 */ 7559 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7560 goto alloc_rqs_failed; 7561 } 7562 7563 /* 7564 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC 7565 * with separate binaries for sd and ssd. 7566 * 7567 * x86 has one binary, and there un_retry_count is set based on the 7568 * connection type. The hardcoded values will go away when SPARC 7569 * uses one binary for sd and ssd. These hardcoded values need to 7570 * match SD_RETRY_COUNT in sddef.h. 7571 * The value used is based on interconnect type: 7572 * fibre = 3, parallel = 5. 7573 */ 7574 #if defined(__i386) || defined(__amd64) 7575 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7576 #else 7577 un->un_retry_count = SD_RETRY_COUNT; 7578 #endif 7579 7580 /* 7581 * Set the per disk retry count to the default number of retries 7582 * for disks and CDROMs. This value can be overridden by the 7583 * disk property list or an entry in sd.conf. 7584 */ 7585 un->un_notready_retry_count = 7586 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7587 : DISK_NOT_READY_RETRY_COUNT(un); 7588 7589 /* 7590 * Set the busy retry count to the default value of un_retry_count. 7591 * This can be overridden by entries in sd.conf or the device 7592 * config table. 7593 */ 7594 un->un_busy_retry_count = un->un_retry_count; 7595 7596 /* 7597 * Init the reset threshold for retries. This number determines 7598 * how many retries must be performed before a reset can be issued 7599 * (for certain error conditions). This can be overridden by entries 7600 * in sd.conf or the device config table. 7601 */ 7602 un->un_reset_retry_count = (un->un_retry_count / 2); 7603 7604 /* 7605 * Set the victim_retry_count to twice the default un_retry_count. 7606 */ 7607 un->un_victim_retry_count = (2 * un->un_retry_count); 7608 7609 /* 7610 * Set the reservation release timeout to the default value of 7611 * 5 seconds. This can be overridden by entries in ssd.conf or the 7612 * device config table.
7613 */ 7614 un->un_reserve_release_time = 5; 7615 7616 /* 7617 * Set up the default maximum transfer size. Note that this may 7618 * get updated later in the attach, when setting up default wide 7619 * operations for disks. 7620 */ 7621 #if defined(__i386) || defined(__amd64) 7622 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7623 un->un_partial_dma_supported = 1; 7624 #else 7625 un->un_max_xfer_size = (uint_t)maxphys; 7626 #endif 7627 7628 /* 7629 * Get "allow bus device reset" property (defaults to "enabled" if 7630 * the property was not defined). This is to disable bus resets for 7631 * certain kinds of error recovery. Note: In the future when a run-time 7632 * fibre check is available the soft state flag should default to 7633 * enabled. 7634 */ 7635 if (un->un_f_is_fibre == TRUE) { 7636 un->un_f_allow_bus_device_reset = TRUE; 7637 } else { 7638 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7639 "allow-bus-device-reset", 1) != 0) { 7640 un->un_f_allow_bus_device_reset = TRUE; 7641 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7642 "sd_unit_attach: un:0x%p Bus device reset " 7643 "enabled\n", un); 7644 } else { 7645 un->un_f_allow_bus_device_reset = FALSE; 7646 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7647 "sd_unit_attach: un:0x%p Bus device reset " 7648 "disabled\n", un); 7649 } 7650 } 7651 7652 /* 7653 * Check if this is an ATAPI device. ATAPI devices use Group 1 7654 * Read/Write commands and Group 2 Mode Sense/Select commands. 7655 * 7656 * Note: The "obsolete" way of doing this is to check for the "atapi" 7657 * property. The new "variant" property with a value of "atapi" has been 7658 * introduced so that future 'variants' of standard SCSI behavior (like 7659 * atapi) could be specified by the underlying HBA drivers by supplying 7660 * a new value for the "variant" property, instead of having to define a 7661 * new property. 7662 */ 7663 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7664 un->un_f_cfg_is_atapi = TRUE; 7665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7666 "sd_unit_attach: un:0x%p Atapi device\n", un); 7667 } 7668 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7669 &variantp) == DDI_PROP_SUCCESS) { 7670 if (strcmp(variantp, "atapi") == 0) { 7671 un->un_f_cfg_is_atapi = TRUE; 7672 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7673 "sd_unit_attach: un:0x%p Atapi device\n", un); 7674 } 7675 ddi_prop_free(variantp); 7676 } 7677 7678 un->un_cmd_timeout = SD_IO_TIME; 7679 7680 un->un_busy_timeout = SD_BSY_TIMEOUT; 7681 7682 /* Info on current states, statuses, etc. (Updated frequently) */ 7683 un->un_state = SD_STATE_NORMAL; 7684 un->un_last_state = SD_STATE_NORMAL; 7685 7686 /* Control & status info for command throttling */ 7687 un->un_throttle = sd_max_throttle; 7688 un->un_saved_throttle = sd_max_throttle; 7689 un->un_min_throttle = sd_min_throttle; 7690 7691 if (un->un_f_is_fibre == TRUE) { 7692 un->un_f_use_adaptive_throttle = TRUE; 7693 } else { 7694 un->un_f_use_adaptive_throttle = FALSE; 7695 } 7696 7697 /* Removable media support. */ 7698 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7699 un->un_mediastate = DKIO_NONE; 7700 un->un_specified_mediastate = DKIO_NONE; 7701 7702 /* CVs for suspend/resume (PM or DR) */ 7703 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7704 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7705 7706 /* Power management support. 
*/ 7707 un->un_power_level = SD_SPINDLE_UNINIT; 7708 7709 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7710 un->un_f_wcc_inprog = 0; 7711 7712 /* 7713 * The open/close semaphore is used to serialize threads executing 7714 * in the driver's open & close entry point routines for a given 7715 * instance. 7716 */ 7717 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7718 7719 /* 7720 * The conf file entry and softstate variable are a forceful override, 7721 * meaning a non-zero value must be entered to change the default. 7722 */ 7723 un->un_f_disksort_disabled = FALSE; 7724 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7725 un->un_f_enable_rmw = FALSE; 7726 7727 /* 7728 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7729 * can be overridden via the [s]sd-config-list "mmc-gesn-polling" 7730 * property. 7731 */ 7732 un->un_f_mmc_gesn_polling = TRUE; 7733 7734 /* 7735 * The physical sector size defaults to DEV_BSIZE currently. We can 7736 * override this value via the driver configuration file, so we must 7737 * set it before calling sd_read_unit_properties(). 7738 */ 7739 un->un_phy_blocksize = DEV_BSIZE; 7740 7741 /* 7742 * Retrieve the properties from the static driver table or the driver 7743 * configuration file (.conf) for this unit and update the soft state 7744 * for the device as needed for the indicated properties. 7745 * Note: the property configuration needs to occur here as some of the 7746 * following routines may have dependencies on soft state flags set 7747 * as part of the driver property configuration. 7748 */ 7749 sd_read_unit_properties(un); 7750 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7751 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7752 7753 /* 7754 * A device is treated as hotpluggable only if it has the 7755 * "hotpluggable" property; otherwise it is regarded as 7756 * non-hotpluggable. 7757 */ 7758 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7759 -1) != -1) { 7760 un->un_f_is_hotpluggable = TRUE; 7761 } 7762 7763 /* 7764 * Set the unit's attributes (flags) according to "hotpluggable" 7765 * and the RMB bit in the INQUIRY data. 7766 */ 7767 sd_set_unit_attributes(un, devi); 7768 7769 /* 7770 * By default, we mark the capacity, lbasize, and geometry 7771 * as invalid. Only if we successfully read a valid capacity 7772 * will we update the un_blockcount and un_tgt_blocksize with the 7773 * valid values (the geometry will be validated later). 7774 */ 7775 un->un_f_blockcount_is_valid = FALSE; 7776 un->un_f_tgt_blocksize_is_valid = FALSE; 7777 7778 /* 7779 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7780 * otherwise. 7781 */ 7782 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7783 un->un_blockcount = 0; 7784 7785 /* 7786 * Set up the per-instance info needed to determine the correct 7787 * CDBs and other info for issuing commands to the target. 7788 */ 7789 sd_init_cdb_limits(un); 7790 7791 /* 7792 * Set up the IO chains to use, based upon the target type.
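* Targets that support block sizes other than DEV_BSIZE use the removable-media buf chain; all others use the standard disk chain.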
7792 */ 7793 if (un->un_f_non_devbsize_supported) { 7794 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7795 } else { 7796 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7797 } 7798 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7799 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7800 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7801 7802 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7803 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7804 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7805 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7806 7807 7808 if (ISCD(un)) { 7809 un->un_additional_codes = sd_additional_codes; 7810 } else { 7811 un->un_additional_codes = NULL; 7812 } 7813 7814 /* 7815 * Create the kstats here so they can be available for attach-time 7816 * routines that send commands to the unit (either polled or via 7817 * sd_send_scsi_cmd). 7818 * 7819 * Note: This is a critical sequence that needs to be maintained: 7820 * 1) Instantiate the kstats here, before any routines using the 7821 * iopath (i.e. sd_send_scsi_cmd). 7822 * 2) Instantiate and initialize the partition stats 7823 * (sd_set_pstats). 7824 * 3) Initialize the error stats (sd_set_errstats), following 7825 * sd_validate_geometry(),sd_register_devid(), 7826 * and sd_cache_control(). 7827 */ 7828 7829 un->un_stats = kstat_create(sd_label, instance, 7830 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7831 if (un->un_stats != NULL) { 7832 un->un_stats->ks_lock = SD_MUTEX(un); 7833 kstat_install(un->un_stats); 7834 } 7835 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7836 "sd_unit_attach: un:0x%p un_stats created\n", un); 7837 7838 un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats", 7839 "misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) / 7840 sizeof (kstat_named_t), 0); 7841 if (un->un_unmapstats_ks) { 7842 un->un_unmapstats = un->un_unmapstats_ks->ks_data; 7843 7844 kstat_named_init(&un->un_unmapstats->us_cmds, 7845 "commands", KSTAT_DATA_UINT64); 7846 kstat_named_init(&un->un_unmapstats->us_errs, 7847 "errors", KSTAT_DATA_UINT64); 7848 kstat_named_init(&un->un_unmapstats->us_extents, 7849 "extents", KSTAT_DATA_UINT64); 7850 kstat_named_init(&un->un_unmapstats->us_bytes, 7851 "bytes", KSTAT_DATA_UINT64); 7852 7853 kstat_install(un->un_unmapstats_ks); 7854 } else { 7855 cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d", 7856 instance); 7857 } 7858 7859 sd_create_errstats(un, instance); 7860 if (un->un_errstats == NULL) { 7861 goto create_errstats_failed; 7862 } 7863 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7864 "sd_unit_attach: un:0x%p errstats created\n", un); 7865 7866 /* 7867 * The following if/else code was relocated here from below as part 7868 * of the fix for bug (4430280). However with the default setup added 7869 * on entry to this routine, it's no longer absolutely necessary for 7870 * this to be before the call to sd_spin_up_unit. 
7871 */ 7872 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7873 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7874 (devp->sd_inq->inq_ansi == 5)) && 7875 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7876 7877 /* 7878 * If tagged queueing is supported by the target 7879 * and by the host adapter then we will enable it 7880 */ 7881 un->un_tagflags = 0; 7882 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7883 (un->un_f_arq_enabled == TRUE)) { 7884 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7885 1, 1) == 1) { 7886 un->un_tagflags = FLAG_STAG; 7887 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7888 "sd_unit_attach: un:0x%p tag queueing " 7889 "enabled\n", un); 7890 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7891 "untagged-qing", 0) == 1) { 7892 un->un_f_opt_queueing = TRUE; 7893 un->un_saved_throttle = un->un_throttle = 7894 min(un->un_throttle, 3); 7895 } else { 7896 un->un_f_opt_queueing = FALSE; 7897 un->un_saved_throttle = un->un_throttle = 1; 7898 } 7899 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7900 == 1) && (un->un_f_arq_enabled == TRUE)) { 7901 /* The Host Adapter supports internal queueing. */ 7902 un->un_f_opt_queueing = TRUE; 7903 un->un_saved_throttle = un->un_throttle = 7904 min(un->un_throttle, 3); 7905 } else { 7906 un->un_f_opt_queueing = FALSE; 7907 un->un_saved_throttle = un->un_throttle = 1; 7908 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7909 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7910 } 7911 7912 /* 7913 * Enable large transfers for SATA/SAS drives 7914 */ 7915 if (SD_IS_SERIAL(un)) { 7916 un->un_max_xfer_size = 7917 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7918 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7919 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7920 "sd_unit_attach: un:0x%p max transfer " 7921 "size=0x%x\n", un, un->un_max_xfer_size); 7922 7923 } 7924 7925 /* Setup or tear down default wide operations for disks */ 7926 7927 /* 7928 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7929 * and "ssd_max_xfer_size" to exist simultaneously on the same 7930 * system and be set to different values. In the future this 7931 * code may need to be updated when the ssd module is 7932 * obsoleted and removed from the system. (4299588) 7933 */ 7934 if (SD_IS_PARALLEL_SCSI(un) && 7935 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7936 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7937 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7938 1, 1) == 1) { 7939 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7940 "sd_unit_attach: un:0x%p Wide Transfer " 7941 "enabled\n", un); 7942 } 7943 7944 /* 7945 * If tagged queuing has also been enabled, then 7946 * enable large xfers 7947 */ 7948 if (un->un_saved_throttle == sd_max_throttle) { 7949 un->un_max_xfer_size = 7950 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7951 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7952 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7953 "sd_unit_attach: un:0x%p max transfer " 7954 "size=0x%x\n", un, un->un_max_xfer_size); 7955 } 7956 } else { 7957 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7958 0, 1) == 1) { 7959 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7960 "sd_unit_attach: un:0x%p " 7961 "Wide Transfer disabled\n", un); 7962 } 7963 } 7964 } else { 7965 un->un_tagflags = FLAG_STAG; 7966 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7967 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7968 } 7969 7970 /* 7971 * If this target supports LUN reset, try to enable it. 
7972 */ 7973 if (un->un_f_lun_reset_enabled) { 7974 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7975 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7976 "un:0x%p lun_reset capability set\n", un); 7977 } else { 7978 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7979 "un:0x%p lun-reset capability not set\n", un); 7980 } 7981 } 7982 7983 /* 7984 * Adjust the maximum transfer size. This is to fix 7985 * the problem of partial DMA support on SPARC. Some 7986 * HBA driver, like aac, has very small dma_attr_maxxfer 7987 * size, which requires partial DMA support on SPARC. 7988 * In the future the SPARC pci nexus driver may solve 7989 * the problem instead of this fix. 7990 */ 7991 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7992 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7993 /* We need DMA partial even on sparc to ensure sddump() works */ 7994 un->un_max_xfer_size = max_xfer_size; 7995 if (un->un_partial_dma_supported == 0) 7996 un->un_partial_dma_supported = 1; 7997 } 7998 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7999 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 8000 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 8001 un->un_max_xfer_size) == 1) { 8002 un->un_buf_breakup_supported = 1; 8003 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8004 "un:0x%p Buf breakup enabled\n", un); 8005 } 8006 } 8007 8008 /* 8009 * Set PKT_DMA_PARTIAL flag. 8010 */ 8011 if (un->un_partial_dma_supported == 1) { 8012 un->un_pkt_flags = PKT_DMA_PARTIAL; 8013 } else { 8014 un->un_pkt_flags = 0; 8015 } 8016 8017 /* Initialize sd_ssc_t for internal uscsi commands */ 8018 ssc = sd_ssc_init(un); 8019 scsi_fm_init(devp); 8020 8021 /* 8022 * Allocate memory for SCSI FMA stuffs. 8023 */ 8024 un->un_fm_private = 8025 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 8026 sfip = (struct sd_fm_internal *)un->un_fm_private; 8027 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 8028 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 8029 sfip->fm_ssc.ssc_un = un; 8030 8031 if (ISCD(un) || 8032 un->un_f_has_removable_media || 8033 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 8034 /* 8035 * We don't touch CDROM or the DDI_FM_NOT_CAPABLE device. 8036 * Their log are unchanged. 8037 */ 8038 sfip->fm_log_level = SD_FM_LOG_NSUP; 8039 } else { 8040 /* 8041 * If enter here, it should be non-CDROM and FM-capable 8042 * device, and it will not keep the old scsi_log as before 8043 * in /var/adm/messages. However, the property 8044 * "fm-scsi-log" will control whether the FM telemetry will 8045 * be logged in /var/adm/messages. 8046 */ 8047 int fm_scsi_log; 8048 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 8049 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 8050 8051 if (fm_scsi_log) 8052 sfip->fm_log_level = SD_FM_LOG_EREPORT; 8053 else 8054 sfip->fm_log_level = SD_FM_LOG_SILENT; 8055 } 8056 8057 /* 8058 * At this point in the attach, we have enough info in the 8059 * soft state to be able to issue commands to the target. 8060 * 8061 * All command paths used below MUST issue their commands as 8062 * SD_PATH_DIRECT. This is important as intermediate layers 8063 * are not all initialized yet (such as PM). 8064 */ 8065 8066 /* 8067 * Send a TEST UNIT READY command to the device. This should clear 8068 * any outstanding UNIT ATTENTION that may be present. 8069 * 8070 * Note: Don't check for success, just track if there is a reservation, 8071 * this is a throw away command to clear any unit attentions. 
8072 * 8073 * Note: This MUST be the first command issued to the target during 8074 * attach to ensure power on UNIT ATTENTIONS are cleared. 8075 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8076 * with attempts at spinning up a device with no media. 8077 */ 8078 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 8079 if (status != 0) { 8080 if (status == EACCES) 8081 reservation_flag = SD_TARGET_IS_RESERVED; 8082 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8083 } 8084 8085 /* 8086 * If the device is NOT a removable media device, attempt to spin 8087 * it up (using the START_STOP_UNIT command) and read its capacity 8088 * (using the READ CAPACITY command). Note, however, that either 8089 * of these could fail and in some cases we would continue with 8090 * the attach despite the failure (see below). 8091 */ 8092 if (un->un_f_descr_format_supported) { 8093 8094 switch (sd_spin_up_unit(ssc)) { 8095 case 0: 8096 /* 8097 * Spin-up was successful; now try to read the 8098 * capacity. If successful then save the results 8099 * and mark the capacity & lbasize as valid. 8100 */ 8101 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8102 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8103 8104 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 8105 &lbasize, SD_PATH_DIRECT); 8106 8107 switch (status) { 8108 case 0: { 8109 if (capacity > DK_MAX_BLOCKS) { 8110 #ifdef _LP64 8111 if ((capacity + 1) > 8112 SD_GROUP1_MAX_ADDRESS) { 8113 /* 8114 * Enable descriptor format 8115 * sense data so that we can 8116 * get 64 bit sense data 8117 * fields. 8118 */ 8119 sd_enable_descr_sense(ssc); 8120 } 8121 #else 8122 /* 32-bit kernels can't handle this */ 8123 scsi_log(SD_DEVINFO(un), 8124 sd_label, CE_WARN, 8125 "disk has %llu blocks, which " 8126 "is too large for a 32-bit " 8127 "kernel", capacity); 8128 8129 #if defined(__i386) || defined(__amd64) 8130 /* 8131 * 1TB disk was treated as (1T - 512)B 8132 * in the past, so that it might have 8133 * valid VTOC and solaris partitions, 8134 * we have to allow it to continue to 8135 * work. 8136 */ 8137 if (capacity - 1 > DK_MAX_BLOCKS) 8138 #endif 8139 goto spinup_failed; 8140 #endif 8141 } 8142 8143 /* 8144 * Here it's not necessary to check the case: 8145 * the capacity of the device is bigger than 8146 * what the max hba cdb can support. Because 8147 * sd_send_scsi_READ_CAPACITY will retrieve 8148 * the capacity by sending USCSI command, which 8149 * is constrained by the max hba cdb. Actually, 8150 * sd_send_scsi_READ_CAPACITY will return 8151 * EINVAL when using bigger cdb than required 8152 * cdb length. Will handle this case in 8153 * "case EINVAL". 8154 */ 8155 8156 /* 8157 * The following relies on 8158 * sd_send_scsi_READ_CAPACITY never 8159 * returning 0 for capacity and/or lbasize. 8160 */ 8161 sd_update_block_info(un, lbasize, capacity); 8162 8163 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8164 "sd_unit_attach: un:0x%p capacity = %ld " 8165 "blocks; lbasize= %ld.\n", un, 8166 un->un_blockcount, un->un_tgt_blocksize); 8167 8168 break; 8169 } 8170 case EINVAL: 8171 /* 8172 * In the case where the max-cdb-length property 8173 * is smaller than the required CDB length for 8174 * a SCSI device, a target driver can fail to 8175 * attach to that device. 
8176 */ 8177 scsi_log(SD_DEVINFO(un), 8178 sd_label, CE_WARN, 8179 "disk capacity is too large " 8180 "for current cdb length"); 8181 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8182 8183 goto spinup_failed; 8184 case EACCES: 8185 /* 8186 * Should never get here if the spin-up 8187 * succeeded, but code it in anyway. 8188 * From here, just continue with the attach... 8189 */ 8190 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8191 "sd_unit_attach: un:0x%p " 8192 "sd_send_scsi_READ_CAPACITY " 8193 "returned reservation conflict\n", un); 8194 reservation_flag = SD_TARGET_IS_RESERVED; 8195 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8196 break; 8197 default: 8198 /* 8199 * Likewise, should never get here if the 8200 * spin-up succeeded. Just continue with 8201 * the attach... 8202 */ 8203 if (status == EIO) 8204 sd_ssc_assessment(ssc, 8205 SD_FMT_STATUS_CHECK); 8206 else 8207 sd_ssc_assessment(ssc, 8208 SD_FMT_IGNORE); 8209 break; 8210 } 8211 break; 8212 case EACCES: 8213 /* 8214 * Device is reserved by another host. In this case 8215 * we could not spin it up or read the capacity, but 8216 * we continue with the attach anyway. 8217 */ 8218 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8219 "sd_unit_attach: un:0x%p spin-up reservation " 8220 "conflict.\n", un); 8221 reservation_flag = SD_TARGET_IS_RESERVED; 8222 break; 8223 default: 8224 /* Fail the attach if the spin-up failed. */ 8225 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8226 "sd_unit_attach: un:0x%p spin-up failed.", un); 8227 goto spinup_failed; 8228 } 8229 8230 } 8231 8232 /* 8233 * Check to see if this is a MMC drive 8234 */ 8235 if (ISCD(un)) { 8236 sd_set_mmc_caps(ssc); 8237 } 8238 8239 /* 8240 * Add a zero-length attribute to tell the world we support 8241 * kernel ioctls (for layered drivers) 8242 */ 8243 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8244 DDI_KERNEL_IOCTL, NULL, 0); 8245 8246 /* 8247 * Add a boolean property to tell the world we support 8248 * the B_FAILFAST flag (for layered drivers) 8249 */ 8250 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8251 "ddi-failfast-supported", NULL, 0); 8252 8253 /* 8254 * Initialize power management 8255 */ 8256 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8257 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8258 sd_setup_pm(ssc, devi); 8259 if (un->un_f_pm_is_enabled == FALSE) { 8260 /* 8261 * For performance, point to a jump table that does 8262 * not include pm. 8263 * The direct and priority chains don't change with PM. 8264 * 8265 * Note: this is currently done based on individual device 8266 * capabilities. When an interface for determining system 8267 * power enabled state becomes available, or when additional 8268 * layers are added to the command chain, these values will 8269 * have to be re-evaluated for correctness. 8270 */ 8271 if (un->un_f_non_devbsize_supported) { 8272 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8273 } else { 8274 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8275 } 8276 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8277 } 8278 8279 /* 8280 * This property is set to 0 by HA software to avoid retries 8281 * on a reserved disk. (The preferred property name is 8282 * "retry-on-reservation-conflict") (1189689) 8283 * 8284 * Note: The use of a global here can have unintended consequences. 
A 8285 * per-instance variable is preferable to match the capabilities of 8286 * different underlying hba's (4402600). 8287 */ 8288 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8289 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8290 sd_retry_on_reservation_conflict); 8291 if (sd_retry_on_reservation_conflict != 0) { 8292 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8293 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8294 sd_retry_on_reservation_conflict); 8295 } 8296 8297 /* Set up options for QFULL handling. */ 8298 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8299 "qfull-retries", -1)) != -1) { 8300 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8301 rval, 1); 8302 } 8303 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8304 "qfull-retry-interval", -1)) != -1) { 8305 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8306 rval, 1); 8307 } 8308 8309 /* 8310 * This just prints a message that announces the existence of the 8311 * device. The message is always printed in the system logfile, but 8312 * only appears on the console if the system is booted with the 8313 * -v (verbose) argument. 8314 */ 8315 ddi_report_dev(devi); 8316 8317 un->un_mediastate = DKIO_NONE; 8318 8319 /* 8320 * Check the Block Device Characteristics VPD. 8321 */ 8322 sd_check_bdc_vpd(ssc); 8323 8324 /* 8325 * Check whether the drive is in emulation mode. 8326 */ 8327 sd_check_emulation_mode(ssc); 8328 8329 cmlb_alloc_handle(&un->un_cmlbhandle); 8330 8331 #if defined(__i386) || defined(__amd64) 8332 /* 8333 * On x86, compensate for the off-by-1 legacy error. 8334 */ 8335 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8336 (lbasize == un->un_sys_blocksize)) 8337 offbyone = CMLB_OFF_BY_ONE; 8338 #endif 8339 8340 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8341 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8342 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8343 un->un_node_type, offbyone, un->un_cmlbhandle, 8344 (void *)SD_PATH_DIRECT) != 0) { 8345 goto cmlb_attach_failed; 8346 } 8347 8348 8349 /* 8350 * Read and validate the device's geometry (i.e., disk label). 8351 * A new unformatted drive will not have a valid geometry, but 8352 * the driver needs to successfully attach to this device so 8353 * the drive can be formatted via ioctls. 8354 */ 8355 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8356 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8357 8358 mutex_enter(SD_MUTEX(un)); 8359 8360 /* 8361 * Read and initialize the devid for the unit. 8362 */ 8363 if (un->un_f_devid_supported) { 8364 sd_register_devid(ssc, devi, reservation_flag); 8365 } 8366 mutex_exit(SD_MUTEX(un)); 8367 8368 #if (defined(__fibre)) 8369 /* 8370 * Register callbacks for fibre only. You can't do this solely 8371 * on the basis of the devid_type because this is hba specific. 8372 * We need to query our hba capabilities to find out whether to 8373 * register or not. 8374 */ 8375 if (un->un_f_is_fibre) { 8376 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8377 sd_init_event_callbacks(un); 8378 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8379 "sd_unit_attach: un:0x%p event callbacks inserted", 8380 un); 8381 } 8382 } 8383 #endif 8384 8385 if (un->un_f_opt_disable_cache == TRUE) { 8386 /* 8387 * Disable both read cache and write cache. This is 8388 * the historic behavior of the keywords in the config file.
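* A failure to disable caching here is treated as fatal to the attach.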
8389 */ 8390 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8391 0) { 8392 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8393 "sd_unit_attach: un:0x%p Could not disable " 8394 "caching", un); 8395 goto devid_failed; 8396 } 8397 } 8398 8399 /* 8400 * Check the value of the WCE bit and if it's allowed to be changed, 8401 * set un_f_write_cache_enabled and un_f_cache_mode_changeable 8402 * accordingly. 8403 */ 8404 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8405 sd_get_write_cache_changeable(ssc, &wc_changeable); 8406 mutex_enter(SD_MUTEX(un)); 8407 un->un_f_write_cache_enabled = (wc_enabled != 0); 8408 un->un_f_cache_mode_changeable = (wc_changeable != 0); 8409 mutex_exit(SD_MUTEX(un)); 8410 8411 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8412 un->un_tgt_blocksize != DEV_BSIZE) || 8413 un->un_f_enable_rmw) { 8414 if (!(un->un_wm_cache)) { 8415 (void) snprintf(name_str, sizeof (name_str), 8416 "%s%d_cache", 8417 ddi_driver_name(SD_DEVINFO(un)), 8418 ddi_get_instance(SD_DEVINFO(un))); 8419 un->un_wm_cache = kmem_cache_create( 8420 name_str, sizeof (struct sd_w_map), 8421 8, sd_wm_cache_constructor, 8422 sd_wm_cache_destructor, NULL, 8423 (void *)un, NULL, 0); 8424 if (!(un->un_wm_cache)) { 8425 goto wm_cache_failed; 8426 } 8427 } 8428 } 8429 8430 /* 8431 * Check the value of the NV_SUP bit and set 8432 * un_f_suppress_cache_flush accordingly. 8433 */ 8434 sd_get_nv_sup(ssc); 8435 8436 /* 8437 * Find out what type of reservation this disk supports. 8438 */ 8439 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8440 8441 switch (status) { 8442 case 0: 8443 /* 8444 * SCSI-3 reservations are supported. 8445 */ 8446 un->un_reservation_type = SD_SCSI3_RESERVATION; 8447 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8448 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8449 break; 8450 case ENOTSUP: 8451 /* 8452 * The PERSISTENT RESERVE IN command would not be recognized by 8453 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8454 */ 8455 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8456 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8457 un->un_reservation_type = SD_SCSI2_RESERVATION; 8458 8459 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8460 break; 8461 default: 8462 /* 8463 * default to SCSI-3 reservations 8464 */ 8465 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8466 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8467 un->un_reservation_type = SD_SCSI3_RESERVATION; 8468 8469 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8470 break; 8471 } 8472 8473 /* 8474 * Set the pstat and error stat values here, so data obtained during the 8475 * previous attach-time routines is available. 8476 * 8477 * Note: This is a critical sequence that needs to be maintained: 8478 * 1) Instantiate the kstats before any routines using the iopath 8479 * (i.e. sd_send_scsi_cmd). 8480 * 2) Initialize the error stats (sd_set_errstats) and partition 8481 * stats (sd_set_pstats)here, following 8482 * cmlb_validate_geometry(), sd_register_devid(), and 8483 * sd_cache_control(). 
8484 */ 8485 8486 if (un->un_f_pkstats_enabled && geom_label_valid) { 8487 sd_set_pstats(un); 8488 SD_TRACE(SD_LOG_IO_PARTITION, un, 8489 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8490 } 8491 8492 sd_set_errstats(un); 8493 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8494 "sd_unit_attach: un:0x%p errstats set\n", un); 8495 8496 sd_setup_blk_limits(ssc); 8497 8498 /* 8499 * After successfully attaching an instance, we record the information 8500 * of how many luns have been attached on the relative target and 8501 * controller for parallel SCSI. This information is used when sd tries 8502 * to set the tagged queuing capability in HBA. 8503 */ 8504 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8505 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8506 } 8507 8508 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8509 "sd_unit_attach: un:0x%p exit success\n", un); 8510 8511 /* Uninitialize sd_ssc_t pointer */ 8512 sd_ssc_fini(ssc); 8513 8514 return (DDI_SUCCESS); 8515 8516 /* 8517 * An error occurred during the attach; clean up & return failure. 8518 */ 8519 wm_cache_failed: 8520 devid_failed: 8521 ddi_remove_minor_node(devi, NULL); 8522 8523 cmlb_attach_failed: 8524 /* 8525 * Cleanup from the scsi_ifsetcap() calls (437868) 8526 */ 8527 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8528 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8529 8530 /* 8531 * Refer to the comments of setting tagged-qing in the beginning of 8532 * sd_unit_attach. We can only disable tagged queuing when there is 8533 * no lun attached on the target. 8534 */ 8535 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8536 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8537 } 8538 8539 if (un->un_f_is_fibre == FALSE) { 8540 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8541 } 8542 8543 spinup_failed: 8544 8545 /* Uninitialize sd_ssc_t pointer */ 8546 sd_ssc_fini(ssc); 8547 8548 mutex_enter(SD_MUTEX(un)); 8549 8550 /* Deallocate SCSI FMA memory spaces */ 8551 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8552 8553 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart callback. */ 8554 if (un->un_direct_priority_timeid != NULL) { 8555 timeout_id_t temp_id = un->un_direct_priority_timeid; 8556 un->un_direct_priority_timeid = NULL; 8557 mutex_exit(SD_MUTEX(un)); 8558 (void) untimeout(temp_id); 8559 mutex_enter(SD_MUTEX(un)); 8560 } 8561 8562 /* Cancel any pending start/stop timeouts */ 8563 if (un->un_startstop_timeid != NULL) { 8564 timeout_id_t temp_id = un->un_startstop_timeid; 8565 un->un_startstop_timeid = NULL; 8566 mutex_exit(SD_MUTEX(un)); 8567 (void) untimeout(temp_id); 8568 mutex_enter(SD_MUTEX(un)); 8569 } 8570 8571 /* Cancel any pending reset-throttle timeouts */ 8572 if (un->un_reset_throttle_timeid != NULL) { 8573 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8574 un->un_reset_throttle_timeid = NULL; 8575 mutex_exit(SD_MUTEX(un)); 8576 (void) untimeout(temp_id); 8577 mutex_enter(SD_MUTEX(un)); 8578 } 8579 8580 /* Cancel rmw warning message timeouts */ 8581 if (un->un_rmw_msg_timeid != NULL) { 8582 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8583 un->un_rmw_msg_timeid = NULL; 8584 mutex_exit(SD_MUTEX(un)); 8585 (void) untimeout(temp_id); 8586 mutex_enter(SD_MUTEX(un)); 8587 } 8588 8589 /* Cancel any pending retry timeouts */ 8590 if (un->un_retry_timeid != NULL) { 8591 timeout_id_t temp_id = un->un_retry_timeid; 8592 un->un_retry_timeid = NULL; 8593 mutex_exit(SD_MUTEX(un)); 8594 (void) untimeout(temp_id); 8595 mutex_enter(SD_MUTEX(un)); 8596 } 8597 8598 /* Cancel any pending delayed cv broadcast timeouts */ 8599 if (un->un_dcvb_timeid != NULL) { 8600 timeout_id_t temp_id = un->un_dcvb_timeid; 8601 un->un_dcvb_timeid = NULL; 8602 mutex_exit(SD_MUTEX(un)); 8603 (void) untimeout(temp_id); 8604 mutex_enter(SD_MUTEX(un)); 8605 } 8606 8607 mutex_exit(SD_MUTEX(un)); 8608 8609 /* There should not be any in-progress I/O, so ASSERT that here */ 8610 ASSERT(un->un_ncmds_in_transport == 0); 8611 ASSERT(un->un_ncmds_in_driver == 0); 8612 8613 /* Do not free the softstate if the callback routine is active */ 8614 sd_sync_with_callback(un); 8615 8616 /* 8617 * Partition stats apparently are not used with removables. These would 8618 * not have been created during attach, so no need to clean them up... 8619 */ 8620 if (un->un_errstats != NULL) { 8621 kstat_delete(un->un_errstats); 8622 un->un_errstats = NULL; 8623 } 8624 8625 create_errstats_failed: 8626 8627 if (un->un_stats != NULL) { 8628 kstat_delete(un->un_stats); 8629 un->un_stats = NULL; 8630 } 8631 8632 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8633 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8634 8635 ddi_prop_remove_all(devi); 8636 sema_destroy(&un->un_semoclose); 8637 cv_destroy(&un->un_state_cv); 8638 8639 sd_free_rqs(un); 8640 8641 alloc_rqs_failed: 8642 8643 devp->sd_private = NULL; 8644 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8645 8646 /* 8647 * Note: the man pages are unclear as to whether or not doing a 8648 * ddi_soft_state_free(sd_state, instance) is the right way to 8649 * clean up after the ddi_soft_state_zalloc() if the subsequent 8650 * ddi_get_soft_state() fails. The implication seems to be 8651 * that the get_soft_state cannot fail if the zalloc succeeds. 8652 */ 8653 #ifndef XPV_HVM_DRIVER 8654 ddi_soft_state_free(sd_state, instance); 8655 #endif /* !XPV_HVM_DRIVER */ 8656 8657 probe_failed: 8658 scsi_unprobe(devp); 8659 8660 return (DDI_FAILURE); 8661 } 8662 8663 8664 /* 8665 * Function: sd_unit_detach 8666 * 8667 * Description: Performs DDI_DETACH processing for sddetach().
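* Arguments: devi - opaque device info handle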
8668 * 8669 * Return Code: DDI_SUCCESS 8670 * DDI_FAILURE 8671 * 8672 * Context: Kernel thread context 8673 */ 8674 8675 static int 8676 sd_unit_detach(dev_info_t *devi) 8677 { 8678 struct scsi_device *devp; 8679 struct sd_lun *un; 8680 int i; 8681 int tgt; 8682 dev_t dev; 8683 dev_info_t *pdip = ddi_get_parent(devi); 8684 int instance = ddi_get_instance(devi); 8685 8686 mutex_enter(&sd_detach_mutex); 8687 8688 /* 8689 * Fail the detach for any of the following: 8690 * - Unable to get the sd_lun struct for the instance 8691 * - A layered driver has an outstanding open on the instance 8692 * - Another thread is already detaching this instance 8693 * - Another thread is currently performing an open 8694 */ 8695 devp = ddi_get_driver_private(devi); 8696 if ((devp == NULL) || 8697 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8698 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8699 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8700 mutex_exit(&sd_detach_mutex); 8701 return (DDI_FAILURE); 8702 } 8703 8704 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8705 8706 /* 8707 * Mark this instance as currently in a detach, to inhibit any 8708 * opens from a layered driver. 8709 */ 8710 un->un_detach_count++; 8711 mutex_exit(&sd_detach_mutex); 8712 8713 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8714 SCSI_ADDR_PROP_TARGET, -1); 8715 8716 dev = sd_make_device(SD_DEVINFO(un)); 8717 8718 #ifndef lint 8719 _NOTE(COMPETING_THREADS_NOW); 8720 #endif 8721 8722 mutex_enter(SD_MUTEX(un)); 8723 8724 /* 8725 * Fail the detach if there are any outstanding layered 8726 * opens on this device. 8727 */ 8728 for (i = 0; i < NDKMAP; i++) { 8729 if (un->un_ocmap.lyropen[i] != 0) { 8730 goto err_notclosed; 8731 } 8732 } 8733 8734 /* 8735 * Verify there are NO outstanding commands issued to this device. 8736 * ie, un_ncmds_in_transport == 0. 8737 * It's possible to have outstanding commands through the physio 8738 * code path, even though everything's closed. 8739 */ 8740 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8741 (un->un_direct_priority_timeid != NULL) || 8742 (un->un_state == SD_STATE_RWAIT)) { 8743 mutex_exit(SD_MUTEX(un)); 8744 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8745 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8746 goto err_stillbusy; 8747 } 8748 8749 /* 8750 * If we have the device reserved, release the reservation. 8751 */ 8752 if ((un->un_resvd_status & SD_RESERVE) && 8753 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8754 mutex_exit(SD_MUTEX(un)); 8755 /* 8756 * Note: sd_reserve_release sends a command to the device 8757 * via the sd_ioctlcmd() path, and can sleep. 8758 */ 8759 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8760 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8761 "sd_dr_detach: Cannot release reservation \n"); 8762 } 8763 } else { 8764 mutex_exit(SD_MUTEX(un)); 8765 } 8766 8767 /* 8768 * Untimeout any reserve recover, throttle reset, restart unit 8769 * and delayed broadcast timeout threads. Protect the timeout pointer 8770 * from getting nulled by their callback functions. 
8771 */ 8772 mutex_enter(SD_MUTEX(un)); 8773 if (un->un_resvd_timeid != NULL) { 8774 timeout_id_t temp_id = un->un_resvd_timeid; 8775 un->un_resvd_timeid = NULL; 8776 mutex_exit(SD_MUTEX(un)); 8777 (void) untimeout(temp_id); 8778 mutex_enter(SD_MUTEX(un)); 8779 } 8780 8781 if (un->un_reset_throttle_timeid != NULL) { 8782 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8783 un->un_reset_throttle_timeid = NULL; 8784 mutex_exit(SD_MUTEX(un)); 8785 (void) untimeout(temp_id); 8786 mutex_enter(SD_MUTEX(un)); 8787 } 8788 8789 if (un->un_startstop_timeid != NULL) { 8790 timeout_id_t temp_id = un->un_startstop_timeid; 8791 un->un_startstop_timeid = NULL; 8792 mutex_exit(SD_MUTEX(un)); 8793 (void) untimeout(temp_id); 8794 mutex_enter(SD_MUTEX(un)); 8795 } 8796 8797 if (un->un_rmw_msg_timeid != NULL) { 8798 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8799 un->un_rmw_msg_timeid = NULL; 8800 mutex_exit(SD_MUTEX(un)); 8801 (void) untimeout(temp_id); 8802 mutex_enter(SD_MUTEX(un)); 8803 } 8804 8805 if (un->un_dcvb_timeid != NULL) { 8806 timeout_id_t temp_id = un->un_dcvb_timeid; 8807 un->un_dcvb_timeid = NULL; 8808 mutex_exit(SD_MUTEX(un)); 8809 (void) untimeout(temp_id); 8810 } else { 8811 mutex_exit(SD_MUTEX(un)); 8812 } 8813 8814 /* Remove any pending reservation reclaim requests for this device */ 8815 sd_rmv_resv_reclaim_req(dev); 8816 8817 mutex_enter(SD_MUTEX(un)); 8818 8819 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8820 if (un->un_direct_priority_timeid != NULL) { 8821 timeout_id_t temp_id = un->un_direct_priority_timeid; 8822 un->un_direct_priority_timeid = NULL; 8823 mutex_exit(SD_MUTEX(un)); 8824 (void) untimeout(temp_id); 8825 mutex_enter(SD_MUTEX(un)); 8826 } 8827 8828 /* Cancel any active multi-host disk watch thread requests */ 8829 if (un->un_mhd_token != NULL) { 8830 mutex_exit(SD_MUTEX(un)); 8831 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8832 if (scsi_watch_request_terminate(un->un_mhd_token, 8833 SCSI_WATCH_TERMINATE_NOWAIT)) { 8834 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8835 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8836 /* 8837 * Note: We are returning here after having removed 8838 * some driver timeouts above. This is consistent with 8839 * the legacy implementation but perhaps the watch 8840 * terminate call should be made with the wait flag set. 8841 */ 8842 goto err_stillbusy; 8843 } 8844 mutex_enter(SD_MUTEX(un)); 8845 un->un_mhd_token = NULL; 8846 } 8847 8848 if (un->un_swr_token != NULL) { 8849 mutex_exit(SD_MUTEX(un)); 8850 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8851 if (scsi_watch_request_terminate(un->un_swr_token, 8852 SCSI_WATCH_TERMINATE_NOWAIT)) { 8853 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8854 "sd_dr_detach: Cannot cancel swr watch request\n"); 8855 /* 8856 * Note: We are returning here after having removed 8857 * some driver timeouts above. This is consistent with 8858 * the legacy implementation but perhaps the watch 8859 * terminate call should be made with the wait flag set. 8860 */ 8861 goto err_stillbusy; 8862 } 8863 mutex_enter(SD_MUTEX(un)); 8864 un->un_swr_token = NULL; 8865 } 8866 8867 mutex_exit(SD_MUTEX(un)); 8868 8869 /* 8870 * Clear any scsi_reset_notifies. We clear the reset notifies 8871 * if we have not registered one. 8872 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 
8873 */
8874 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8875 sd_mhd_reset_notify_cb, (caddr_t)un);
8876
8877 /*
8878 * Protect the timeout pointers from getting nulled by
8879 * their callback functions during the cancellation process.
8880 * In such a scenario untimeout can be invoked with a null value.
8881 */
8882 _NOTE(NO_COMPETING_THREADS_NOW);
8883
8884 mutex_enter(&un->un_pm_mutex);
8885 if (un->un_pm_idle_timeid != NULL) {
8886 timeout_id_t temp_id = un->un_pm_idle_timeid;
8887 un->un_pm_idle_timeid = NULL;
8888 mutex_exit(&un->un_pm_mutex);
8889
8890 /*
8891 * Timeout is active; cancel it.
8892 * Note that it'll never be active on a device
8893 * that does not support PM; therefore we don't
8894 * have to check before calling pm_idle_component.
8895 */
8896 (void) untimeout(temp_id);
8897 (void) pm_idle_component(SD_DEVINFO(un), 0);
8898 mutex_enter(&un->un_pm_mutex);
8899 }
8900
8901 /*
8902 * Check whether there is already a timeout scheduled for power
8903 * management. If so, don't lower the power here; that's
8904 * the timeout handler's job.
8905 */
8906 if (un->un_pm_timeid != NULL) {
8907 timeout_id_t temp_id = un->un_pm_timeid;
8908 un->un_pm_timeid = NULL;
8909 mutex_exit(&un->un_pm_mutex);
8910 /*
8911 * Timeout is active; cancel it.
8912 * Note that it'll never be active on a device
8913 * that does not support PM; therefore we don't
8914 * have to check before calling pm_idle_component.
8915 */
8916 (void) untimeout(temp_id);
8917 (void) pm_idle_component(SD_DEVINFO(un), 0);
8918
8919 } else {
8920 mutex_exit(&un->un_pm_mutex);
8921 if ((un->un_f_pm_is_enabled == TRUE) &&
8922 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8923 != DDI_SUCCESS)) {
8924 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8925 "sd_dr_detach: Lower power request failed, ignoring.\n");
8926 /*
8927 * Fix for bug: 4297749, item # 13
8928 * The above test now includes a check to see if PM is
8929 * supported by this device before calling
8930 * pm_lower_power().
8931 * Note, the following is not dead code. The call to
8932 * pm_lower_power above will generate a call back into
8933 * our sdpower routine which might result in a timeout
8934 * handler getting activated. Therefore the following
8935 * code is valid and necessary.
8936 */
8937 mutex_enter(&un->un_pm_mutex);
8938 if (un->un_pm_timeid != NULL) {
8939 timeout_id_t temp_id = un->un_pm_timeid;
8940 un->un_pm_timeid = NULL;
8941 mutex_exit(&un->un_pm_mutex);
8942 (void) untimeout(temp_id);
8943 (void) pm_idle_component(SD_DEVINFO(un), 0);
8944 } else {
8945 mutex_exit(&un->un_pm_mutex);
8946 }
8947 }
8948 }
8949
8950 /*
8951 * Cleanup from the scsi_ifsetcap() calls (437868)
8952 * Relocated here from above to be after the call to
8953 * pm_lower_power, which was getting errors.
8954 */
8955 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8956 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8957
8958 /*
8959 * Currently, tagged queuing is supported per target by the HBA.
8960 * Setting this per lun instance actually sets the capability of the
8961 * target in the HBA, which affects the luns already attached on the
8962 * same target. So during detach, we can disable this capability
8963 * only when this is the only lun left on the target. By doing
8964 * this, we assume a target has the same tagged queuing capability
8965 * for every lun. The condition can be removed when the HBA is changed
8966 * to support per-lun tagged queuing capability.
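 *
 * For example (illustrative): with LUN 0 and LUN 1 attached on the
 * same target, detaching LUN 0 must leave "tagged-qing" enabled
 * because LUN 1 still relies on it; only when the LUN being detached
 * is the last one on the target (lun count <= 1 below) is the
 * capability cleared.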
8967 */ 8968 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8969 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8970 } 8971 8972 if (un->un_f_is_fibre == FALSE) { 8973 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8974 } 8975 8976 /* 8977 * Remove any event callbacks, fibre only 8978 */ 8979 if (un->un_f_is_fibre == TRUE) { 8980 if ((un->un_insert_event != NULL) && 8981 (ddi_remove_event_handler(un->un_insert_cb_id) != 8982 DDI_SUCCESS)) { 8983 /* 8984 * Note: We are returning here after having done 8985 * substantial cleanup above. This is consistent 8986 * with the legacy implementation but this may not 8987 * be the right thing to do. 8988 */ 8989 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8990 "sd_dr_detach: Cannot cancel insert event\n"); 8991 goto err_remove_event; 8992 } 8993 un->un_insert_event = NULL; 8994 8995 if ((un->un_remove_event != NULL) && 8996 (ddi_remove_event_handler(un->un_remove_cb_id) != 8997 DDI_SUCCESS)) { 8998 /* 8999 * Note: We are returning here after having done 9000 * substantial cleanup above. This is consistent 9001 * with the legacy implementation but this may not 9002 * be the right thing to do. 9003 */ 9004 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9005 "sd_dr_detach: Cannot cancel remove event\n"); 9006 goto err_remove_event; 9007 } 9008 un->un_remove_event = NULL; 9009 } 9010 9011 /* Do not free the softstate if the callback routine is active */ 9012 sd_sync_with_callback(un); 9013 9014 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 9015 cmlb_free_handle(&un->un_cmlbhandle); 9016 9017 /* 9018 * Hold the detach mutex here, to make sure that no other threads ever 9019 * can access a (partially) freed soft state structure. 9020 */ 9021 mutex_enter(&sd_detach_mutex); 9022 9023 /* 9024 * Clean up the soft state struct. 9025 * Cleanup is done in reverse order of allocs/inits. 9026 * At this point there should be no competing threads anymore. 9027 */ 9028 9029 scsi_fm_fini(devp); 9030 9031 /* 9032 * Deallocate memory for SCSI FMA. 9033 */ 9034 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 9035 9036 /* 9037 * Unregister and free device id if it was not registered 9038 * by the transport. 9039 */ 9040 if (un->un_f_devid_transport_defined == FALSE) 9041 ddi_devid_unregister(devi); 9042 9043 /* 9044 * free the devid structure if allocated before (by ddi_devid_init() 9045 * or ddi_devid_get()). 9046 */ 9047 if (un->un_devid) { 9048 ddi_devid_free(un->un_devid); 9049 un->un_devid = NULL; 9050 } 9051 9052 /* 9053 * Destroy wmap cache if it exists. 9054 */ 9055 if (un->un_wm_cache != NULL) { 9056 kmem_cache_destroy(un->un_wm_cache); 9057 un->un_wm_cache = NULL; 9058 } 9059 9060 /* 9061 * kstat cleanup is done in detach for all device types (4363169). 9062 * We do not want to fail detach if the device kstats are not deleted 9063 * since there is a confusion about the devo_refcnt for the device. 9064 * We just delete the kstats and let detach complete successfully. 
9065 */ 9066 if (un->un_stats != NULL) { 9067 kstat_delete(un->un_stats); 9068 un->un_stats = NULL; 9069 } 9070 if (un->un_unmapstats != NULL) { 9071 kstat_delete(un->un_unmapstats_ks); 9072 un->un_unmapstats_ks = NULL; 9073 un->un_unmapstats = NULL; 9074 } 9075 if (un->un_errstats != NULL) { 9076 kstat_delete(un->un_errstats); 9077 un->un_errstats = NULL; 9078 } 9079 9080 /* Remove partition stats */ 9081 if (un->un_f_pkstats_enabled) { 9082 for (i = 0; i < NSDMAP; i++) { 9083 if (un->un_pstats[i] != NULL) { 9084 kstat_delete(un->un_pstats[i]); 9085 un->un_pstats[i] = NULL; 9086 } 9087 } 9088 } 9089 9090 /* Remove xbuf registration */ 9091 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9092 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9093 9094 /* Remove driver properties */ 9095 ddi_prop_remove_all(devi); 9096 9097 mutex_destroy(&un->un_pm_mutex); 9098 cv_destroy(&un->un_pm_busy_cv); 9099 9100 cv_destroy(&un->un_wcc_cv); 9101 9102 /* Open/close semaphore */ 9103 sema_destroy(&un->un_semoclose); 9104 9105 /* Removable media condvar. */ 9106 cv_destroy(&un->un_state_cv); 9107 9108 /* Suspend/resume condvar. */ 9109 cv_destroy(&un->un_suspend_cv); 9110 cv_destroy(&un->un_disk_busy_cv); 9111 9112 sd_free_rqs(un); 9113 9114 /* Free up soft state */ 9115 devp->sd_private = NULL; 9116 9117 bzero(un, sizeof (struct sd_lun)); 9118 9119 ddi_soft_state_free(sd_state, instance); 9120 9121 mutex_exit(&sd_detach_mutex); 9122 9123 /* This frees up the INQUIRY data associated with the device. */ 9124 scsi_unprobe(devp); 9125 9126 /* 9127 * After successfully detaching an instance, we update the information 9128 * of how many luns have been attached in the relative target and 9129 * controller for parallel SCSI. This information is used when sd tries 9130 * to set the tagged queuing capability in HBA. 9131 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 9132 * check if the device is parallel SCSI. However, we don't need to 9133 * check here because we've already checked during attach. No device 9134 * that is not parallel SCSI is in the chain. 9135 */ 9136 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 9137 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 9138 } 9139 9140 return (DDI_SUCCESS); 9141 9142 err_notclosed: 9143 mutex_exit(SD_MUTEX(un)); 9144 9145 err_stillbusy: 9146 _NOTE(NO_COMPETING_THREADS_NOW); 9147 9148 err_remove_event: 9149 mutex_enter(&sd_detach_mutex); 9150 un->un_detach_count--; 9151 mutex_exit(&sd_detach_mutex); 9152 9153 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9154 return (DDI_FAILURE); 9155 } 9156 9157 9158 /* 9159 * Function: sd_create_errstats 9160 * 9161 * Description: This routine instantiates the device error stats. 9162 * 9163 * Note: During attach the stats are instantiated first so they are 9164 * available for attach-time routines that utilize the driver 9165 * iopath to send commands to the device. The stats are initialized 9166 * separately so data obtained during some attach-time routines is 9167 * available. 
(4362483) 9168 * 9169 * Arguments: un - driver soft state (unit) structure 9170 * instance - driver instance 9171 * 9172 * Context: Kernel thread context 9173 */ 9174 9175 static void 9176 sd_create_errstats(struct sd_lun *un, int instance) 9177 { 9178 struct sd_errstats *stp; 9179 char kstatmodule_err[KSTAT_STRLEN]; 9180 char kstatname[KSTAT_STRLEN]; 9181 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9182 9183 ASSERT(un != NULL); 9184 9185 if (un->un_errstats != NULL) { 9186 return; 9187 } 9188 9189 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9190 "%serr", sd_label); 9191 (void) snprintf(kstatname, sizeof (kstatname), 9192 "%s%d,err", sd_label, instance); 9193 9194 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9195 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9196 9197 if (un->un_errstats == NULL) { 9198 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9199 "sd_create_errstats: Failed kstat_create\n"); 9200 return; 9201 } 9202 9203 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9204 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9205 KSTAT_DATA_UINT32); 9206 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9207 KSTAT_DATA_UINT32); 9208 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9209 KSTAT_DATA_UINT32); 9210 kstat_named_init(&stp->sd_vid, "Vendor", 9211 KSTAT_DATA_CHAR); 9212 kstat_named_init(&stp->sd_pid, "Product", 9213 KSTAT_DATA_CHAR); 9214 kstat_named_init(&stp->sd_revision, "Revision", 9215 KSTAT_DATA_CHAR); 9216 kstat_named_init(&stp->sd_serial, "Serial No", 9217 KSTAT_DATA_CHAR); 9218 kstat_named_init(&stp->sd_capacity, "Size", 9219 KSTAT_DATA_ULONGLONG); 9220 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9221 KSTAT_DATA_UINT32); 9222 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9223 KSTAT_DATA_UINT32); 9224 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9225 KSTAT_DATA_UINT32); 9226 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9227 KSTAT_DATA_UINT32); 9228 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9229 KSTAT_DATA_UINT32); 9230 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9231 KSTAT_DATA_UINT32); 9232 9233 un->un_errstats->ks_private = un; 9234 un->un_errstats->ks_update = nulldev; 9235 9236 kstat_install(un->un_errstats); 9237 } 9238 9239 9240 /* 9241 * Function: sd_set_errstats 9242 * 9243 * Description: This routine sets the value of the vendor id, product id, 9244 * revision, serial number, and capacity device error stats. 9245 * 9246 * Note: During attach the stats are instantiated first so they are 9247 * available for attach-time routines that utilize the driver 9248 * iopath to send commands to the device. The stats are initialized 9249 * separately so data obtained during some attach-time routines is 9250 * available. 
(4362483)
9251 *
9252 * Arguments: un - driver soft state (unit) structure
9253 *
9254 * Context: Kernel thread context
9255 */
9256
9257 static void
9258 sd_set_errstats(struct sd_lun *un)
9259 {
9260 struct sd_errstats *stp;
9261 char *sn;
9262
9263 ASSERT(un != NULL);
9264 ASSERT(un->un_errstats != NULL);
9265 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9266 ASSERT(stp != NULL);
9267 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9268 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9269 (void) strncpy(stp->sd_revision.value.c,
9270 un->un_sd->sd_inq->inq_revision, 4);
9271
9272 /*
9273 * All the errstats are persistent across detach/attach, so reset
9274 * them here in case a disk drive has been hot-replaced, except
9275 * for Sun qualified drives whose serial number has not changed.
9276 */
9277 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9278 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9279 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9280 stp->sd_softerrs.value.ui32 = 0;
9281 stp->sd_harderrs.value.ui32 = 0;
9282 stp->sd_transerrs.value.ui32 = 0;
9283 stp->sd_rq_media_err.value.ui32 = 0;
9284 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9285 stp->sd_rq_nodev_err.value.ui32 = 0;
9286 stp->sd_rq_recov_err.value.ui32 = 0;
9287 stp->sd_rq_illrq_err.value.ui32 = 0;
9288 stp->sd_rq_pfa_err.value.ui32 = 0;
9290 }
9291
9292 /*
9293 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9294 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9295 * (4376302))
9296 */
9297 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9298 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9299 sizeof (SD_INQUIRY(un)->inq_serial));
9300 } else {
9301 /*
9302 * Set the "Serial No" kstat for non-Sun qualified drives
9303 */
9304 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9305 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9306 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9307 (void) strlcpy(stp->sd_serial.value.c, sn,
9308 sizeof (stp->sd_serial.value.c));
9309 ddi_prop_free(sn);
9310 }
9311 }
9312
9313 if (un->un_f_blockcount_is_valid != TRUE) {
9314 /*
9315 * Set capacity error stat to 0 for no media. This ensures
9316 * a valid capacity is displayed in response to 'iostat -E'
9317 * when no media is present in the device.
9318 */
9319 stp->sd_capacity.value.ui64 = 0;
9320 } else {
9321 /*
9322 * Multiply un_blockcount by un->un_sys_blocksize to get
9323 * capacity.
9324 *
9325 * Note: for non-512 blocksize devices "un_blockcount" has been
9326 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9327 * (un_tgt_blocksize / un->un_sys_blocksize).
9328 */
9329 stp->sd_capacity.value.ui64 = (uint64_t)
9330 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9331 }
9332 }
9333
9334
9335 /*
9336 * Function: sd_set_pstats
9337 *
9338 * Description: This routine instantiates and initializes the partition
9339 * stats for each partition with more than zero blocks.
9340 * (4363169)
9341 *
9342 * Arguments: un - driver soft state (unit) structure
9343 *
9344 * Context: Kernel thread context
9345 */
9346
9347 static void
9348 sd_set_pstats(struct sd_lun *un)
9349 {
9350 char kstatname[KSTAT_STRLEN];
9351 int instance;
9352 int i;
9353 diskaddr_t nblks = 0;
9354 char *partname = NULL;
9355
9356 ASSERT(un != NULL);
9357
9358 instance = ddi_get_instance(SD_DEVINFO(un));
9359
9360 /* Note:x86: is this a VTOC8/VTOC16 difference?
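 * The loop below creates one IO kstat per non-empty partition. The
 * kstat is named after the driver instance and the partition name,
 * e.g. (illustrative) instance 0 with partition name "a" yields a
 * kstat named "sd0,a" in module "sd".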
*/
9361 for (i = 0; i < NSDMAP; i++) {
9362
9363 if (cmlb_partinfo(un->un_cmlbhandle, i,
9364 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9365 continue;
9366 mutex_enter(SD_MUTEX(un));
9367
9368 if ((un->un_pstats[i] == NULL) &&
9369 (nblks != 0)) {
9370
9371 (void) snprintf(kstatname, sizeof (kstatname),
9372 "%s%d,%s", sd_label, instance,
9373 partname);
9374
9375 un->un_pstats[i] = kstat_create(sd_label,
9376 instance, kstatname, "partition", KSTAT_TYPE_IO,
9377 1, KSTAT_FLAG_PERSISTENT);
9378 if (un->un_pstats[i] != NULL) {
9379 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9380 kstat_install(un->un_pstats[i]);
9381 }
9382 }
9383 mutex_exit(SD_MUTEX(un));
9384 }
9385 }
9386
9387
9388 #if (defined(__fibre))
9389 /*
9390 * Function: sd_init_event_callbacks
9391 *
9392 * Description: This routine initializes the insertion and removal event
9393 * callbacks. (fibre only)
9394 *
9395 * Arguments: un - driver soft state (unit) structure
9396 *
9397 * Context: Kernel thread context
9398 */
9399
9400 static void
9401 sd_init_event_callbacks(struct sd_lun *un)
9402 {
9403 ASSERT(un != NULL);
9404
9405 if ((un->un_insert_event == NULL) &&
9406 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9407 &un->un_insert_event) == DDI_SUCCESS)) {
9408 /*
9409 * Add the callback for an insertion event
9410 */
9411 (void) ddi_add_event_handler(SD_DEVINFO(un),
9412 un->un_insert_event, sd_event_callback, (void *)un,
9413 &(un->un_insert_cb_id));
9414 }
9415
9416 if ((un->un_remove_event == NULL) &&
9417 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9418 &un->un_remove_event) == DDI_SUCCESS)) {
9419 /*
9420 * Add the callback for a removal event
9421 */
9422 (void) ddi_add_event_handler(SD_DEVINFO(un),
9423 un->un_remove_event, sd_event_callback, (void *)un,
9424 &(un->un_remove_cb_id));
9425 }
9426 }
9427
9428
9429 /*
9430 * Function: sd_event_callback
9431 *
9432 * Description: This routine handles insert/remove events (photon). The
9433 * state is changed to OFFLINE, which can be used to suppress
9434 * error msgs. (fibre only)
9435 *
9436 * Arguments: un - driver soft state (unit) structure
9437 *
9438 * Context: Callout thread context
9439 */
9440 /* ARGSUSED */
9441 static void
9442 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9443 void *bus_impldata)
9444 {
9445 struct sd_lun *un = (struct sd_lun *)arg;
9446
9447 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9448 if (event == un->un_insert_event) {
9449 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9450 mutex_enter(SD_MUTEX(un));
9451 if (un->un_state == SD_STATE_OFFLINE) {
9452 if (un->un_last_state != SD_STATE_SUSPENDED) {
9453 un->un_state = un->un_last_state;
9454 } else {
9455 /*
9456 * We have gone through SUSPEND/RESUME while
9457 * we were offline. Restore the last state.
9458 */
9459 un->un_state = un->un_save_state;
9460 }
9461 }
9462 mutex_exit(SD_MUTEX(un));
9463
9464 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9465 } else if (event == un->un_remove_event) {
9466 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9467 mutex_enter(SD_MUTEX(un));
9468 /*
9469 * We need to handle an event callback that occurs during
9470 * the suspend operation, since we don't prevent it.
9471 */ 9472 if (un->un_state != SD_STATE_OFFLINE) { 9473 if (un->un_state != SD_STATE_SUSPENDED) { 9474 New_state(un, SD_STATE_OFFLINE); 9475 } else { 9476 un->un_last_state = SD_STATE_OFFLINE; 9477 } 9478 } 9479 mutex_exit(SD_MUTEX(un)); 9480 } else { 9481 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9482 "!Unknown event\n"); 9483 } 9484 9485 } 9486 #endif 9487 9488 /* 9489 * Values related to caching mode page depending on whether the unit is ATAPI. 9490 */ 9491 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9492 CDB_GROUP1 : CDB_GROUP0) 9493 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9494 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH) 9495 /* 9496 * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise 9497 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching). 9498 */ 9499 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \ 9500 sizeof (struct mode_cache_scsi3)) 9501 9502 static int 9503 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header, 9504 int *bdlen) 9505 { 9506 struct sd_lun *un = ssc->ssc_un; 9507 struct mode_caching *mode_caching_page; 9508 size_t buflen = SDC_BUFLEN(un); 9509 int hdrlen = SDC_HDRLEN(un); 9510 int rval; 9511 9512 /* 9513 * Do a test unit ready, otherwise a mode sense may not work if this 9514 * is the first command sent to the device after boot. 9515 */ 9516 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0) 9517 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9518 9519 /* 9520 * Allocate memory for the retrieved mode page and its headers. Set 9521 * a pointer to the page itself. 9522 */ 9523 *header = kmem_zalloc(buflen, KM_SLEEP); 9524 9525 /* Get the information from the device */ 9526 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen, 9527 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT); 9528 if (rval != 0) { 9529 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n", 9530 __func__); 9531 goto mode_sense_failed; 9532 } 9533 9534 /* 9535 * Determine size of Block Descriptors in order to locate 9536 * the mode page data. ATAPI devices return 0, SCSI devices 9537 * should return MODE_BLK_DESC_LENGTH. 9538 */ 9539 if (un->un_f_cfg_is_atapi == TRUE) { 9540 struct mode_header_grp2 *mhp = 9541 (struct mode_header_grp2 *)(*header); 9542 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9543 } else { 9544 *bdlen = ((struct mode_header *)(*header))->bdesc_length; 9545 } 9546 9547 if (*bdlen > MODE_BLK_DESC_LENGTH) { 9548 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9549 "%s: Mode Sense returned invalid block descriptor length\n", 9550 __func__); 9551 rval = EIO; 9552 goto mode_sense_failed; 9553 } 9554 9555 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen); 9556 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9557 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9558 "%s: Mode Sense caching page code mismatch %d\n", 9559 __func__, mode_caching_page->mode_page.code); 9560 rval = EIO; 9561 } 9562 9563 mode_sense_failed: 9564 if (rval != 0) { 9565 kmem_free(*header, buflen); 9566 *header = NULL; 9567 *bdlen = 0; 9568 } 9569 return (rval); 9570 } 9571 9572 /* 9573 * Function: sd_cache_control() 9574 * 9575 * Description: This routine is the driver entry point for setting 9576 * read and write caching by modifying the WCE (write cache 9577 * enable) and RCD (read cache disable) bits of mode 9578 * page 8 (MODEPAGE_CACHING). 
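 *
 *		The buffer handled here is the one set up by
 *		sd_get_caching_mode_page(), laid out as follows (sketch
 *		only; sizes as computed by the SDC_* macros above):
 *
 *		+---------------------------+  offset 0
 *		| mode header               |  SDC_HDRLEN(un) bytes
 *		+---------------------------+
 *		| block descriptor          |  bdlen bytes (0 on ATAPI)
 *		+---------------------------+
 *		| mode_caching page         |  WCE and RCD bits
 *		+---------------------------+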
9579 * 9580 * Arguments: ssc - ssc contains pointer to driver soft state 9581 * (unit) structure for this target. 9582 * rcd_flag - flag for controlling the read cache 9583 * wce_flag - flag for controlling the write cache 9584 * 9585 * Return Code: EIO 9586 * code returned by sd_send_scsi_MODE_SENSE and 9587 * sd_send_scsi_MODE_SELECT 9588 * 9589 * Context: Kernel Thread 9590 */ 9591 9592 static int 9593 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9594 { 9595 struct sd_lun *un = ssc->ssc_un; 9596 struct mode_caching *mode_caching_page; 9597 uchar_t *header; 9598 size_t buflen = SDC_BUFLEN(un); 9599 int hdrlen = SDC_HDRLEN(un); 9600 int bdlen; 9601 int rval; 9602 9603 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9604 switch (rval) { 9605 case 0: 9606 /* Check the relevant bits on successful mode sense */ 9607 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9608 bdlen); 9609 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9610 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9611 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9612 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9613 size_t sbuflen; 9614 uchar_t save_pg; 9615 9616 /* 9617 * Construct select buffer length based on the 9618 * length of the sense data returned. 9619 */ 9620 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) + 9621 (int)mode_caching_page->mode_page.length; 9622 9623 /* Set the caching bits as requested */ 9624 if (rcd_flag == SD_CACHE_ENABLE) 9625 mode_caching_page->rcd = 0; 9626 else if (rcd_flag == SD_CACHE_DISABLE) 9627 mode_caching_page->rcd = 1; 9628 9629 if (wce_flag == SD_CACHE_ENABLE) 9630 mode_caching_page->wce = 1; 9631 else if (wce_flag == SD_CACHE_DISABLE) 9632 mode_caching_page->wce = 0; 9633 9634 /* 9635 * Save the page if the mode sense says the 9636 * drive supports it. 9637 */ 9638 save_pg = mode_caching_page->mode_page.ps ? 9639 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9640 9641 /* Clear reserved bits before mode select */ 9642 mode_caching_page->mode_page.ps = 0; 9643 9644 /* 9645 * Clear out mode header for mode select. 9646 * The rest of the retrieved page will be reused. 9647 */ 9648 bzero(header, hdrlen); 9649 9650 if (un->un_f_cfg_is_atapi == TRUE) { 9651 struct mode_header_grp2 *mhp = 9652 (struct mode_header_grp2 *)header; 9653 mhp->bdesc_length_hi = bdlen >> 8; 9654 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff; 9655 } else { 9656 ((struct mode_header *)header)->bdesc_length = 9657 bdlen; 9658 } 9659 9660 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9661 9662 /* Issue mode select to change the cache settings */ 9663 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un), 9664 header, sbuflen, save_pg, SD_PATH_DIRECT); 9665 } 9666 kmem_free(header, buflen); 9667 break; 9668 case EIO: 9669 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9670 break; 9671 default: 9672 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9673 break; 9674 } 9675 9676 return (rval); 9677 } 9678 9679 9680 /* 9681 * Function: sd_get_write_cache_enabled() 9682 * 9683 * Description: This routine is the driver entry point for determining if write 9684 * caching is enabled. It examines the WCE (write cache enable) 9685 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9686 * bits set to MODEPAGE_CURRENT. 9687 * 9688 * Arguments: ssc - ssc contains pointer to driver soft state 9689 * (unit) structure for this target. 
9690 * is_enabled - pointer to int where write cache enabled state 9691 * is returned (non-zero -> write cache enabled) 9692 * 9693 * Return Code: EIO 9694 * code returned by sd_send_scsi_MODE_SENSE 9695 * 9696 * Context: Kernel Thread 9697 * 9698 * NOTE: If ioctl is added to disable write cache, this sequence should 9699 * be followed so that no locking is required for accesses to 9700 * un->un_f_write_cache_enabled: 9701 * do mode select to clear wce 9702 * do synchronize cache to flush cache 9703 * set un->un_f_write_cache_enabled = FALSE 9704 * 9705 * Conversely, an ioctl to enable the write cache should be done 9706 * in this order: 9707 * set un->un_f_write_cache_enabled = TRUE 9708 * do mode select to set wce 9709 */ 9710 9711 static int 9712 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9713 { 9714 struct sd_lun *un = ssc->ssc_un; 9715 struct mode_caching *mode_caching_page; 9716 uchar_t *header; 9717 size_t buflen = SDC_BUFLEN(un); 9718 int hdrlen = SDC_HDRLEN(un); 9719 int bdlen; 9720 int rval; 9721 9722 /* In case of error, flag as enabled */ 9723 *is_enabled = TRUE; 9724 9725 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9726 switch (rval) { 9727 case 0: 9728 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9729 bdlen); 9730 *is_enabled = mode_caching_page->wce; 9731 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9732 kmem_free(header, buflen); 9733 break; 9734 case EIO: { 9735 /* 9736 * Some disks do not support Mode Sense(6), we 9737 * should ignore this kind of error (sense key is 9738 * 0x5 - illegal request). 9739 */ 9740 uint8_t *sensep; 9741 int senlen; 9742 9743 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9744 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9745 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9746 9747 if (senlen > 0 && 9748 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9749 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9750 } else { 9751 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9752 } 9753 break; 9754 } 9755 default: 9756 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9757 break; 9758 } 9759 9760 return (rval); 9761 } 9762 9763 /* 9764 * Function: sd_get_write_cache_changeable() 9765 * 9766 * Description: This routine is the driver entry point for determining if write 9767 * caching is changeable. It examines the WCE (write cache enable) 9768 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9769 * bits set to MODEPAGE_CHANGEABLE. 9770 * 9771 * Arguments: ssc - ssc contains pointer to driver soft state 9772 * (unit) structure for this target. 
9773 * is_changeable - pointer to int where write cache changeable
9774 * state is returned (non-zero -> write cache
9775 * changeable)
9776 *
9777 * Context: Kernel Thread
9778 */
9779
9780 static void
9781 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable)
9782 {
9783 struct sd_lun *un = ssc->ssc_un;
9784 struct mode_caching *mode_caching_page;
9785 uchar_t *header;
9786 size_t buflen = SDC_BUFLEN(un);
9787 int hdrlen = SDC_HDRLEN(un);
9788 int bdlen;
9789 int rval;
9790
9791 /* In case of error, flag as changeable */
9792 *is_changeable = TRUE;
9793
9794 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header,
9795 &bdlen);
9796 switch (rval) {
9797 case 0:
9798 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9799 bdlen);
9800 *is_changeable = mode_caching_page->wce;
9801 kmem_free(header, buflen);
9802 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9803 break;
9804 case EIO:
9805 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9806 break;
9807 default:
9808 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9809 break;
9810 }
9811 }
9812
9813 /*
9814 * Function: sd_get_nv_sup()
9815 *
9816 * Description: This routine is the driver entry point for
9817 * determining whether non-volatile cache is supported. This
9818 * determination process works as follows:
9819 *
9820 * 1. sd first queries sd.conf to see whether the
9821 * suppress_cache_flush bit is set for this device.
9822 *
9823 * 2. if not set there, it then queries the internal disk table.
9824 *
9825 * 3. if either sd.conf or the internal disk table specifies
9826 * that cache flush be suppressed, we don't bother checking
9827 * the NV_SUP bit.
9828 *
9829 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9830 * the optional INQUIRY VPD page 0x86. If the device
9831 * supports VPD page 0x86, sd examines the NV_SUP
9832 * (non-volatile cache support) bit in the INQUIRY VPD page
9833 * 0x86:
9834 * o If the NV_SUP bit is set, sd assumes the device has a
9835 * non-volatile cache and sets
9836 * un_f_sync_nv_supported to TRUE.
9837 * o Otherwise the cache is not non-volatile, and
9838 * un_f_sync_nv_supported is set to FALSE.
9839 *
9840 * Arguments: ssc - ssc contains pointer to driver soft state
9841 * (unit) structure for this target
9842 * Return Code: none
9843 *
9844 * Context: Kernel Thread
9845 */
9846
9847 static void
9848 sd_get_nv_sup(sd_ssc_t *ssc)
9849 {
9850 int rval = 0;
9851 uchar_t *inq86 = NULL;
9852 size_t inq86_len = MAX_INQUIRY_SIZE;
9853 size_t inq86_resid = 0;
9854 struct dk_callback *dkc;
9855 struct sd_lun *un;
9856
9857 ASSERT(ssc != NULL);
9858 un = ssc->ssc_un;
9859 ASSERT(un != NULL);
9860
9861 mutex_enter(SD_MUTEX(un));
9862
9863 /*
9864 * Be conservative on the device's support of
9865 * the SYNC_NV bit: un_f_sync_nv_supported is
9866 * initialized to be false.
9867 */
9868 un->un_f_sync_nv_supported = FALSE;
9869
9870 /*
9871 * If either sd.conf or the internal disk table
9872 * specifies that cache flush be suppressed, then
9873 * we don't bother checking the NV_SUP bit.
9874 */
9875 if (un->un_f_suppress_cache_flush == TRUE) {
9876 mutex_exit(SD_MUTEX(un));
9877 return;
9878 }
9879
9880 if (sd_check_vpd_page_support(ssc) == 0 &&
9881 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9882 mutex_exit(SD_MUTEX(un));
9883 /* collect page 86 data if available */
9884 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9885
9886 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9887 0x01, 0x86, &inq86_resid);
9888
9889 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9890 SD_TRACE(SD_LOG_COMMON, un,
9891 "sd_get_nv_sup: \
9892 successfully get VPD page: %x \
9893 PAGE LENGTH: %x BYTE 6: %x\n",
9894 inq86[1], inq86[3], inq86[6]);
9895
9896 mutex_enter(SD_MUTEX(un));
9897 /*
9898 * Check the value of the NV_SUP bit: only if the
9899 * device reports the NV_SUP bit to be 1 will the
9900 * un_f_sync_nv_supported bit be set to true.
9901 */
9902 if (inq86[6] & SD_VPD_NV_SUP) {
9903 un->un_f_sync_nv_supported = TRUE;
9904 }
9905 mutex_exit(SD_MUTEX(un));
9906 } else if (rval != 0) {
9907 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9908 }
9909
9910 kmem_free(inq86, inq86_len);
9911 } else {
9912 mutex_exit(SD_MUTEX(un));
9913 }
9914
9915 /*
9916 * Send a SYNC CACHE command to check whether the
9917 * SYNC_NV bit is supported; by this point
9918 * un_f_sync_nv_supported must hold the correct value.
9919 */
9920 mutex_enter(SD_MUTEX(un));
9921 if (un->un_f_sync_nv_supported) {
9922 mutex_exit(SD_MUTEX(un));
9923 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9924 dkc->dkc_flag = FLUSH_VOLATILE;
9925 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9926
9927 /*
9928 * Send a TEST UNIT READY command to the device. This should
9929 * clear any outstanding UNIT ATTENTION that may be present.
9930 */
9931 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9932 if (rval != 0)
9933 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9934
9935 kmem_free(dkc, sizeof (struct dk_callback));
9936 } else {
9937 mutex_exit(SD_MUTEX(un));
9938 }
9939
9940 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9941 un_f_suppress_cache_flush is set to %d\n",
9942 un->un_f_suppress_cache_flush);
9943 }
9944
9945 /*
9946 * Function: sd_make_device
9947 *
9948 * Description: Utility routine to return the Solaris device number from
9949 * the data in the device's dev_info structure.
9950 *
9951 * Return Code: The Solaris device number
9952 *
9953 * Context: Any
9954 */
9955
9956 static dev_t
9957 sd_make_device(dev_info_t *devi)
9958 {
9959 return (makedevice(ddi_driver_major(devi),
9960 ddi_get_instance(devi) << SDUNIT_SHIFT));
9961 }
9962
9963
9964 /*
9965 * Function: sd_pm_entry
9966 *
9967 * Description: Called at the start of a new command to manage power
9968 * and busy status of a device. This includes determining whether
9969 * the current power state of the device is sufficient for
9970 * performing the command or whether it must be changed.
9971 * The PM framework is notified appropriately.
9972 * Only with a return status of DDI_SUCCESS will the
9973 * component be marked busy to the framework.
9974 *
9975 * All callers of sd_pm_entry must check the return status
9976 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9977 * of DDI_FAILURE indicates the device failed to power up.
9978 * In this case un_pm_count has been adjusted so the result
9979 * on exit is still powered down, i.e. count is less than 0.
9980 * Calling sd_pm_exit with this count value hits an ASSERT.
9981 *
9982 * Return Code: DDI_SUCCESS or DDI_FAILURE
9983 *
9984 * Context: Kernel thread context.
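 *
 * A typical caller follows this pattern (a sketch of how sdopen()
 * and sdclose() in this file use it):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		... issue the command(s) ...
 *		sd_pm_exit(un);
 *	} else {
 *		rval = EIO;
 *	}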
9985 */
9986
9987 static int
9988 sd_pm_entry(struct sd_lun *un)
9989 {
9990 int return_status = DDI_SUCCESS;
9991
9992 ASSERT(!mutex_owned(SD_MUTEX(un)));
9993 ASSERT(!mutex_owned(&un->un_pm_mutex));
9994
9995 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9996
9997 if (un->un_f_pm_is_enabled == FALSE) {
9998 SD_TRACE(SD_LOG_IO_PM, un,
9999 "sd_pm_entry: exiting, PM not enabled\n");
10000 return (return_status);
10001 }
10002
10003 /*
10004 * Just increment a counter if PM is enabled. On the transition from
10005 * 0 ==> 1, mark the device as busy. The iodone side will decrement
10006 * the count with each IO and mark the device as idle when the count
10007 * hits 0.
10008 *
10009 * If the count is less than 0 the device is powered down. If a powered
10010 * down device is successfully powered up then the count must be
10011 * incremented to reflect the power up. Note that it'll get incremented
10012 * a second time to become busy.
10013 *
10014 * Because the following has the potential to change the device state
10015 * and must release the un_pm_mutex to do so, only one thread can be
10016 * allowed through at a time.
10017 */
10018
10019 mutex_enter(&un->un_pm_mutex);
10020 while (un->un_pm_busy == TRUE) {
10021 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
10022 }
10023 un->un_pm_busy = TRUE;
10024
10025 if (un->un_pm_count < 1) {
10026
10027 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
10028
10029 /*
10030 * Indicate we are now busy so the framework won't attempt to
10031 * power down the device. This call will only fail if either
10032 * we passed a bad component number or the device has no
10033 * components. Neither of these should ever happen.
10034 */
10035 mutex_exit(&un->un_pm_mutex);
10036 return_status = pm_busy_component(SD_DEVINFO(un), 0);
10037 ASSERT(return_status == DDI_SUCCESS);
10038
10039 mutex_enter(&un->un_pm_mutex);
10040
10041 if (un->un_pm_count < 0) {
10042 mutex_exit(&un->un_pm_mutex);
10043
10044 SD_TRACE(SD_LOG_IO_PM, un,
10045 "sd_pm_entry: power up component\n");
10046
10047 /*
10048 * pm_raise_power will cause sdpower to be called
10049 * which brings the device power level to the
10050 * desired state. If successful, un_pm_count and
10051 * un_power_level will be updated appropriately.
10052 */
10053 return_status = pm_raise_power(SD_DEVINFO(un), 0,
10054 SD_PM_STATE_ACTIVE(un));
10055
10056 mutex_enter(&un->un_pm_mutex);
10057
10058 if (return_status != DDI_SUCCESS) {
10059 /*
10060 * Power up failed.
10061 * Idle the device and adjust the count
10062 * so the result on exit is that we're
10063 * still powered down, i.e. count is less than 0.
10064 */
10065 SD_TRACE(SD_LOG_IO_PM, un,
10066 "sd_pm_entry: power up failed,"
10067 " idle the component\n");
10068
10069 (void) pm_idle_component(SD_DEVINFO(un), 0);
10070 un->un_pm_count--;
10071 } else {
10072 /*
10073 * Device is powered up, verify the
10074 * count is non-negative.
10075 * This is debug only.
10076 */
10077 ASSERT(un->un_pm_count == 0);
10078 }
10079 }
10080
10081 if (return_status == DDI_SUCCESS) {
10082 /*
10083 * For performance, now that the device has been tagged
10084 * as busy, and it's known to be powered up, update the
10085 * chain types to use jump tables that do not include
10086 * pm. This significantly lowers the overhead and
10087 * therefore improves performance.
10088 */ 10089 10090 mutex_exit(&un->un_pm_mutex); 10091 mutex_enter(SD_MUTEX(un)); 10092 SD_TRACE(SD_LOG_IO_PM, un, 10093 "sd_pm_entry: changing uscsi_chain_type from %d\n", 10094 un->un_uscsi_chain_type); 10095 10096 if (un->un_f_non_devbsize_supported) { 10097 un->un_buf_chain_type = 10098 SD_CHAIN_INFO_RMMEDIA_NO_PM; 10099 } else { 10100 un->un_buf_chain_type = 10101 SD_CHAIN_INFO_DISK_NO_PM; 10102 } 10103 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 10104 10105 SD_TRACE(SD_LOG_IO_PM, un, 10106 " changed uscsi_chain_type to %d\n", 10107 un->un_uscsi_chain_type); 10108 mutex_exit(SD_MUTEX(un)); 10109 mutex_enter(&un->un_pm_mutex); 10110 10111 if (un->un_pm_idle_timeid == NULL) { 10112 /* 300 ms. */ 10113 un->un_pm_idle_timeid = 10114 timeout(sd_pm_idletimeout_handler, un, 10115 (drv_usectohz((clock_t)300000))); 10116 /* 10117 * Include an extra call to busy which keeps the 10118 * device busy with-respect-to the PM layer 10119 * until the timer fires, at which time it'll 10120 * get the extra idle call. 10121 */ 10122 (void) pm_busy_component(SD_DEVINFO(un), 0); 10123 } 10124 } 10125 } 10126 un->un_pm_busy = FALSE; 10127 /* Next... */ 10128 cv_signal(&un->un_pm_busy_cv); 10129 10130 un->un_pm_count++; 10131 10132 SD_TRACE(SD_LOG_IO_PM, un, 10133 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 10134 10135 mutex_exit(&un->un_pm_mutex); 10136 10137 return (return_status); 10138 } 10139 10140 10141 /* 10142 * Function: sd_pm_exit 10143 * 10144 * Description: Called at the completion of a command to manage busy 10145 * status for the device. If the device becomes idle the 10146 * PM framework is notified. 10147 * 10148 * Context: Kernel thread context 10149 */ 10150 10151 static void 10152 sd_pm_exit(struct sd_lun *un) 10153 { 10154 ASSERT(!mutex_owned(SD_MUTEX(un))); 10155 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10156 10157 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10158 10159 /* 10160 * After attach the following flag is only read, so don't 10161 * take the penalty of acquiring a mutex for it. 10162 */ 10163 if (un->un_f_pm_is_enabled == TRUE) { 10164 10165 mutex_enter(&un->un_pm_mutex); 10166 un->un_pm_count--; 10167 10168 SD_TRACE(SD_LOG_IO_PM, un, 10169 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10170 10171 ASSERT(un->un_pm_count >= 0); 10172 if (un->un_pm_count == 0) { 10173 mutex_exit(&un->un_pm_mutex); 10174 10175 SD_TRACE(SD_LOG_IO_PM, un, 10176 "sd_pm_exit: idle component\n"); 10177 10178 (void) pm_idle_component(SD_DEVINFO(un), 0); 10179 10180 } else { 10181 mutex_exit(&un->un_pm_mutex); 10182 } 10183 } 10184 10185 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10186 } 10187 10188 10189 /* 10190 * Function: sdopen 10191 * 10192 * Description: Driver's open(9e) entry point function. 
10193 *
10194 * Arguments: dev_p - pointer to device number
10195 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10196 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10197 * cred_p - user credential pointer
10198 *
10199 * Return Code: EINVAL
10200 * ENXIO
10201 * EIO
10202 * EROFS
10203 * EBUSY
10204 *
10205 * Context: Kernel thread context
10206 */
10207 /* ARGSUSED */
10208 static int
10209 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10210 {
10211 struct sd_lun *un;
10212 int nodelay;
10213 int part;
10214 uint64_t partmask;
10215 int instance;
10216 dev_t dev;
10217 int rval = EIO;
10218 diskaddr_t nblks = 0;
10219 diskaddr_t label_cap;
10220
10221 /* Validate the open type */
10222 if (otyp >= OTYPCNT) {
10223 return (EINVAL);
10224 }
10225
10226 dev = *dev_p;
10227 instance = SDUNIT(dev);
10228 mutex_enter(&sd_detach_mutex);
10229
10230 /*
10231 * Fail the open if there is no softstate for the instance, or
10232 * if another thread somewhere is trying to detach the instance.
10233 */
10234 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10235 (un->un_detach_count != 0)) {
10236 mutex_exit(&sd_detach_mutex);
10237 /*
10238 * The probe cache only needs to be cleared when open (9e) fails
10239 * with ENXIO (4238046).
10240 */
10241 /*
10242 * Unconditionally clearing the probe cache is ok with
10243 * separate sd/ssd binaries; the x86 platform can be an
10244 * issue, with both parallel and fibre supported in a
10245 * single binary.
10246 */
10247 sd_scsi_clear_probe_cache();
10248 return (ENXIO);
10249 }
10250
10251 /*
10252 * The un_layer_count is to prevent another thread in specfs from
10253 * trying to detach the instance, which can happen when we are
10254 * called from a higher-layer driver instead of through specfs.
10255 * This will not be needed when DDI provides a layered driver
10256 * interface that allows specfs to know that an instance is in
10257 * use by a layered driver & should not be detached.
10258 *
10259 * Note: the semantics for layered driver opens are exactly one
10260 * close for every open.
10261 */
10262 if (otyp == OTYP_LYR) {
10263 un->un_layer_count++;
10264 }
10265
10266 /*
10267 * Keep a count of the current # of opens in progress. This is because
10268 * some layered drivers try to call us as a regular open. This can
10269 * cause problems that we cannot prevent; however, by keeping this
10270 * count we can at least keep our open and detach routines from
10271 * racing against each other under such conditions.
10272 */
10273 un->un_opens_in_progress++;
10274 mutex_exit(&sd_detach_mutex);
10275
10276 nodelay = (flag & (FNDELAY | FNONBLOCK));
10277 part = SDPART(dev);
10278 partmask = 1 << part;
10279
10280 /*
10281 * We use a semaphore here in order to serialize
10282 * open and close requests on the device.
10283 */
10284 sema_p(&un->un_semoclose);
10285
10286 mutex_enter(SD_MUTEX(un));
10287
10288 /*
10289 * All device accesses go through sdstrategy(), where we check
10290 * on suspend status; but there could be a scsi_poll command,
10291 * which bypasses sdstrategy(), so we need to check pm
10292 * status.
10293 */
10294
10295 if (!nodelay) {
10296 while ((un->un_state == SD_STATE_SUSPENDED) ||
10297 (un->un_state == SD_STATE_PM_CHANGING)) {
10298 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10299 }
10300
10301 mutex_exit(SD_MUTEX(un));
10302 if (sd_pm_entry(un) != DDI_SUCCESS) {
10303 rval = EIO;
10304 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10305 "sdopen: sd_pm_entry failed\n");
10306 goto open_failed_with_pm;
10307 }
10308 mutex_enter(SD_MUTEX(un));
10309 }
10310
10311 /* check for previous exclusive open */
10312 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10313 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10314 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10315 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10316
10317 if (un->un_exclopen & (partmask)) {
10318 goto excl_open_fail;
10319 }
10320
10321 if (flag & FEXCL) {
10322 int i;
10323 if (un->un_ocmap.lyropen[part]) {
10324 goto excl_open_fail;
10325 }
10326 for (i = 0; i < (OTYPCNT - 1); i++) {
10327 if (un->un_ocmap.regopen[i] & (partmask)) {
10328 goto excl_open_fail;
10329 }
10330 }
10331 }
10332
10333 /*
10334 * Check the write permission if this is a removable media device,
10335 * NDELAY has not been set, and writable permission is requested.
10336 *
10337 * Note: If NDELAY was set and this is write-protected media the WRITE
10338 * attempt will fail with EIO as part of the I/O processing. This is a
10339 * more permissive implementation that allows the open to succeed and
10340 * WRITE attempts to fail when appropriate.
10341 */
10342 if (un->un_f_chk_wp_open) {
10343 if ((flag & FWRITE) && (!nodelay)) {
10344 mutex_exit(SD_MUTEX(un));
10345 /*
10346 * Defer the check for write permission on a writable
10347 * DVD drive till sdstrategy; do not fail the open even
10348 * if FWRITE is set, as the device can be writable
10349 * depending upon the media, and the media can change
10350 * after the call to open().
10351 */
10352 if (un->un_f_dvdram_writable_device == FALSE) {
10353 if (ISCD(un) || sr_check_wp(dev)) {
10354 rval = EROFS;
10355 mutex_enter(SD_MUTEX(un));
10356 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10357 "write to cd or write protected media\n");
10358 goto open_fail;
10359 }
10360 }
10361 mutex_enter(SD_MUTEX(un));
10362 }
10363 }
10364
10365 /*
10366 * If opening in NDELAY/NONBLOCK mode, just return.
10367 * Check if disk is ready and has a valid geometry later.
10368 */
10369 if (!nodelay) {
10370 sd_ssc_t *ssc;
10371
10372 mutex_exit(SD_MUTEX(un));
10373 ssc = sd_ssc_init(un);
10374 rval = sd_ready_and_valid(ssc, part);
10375 sd_ssc_fini(ssc);
10376 mutex_enter(SD_MUTEX(un));
10377 /*
10378 * Fail if device is not ready or if the number of disk
10379 * blocks is zero or negative for non-CD devices.
10380 */
10381
10382 nblks = 0;
10383
10384 if (rval == SD_READY_VALID && (!ISCD(un))) {
10385 /* if cmlb_partinfo fails, nblks remains 0 */
10386 mutex_exit(SD_MUTEX(un));
10387 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10388 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10389 mutex_enter(SD_MUTEX(un));
10390 }
10391
10392 if ((rval != SD_READY_VALID) ||
10393 (!ISCD(un) && nblks <= 0)) {
10394 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10395 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10396 "device not ready or invalid disk block value\n");
10397 goto open_fail;
10398 }
10399 #if defined(__i386) || defined(__amd64)
10400 } else {
10401 uchar_t *cp;
10402 /*
10403 * x86 requires special nodelay handling, so that p0 is
10404 * always defined and accessible.
10405 * Invalidate geometry only if device is not already open. 10406 */ 10407 cp = &un->un_ocmap.chkd[0]; 10408 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10409 if (*cp != (uchar_t)0) { 10410 break; 10411 } 10412 cp++; 10413 } 10414 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10415 mutex_exit(SD_MUTEX(un)); 10416 cmlb_invalidate(un->un_cmlbhandle, 10417 (void *)SD_PATH_DIRECT); 10418 mutex_enter(SD_MUTEX(un)); 10419 } 10420 10421 #endif 10422 } 10423 10424 if (otyp == OTYP_LYR) { 10425 un->un_ocmap.lyropen[part]++; 10426 } else { 10427 un->un_ocmap.regopen[otyp] |= partmask; 10428 } 10429 10430 /* Set up open and exclusive open flags */ 10431 if (flag & FEXCL) { 10432 un->un_exclopen |= (partmask); 10433 } 10434 10435 /* 10436 * If the lun is EFI labeled and lun capacity is greater than the 10437 * capacity contained in the label, log a sys-event to notify the 10438 * interested module. 10439 * To avoid an infinite loop of logging sys-event, we only log the 10440 * event when the lun is not opened in NDELAY mode. The event handler 10441 * should open the lun in NDELAY mode. 10442 */ 10443 if (!nodelay) { 10444 mutex_exit(SD_MUTEX(un)); 10445 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10446 (void*)SD_PATH_DIRECT) == 0) { 10447 mutex_enter(SD_MUTEX(un)); 10448 if (un->un_f_blockcount_is_valid && 10449 un->un_blockcount > label_cap && 10450 un->un_f_expnevent == B_FALSE) { 10451 un->un_f_expnevent = B_TRUE; 10452 mutex_exit(SD_MUTEX(un)); 10453 sd_log_lun_expansion_event(un, 10454 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10455 mutex_enter(SD_MUTEX(un)); 10456 } 10457 } else { 10458 mutex_enter(SD_MUTEX(un)); 10459 } 10460 } 10461 10462 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10463 "open of part %d type %d\n", part, otyp); 10464 10465 mutex_exit(SD_MUTEX(un)); 10466 if (!nodelay) { 10467 sd_pm_exit(un); 10468 } 10469 10470 sema_v(&un->un_semoclose); 10471 10472 mutex_enter(&sd_detach_mutex); 10473 un->un_opens_in_progress--; 10474 mutex_exit(&sd_detach_mutex); 10475 10476 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10477 return (DDI_SUCCESS); 10478 10479 excl_open_fail: 10480 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10481 rval = EBUSY; 10482 10483 open_fail: 10484 mutex_exit(SD_MUTEX(un)); 10485 10486 /* 10487 * On a failed open we must exit the pm management. 10488 */ 10489 if (!nodelay) { 10490 sd_pm_exit(un); 10491 } 10492 open_failed_with_pm: 10493 sema_v(&un->un_semoclose); 10494 10495 mutex_enter(&sd_detach_mutex); 10496 un->un_opens_in_progress--; 10497 if (otyp == OTYP_LYR) { 10498 un->un_layer_count--; 10499 } 10500 mutex_exit(&sd_detach_mutex); 10501 10502 return (rval); 10503 } 10504 10505 10506 /* 10507 * Function: sdclose 10508 * 10509 * Description: Driver's close(9e) entry point function. 
10510 *
10511 * Arguments: dev - device number
10512 * flag - file status flag, informational only
10513 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10514 * cred_p - user credential pointer
10515 *
10516 * Return Code: ENXIO
10517 *
10518 * Context: Kernel thread context
10519 */
10520 /* ARGSUSED */
10521 static int
10522 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10523 {
10524 struct sd_lun *un;
10525 uchar_t *cp;
10526 int part;
10527 int nodelay;
10528 int rval = 0;
10529
10530 /* Validate the open type */
10531 if (otyp >= OTYPCNT) {
10532 return (ENXIO);
10533 }
10534
10535 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10536 return (ENXIO);
10537 }
10538
10539 part = SDPART(dev);
10540 nodelay = flag & (FNDELAY | FNONBLOCK);
10541
10542 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10543 "sdclose: close of part %d type %d\n", part, otyp);
10544
10545 /*
10546 * We use a semaphore here in order to serialize
10547 * open and close requests on the device.
10548 */
10549 sema_p(&un->un_semoclose);
10550
10551 mutex_enter(SD_MUTEX(un));
10552
10553 /* Don't proceed if power is being changed. */
10554 while (un->un_state == SD_STATE_PM_CHANGING) {
10555 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10556 }
10557
10558 if (un->un_exclopen & (1 << part)) {
10559 un->un_exclopen &= ~(1 << part);
10560 }
10561
10562 /* Update the open partition map */
10563 if (otyp == OTYP_LYR) {
10564 un->un_ocmap.lyropen[part] -= 1;
10565 } else {
10566 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10567 }
10568
10569 cp = &un->un_ocmap.chkd[0];
10570 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10571 if (*cp != '\0') {
10572 break;
10573 }
10574 cp++;
10575 }
10576
10577 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10578 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10579
10580 /*
10581 * We avoid persistence upon the last close, and set
10582 * the throttle back to the maximum.
10583 */
10584 un->un_throttle = un->un_saved_throttle;
10585
10586 if (un->un_state == SD_STATE_OFFLINE) {
10587 if (un->un_f_is_fibre == FALSE) {
10588 scsi_log(SD_DEVINFO(un), sd_label,
10589 CE_WARN, "offline\n");
10590 }
10591 mutex_exit(SD_MUTEX(un));
10592 cmlb_invalidate(un->un_cmlbhandle,
10593 (void *)SD_PATH_DIRECT);
10594 mutex_enter(SD_MUTEX(un));
10595
10596 } else {
10597 /*
10598 * Flush any outstanding writes in NVRAM cache.
10599 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10600 * cmd; it may not work for non-Pluto devices.
10601 * SYNCHRONIZE CACHE is not required for removables,
10602 * except DVD-RAM drives.
10603 *
10604 * Also note: because SYNCHRONIZE CACHE is currently
10605 * the only command issued here that requires the
10606 * drive be powered up, only do the power up before
10607 * sending the Sync Cache command. If additional
10608 * commands are added which require a powered up
10609 * drive, the following sequence may have to change.
10610 *
10611 * And finally, note that parallel SCSI on SPARC
10612 * only issues a Sync Cache to DVD-RAM, a newly
10613 * supported device.
10614 */
10615 #if defined(__i386) || defined(__amd64)
10616 if ((un->un_f_sync_cache_supported &&
10617 un->un_f_sync_cache_required) ||
10618 un->un_f_dvdram_writable_device == TRUE) {
10619 #else
10620 if (un->un_f_dvdram_writable_device == TRUE) {
10621 #endif
10622 mutex_exit(SD_MUTEX(un));
10623 if (sd_pm_entry(un) == DDI_SUCCESS) {
10624 rval =
10625 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10626 NULL);
10627 /* ignore error if not supported */
10628 if (rval == ENOTSUP) {
10629 rval = 0;
10630 } else if (rval != 0) {
10631 rval = EIO;
10632 }
10633 sd_pm_exit(un);
10634 } else {
10635 rval = EIO;
10636 }
10637 mutex_enter(SD_MUTEX(un));
10638 }
10639
10640 /*
10641 * For devices which support DOOR_LOCK, send an ALLOW
10642 * MEDIA REMOVAL command, but don't get upset if it
10643 * fails. We need to raise the power of the drive before
10644 * we can call sd_send_scsi_DOORLOCK()
10645 */
10646 if (un->un_f_doorlock_supported) {
10647 mutex_exit(SD_MUTEX(un));
10648 if (sd_pm_entry(un) == DDI_SUCCESS) {
10649 sd_ssc_t *ssc;
10650
10651 ssc = sd_ssc_init(un);
10652 rval = sd_send_scsi_DOORLOCK(ssc,
10653 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10654 if (rval != 0)
10655 sd_ssc_assessment(ssc,
10656 SD_FMT_IGNORE);
10657 sd_ssc_fini(ssc);
10658
10659 sd_pm_exit(un);
10660 if (ISCD(un) && (rval != 0) &&
10661 (nodelay != 0)) {
10662 rval = ENXIO;
10663 }
10664 } else {
10665 rval = EIO;
10666 }
10667 mutex_enter(SD_MUTEX(un));
10668 }
10669
10670 /*
10671 * If a device has removable media, invalidate all
10672 * parameters related to media, such as geometry,
10673 * blocksize, and blockcount.
10674 */
10675 if (un->un_f_has_removable_media) {
10676 sr_ejected(un);
10677 }
10678
10679 /*
10680 * Destroy the cache (if it exists) which was
10681 * allocated for the write maps since this is
10682 * the last close for this media.
10683 */
10684 if (un->un_wm_cache) {
10685 /*
10686 * Check if there are pending commands;
10687 * if there are, give a warning and
10688 * do not destroy the cache.
10689 */
10690 if (un->un_ncmds_in_driver > 0) {
10691 scsi_log(SD_DEVINFO(un),
10692 sd_label, CE_WARN,
10693 "Unable to clean up memory "
10694 "because of pending I/O\n");
10695 } else {
10696 kmem_cache_destroy(
10697 un->un_wm_cache);
10698 un->un_wm_cache = NULL;
10699 }
10700 }
10701 }
10702 }
10703
10704 mutex_exit(SD_MUTEX(un));
10705 sema_v(&un->un_semoclose);
10706
10707 if (otyp == OTYP_LYR) {
10708 mutex_enter(&sd_detach_mutex);
10709 /*
10710 * The detach routine may run when the layer count
10711 * drops to zero.
10712 */
10713 un->un_layer_count--;
10714 mutex_exit(&sd_detach_mutex);
10715 }
10716
10717 return (rval);
10718 }
10719
10720
10721 /*
10722 * Function: sd_ready_and_valid
10723 *
10724 * Description: Test if device is ready and has a valid geometry.
10725 *
10726 * Arguments: ssc - sd_ssc_t that contains un, the driver
10727 * soft state (unit) structure
10728 * part - partition number to be validated
10729 * Return Code: SD_READY_VALID ready and valid label
10730 * SD_NOT_READY_VALID not ready, no label
10731 * SD_RESERVED_BY_OTHERS reservation conflict
10732 *
10733 * Context: Never called at interrupt context.
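 *
 * A sketch of how the return value is consumed (this mirrors the use
 * in sdopen() in this file):
 *
 *	rval = sd_ready_and_valid(ssc, part);
 *	if (rval != SD_READY_VALID)
 *		rval = un->un_f_has_removable_media ? ENXIO : EIO;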
10734 */
10735
10736 static int
10737 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10738 {
10739 struct sd_errstats *stp;
10740 uint64_t capacity;
10741 uint_t lbasize;
10742 int rval = SD_READY_VALID;
10743 char name_str[48];
10744 boolean_t is_valid;
10745 struct sd_lun *un;
10746 int status;
10747
10748 ASSERT(ssc != NULL);
10749 un = ssc->ssc_un;
10750 ASSERT(un != NULL);
10751 ASSERT(!mutex_owned(SD_MUTEX(un)));
10752
10753 mutex_enter(SD_MUTEX(un));
10754 /*
10755 * If a device has removable media, we must check if media is
10756 * ready when checking if this device is ready and valid.
10757 */
10758 if (un->un_f_has_removable_media) {
10759 mutex_exit(SD_MUTEX(un));
10760 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10761
10762 if (status != 0) {
10763 rval = SD_NOT_READY_VALID;
10764 mutex_enter(SD_MUTEX(un));
10765
10766 /* Ignore all failed status for removable media */
10767 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10768
10769 goto done;
10770 }
10771
10772 is_valid = SD_IS_VALID_LABEL(un);
10773 mutex_enter(SD_MUTEX(un));
10774 if (!is_valid ||
10775 (un->un_f_blockcount_is_valid == FALSE) ||
10776 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10777
10778 /* capacity has to be read every open. */
10779 mutex_exit(SD_MUTEX(un));
10780 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10781 &lbasize, SD_PATH_DIRECT);
10782
10783 if (status != 0) {
10784 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10785
10786 cmlb_invalidate(un->un_cmlbhandle,
10787 (void *)SD_PATH_DIRECT);
10788 mutex_enter(SD_MUTEX(un));
10789 rval = SD_NOT_READY_VALID;
10790
10791 goto done;
10792 } else {
10793 mutex_enter(SD_MUTEX(un));
10794 sd_update_block_info(un, lbasize, capacity);
10795 }
10796 }
10797
10798 /*
10799 * Check if the media in the device is writable or not.
10800 */
10801 if (!is_valid && ISCD(un)) {
10802 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10803 }
10804
10805 } else {
10806 /*
10807 * Do a test unit ready to clear any unit attention from non-cd
10808 * devices.
10809 */
10810 mutex_exit(SD_MUTEX(un));
10811
10812 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10813 if (status != 0) {
10814 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10815 }
10816
10817 mutex_enter(SD_MUTEX(un));
10818 }
10819
10820
10821 /*
10822 * If this is a non-512 block device, allocate space for
10823 * the wmap cache. This is being done here since this routine
10824 * is called every time the media is changed, and the block
10825 * size is a function of the media rather than the device.
10826 */
10827 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10828 un->un_f_non_devbsize_supported) &&
10829 un->un_tgt_blocksize != DEV_BSIZE) ||
10830 un->un_f_enable_rmw) {
10831 if (!(un->un_wm_cache)) {
10832 (void) snprintf(name_str, sizeof (name_str),
10833 "%s%d_cache",
10834 ddi_driver_name(SD_DEVINFO(un)),
10835 ddi_get_instance(SD_DEVINFO(un)));
10836 un->un_wm_cache = kmem_cache_create(
10837 name_str, sizeof (struct sd_w_map),
10838 8, sd_wm_cache_constructor,
10839 sd_wm_cache_destructor, NULL,
10840 (void *)un, NULL, 0);
10841 if (!(un->un_wm_cache)) {
10842 rval = ENOMEM;
10843 goto done;
10844 }
10845 }
10846 }
10847
10848 if (un->un_state == SD_STATE_NORMAL) {
10849 /*
10850 * If the target is not yet ready here (defined by a TUR
10851 * failure), invalidate the geometry and print an 'offline'
10852 * message. This is a legacy message, as the state of the
10853 * target is not actually changed to SD_STATE_OFFLINE.
10854 *
10855 * If the TUR fails for EACCES (Reservation Conflict),
10856 * SD_RESERVED_BY_OTHERS will be returned to indicate
10857 * reservation conflict. If the TUR fails for other
10858 * reasons, SD_NOT_READY_VALID will be returned.
10859 */
10860 int err;
10861
10862 mutex_exit(SD_MUTEX(un));
10863 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10864 mutex_enter(SD_MUTEX(un));
10865
10866 if (err != 0) {
10867 mutex_exit(SD_MUTEX(un));
10868 cmlb_invalidate(un->un_cmlbhandle,
10869 (void *)SD_PATH_DIRECT);
10870 mutex_enter(SD_MUTEX(un));
10871 if (err == EACCES) {
10872 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10873 "reservation conflict\n");
10874 rval = SD_RESERVED_BY_OTHERS;
10875 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10876 } else {
10877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10878 "drive offline\n");
10879 rval = SD_NOT_READY_VALID;
10880 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10881 }
10882 goto done;
10883 }
10884 }
10885
10886 if (un->un_f_format_in_progress == FALSE) {
10887 mutex_exit(SD_MUTEX(un));
10888
10889 (void) cmlb_validate(un->un_cmlbhandle, 0,
10890 (void *)SD_PATH_DIRECT);
10891 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10892 NULL, (void *) SD_PATH_DIRECT) != 0) {
10893 rval = SD_NOT_READY_VALID;
10894 mutex_enter(SD_MUTEX(un));
10895
10896 goto done;
10897 }
10898 if (un->un_f_pkstats_enabled) {
10899 sd_set_pstats(un);
10900 SD_TRACE(SD_LOG_IO_PARTITION, un,
10901 "sd_ready_and_valid: un:0x%p pstats created and "
10902 "set\n", un);
10903 }
10904 mutex_enter(SD_MUTEX(un));
10905 }
10906
10907 /*
10908 * If this device supports the DOOR_LOCK command, try to send
10909 * this command to PREVENT MEDIA REMOVAL, but don't get upset
10910 * if it fails. For a CD, however, a failure is an error.
10911 */
10912 if (un->un_f_doorlock_supported) {
10913 mutex_exit(SD_MUTEX(un));
10914 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10915 SD_PATH_DIRECT);
10916
10917 if ((status != 0) && ISCD(un)) {
10918 rval = SD_NOT_READY_VALID;
10919 mutex_enter(SD_MUTEX(un));
10920
10921 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10922
10923 goto done;
10924 } else if (status != 0)
10925 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10926 mutex_enter(SD_MUTEX(un));
10927 }
10928
10929 /* The state has changed, inform the media watch routines */
10930 un->un_mediastate = DKIO_INSERTED;
10931 cv_broadcast(&un->un_state_cv);
10932 rval = SD_READY_VALID;
10933
10934 done:
10935
10936 /*
10937 * Initialize the capacity kstat value if there was no media
10938 * previously (capacity kstat is 0) and media has now been
10939 * inserted (un_blockcount > 0).
10940 */
10941 if (un->un_errstats != NULL) {
10942 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10943 if ((stp->sd_capacity.value.ui64 == 0) &&
10944 (un->un_f_blockcount_is_valid == TRUE)) {
10945 stp->sd_capacity.value.ui64 =
10946 (uint64_t)((uint64_t)un->un_blockcount *
10947 un->un_sys_blocksize);
10948 }
10949 }
10950
10951 mutex_exit(SD_MUTEX(un));
10952 return (rval);
10953 }
10954
10955
10956 /*
10957 * Function: sdmin
10958 *
10959 * Description: Routine to limit the size of a data transfer. Used in
10960 * conjunction with physio(9F).
10961 *
10962 * Arguments: bp - pointer to the indicated buf(9S) struct.
10963 *
10964 * Context: Kernel thread context.
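 *
 * Note (illustrative sketch of the physio(9F) contract, not driver
 * code): physio invokes the supplied minphys routine before moving
 * each chunk, so the cap applied in sdmin() bounds every transfer,
 * roughly:
 *
 *	bp->b_bcount = chunk_size;
 *	sdmin(bp);
 *	-- transfer bp->b_bcount bytes; repeat for any remainder --
 *
 * The read/write entry points below pass sdmin to physio()/aphysio().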
10965 */
10966
10967 static void
10968 sdmin(struct buf *bp)
10969 {
10970 struct sd_lun *un;
10971 int instance;
10972
10973 instance = SDUNIT(bp->b_edev);
10974
10975 un = ddi_get_soft_state(sd_state, instance);
10976 ASSERT(un != NULL);
10977
10978 /*
10979 * We depend on buf breakup to restrict
10980 * IO size if it is enabled.
10981 */
10982 if (un->un_buf_breakup_supported) {
10983 return;
10984 }
10985
10986 if (bp->b_bcount > un->un_max_xfer_size) {
10987 bp->b_bcount = un->un_max_xfer_size;
10988 }
10989 }
10990
10991
10992 /*
10993 * Function: sdread
10994 *
10995 * Description: Driver's read(9e) entry point function.
10996 *
10997 * Arguments: dev - device number
10998 * uio - structure pointer describing where data is to be stored
10999 * in user's space
11000 * cred_p - user credential pointer
11001 *
11002 * Return Code: ENXIO
11003 * EIO
11004 * EINVAL
11005 * value returned by physio
11006 *
11007 * Context: Kernel thread context.
11008 */
11009 /* ARGSUSED */
11010 static int
11011 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
11012 {
11013 struct sd_lun *un = NULL;
11014 int secmask;
11015 int err = 0;
11016 sd_ssc_t *ssc;
11017
11018 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11019 return (ENXIO);
11020 }
11021
11022 ASSERT(!mutex_owned(SD_MUTEX(un)));
11023
11024
11025 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11026 mutex_enter(SD_MUTEX(un));
11027 /*
11028 * Because the call to sd_ready_and_valid will issue I/O, we
11029 * must wait here if either the device is suspended or
11030 * its power level is changing.
11031 */
11032 while ((un->un_state == SD_STATE_SUSPENDED) ||
11033 (un->un_state == SD_STATE_PM_CHANGING)) {
11034 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11035 }
11036 un->un_ncmds_in_driver++;
11037 mutex_exit(SD_MUTEX(un));
11038
11039 /* Initialize sd_ssc_t for internal uscsi commands */
11040 ssc = sd_ssc_init(un);
11041 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11042 err = EIO;
11043 } else {
11044 err = 0;
11045 }
11046 sd_ssc_fini(ssc);
11047
11048 mutex_enter(SD_MUTEX(un));
11049 un->un_ncmds_in_driver--;
11050 ASSERT(un->un_ncmds_in_driver >= 0);
11051 mutex_exit(SD_MUTEX(un));
11052 if (err != 0)
11053 return (err);
11054 }
11055
11056 /*
11057 * Read requests are restricted to multiples of the system block size.
11058 */
11059 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11060 !un->un_f_enable_rmw)
11061 secmask = un->un_tgt_blocksize - 1;
11062 else
11063 secmask = DEV_BSIZE - 1;
11064
11065 if (uio->uio_loffset & ((offset_t)(secmask))) {
11066 SD_ERROR(SD_LOG_READ_WRITE, un,
11067 "sdread: file offset not modulo %d\n",
11068 secmask + 1);
11069 err = EINVAL;
11070 } else if (uio->uio_iov->iov_len & (secmask)) {
11071 SD_ERROR(SD_LOG_READ_WRITE, un,
11072 "sdread: transfer length not modulo %d\n",
11073 secmask + 1);
11074 err = EINVAL;
11075 } else {
11076 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
11077 }
11078
11079 return (err);
11080 }
11081
11082
11083 /*
11084 * Function: sdwrite
11085 *
11086 * Description: Driver's write(9e) entry point function.
11087 *
11088 * Arguments: dev - device number
11089 * uio - structure pointer describing where data is stored in
11090 * user's space
11091 * cred_p - user credential pointer
11092 *
11093 * Return Code: ENXIO
11094 * EIO
11095 * EINVAL
11096 * value returned by physio
11097 *
11098 * Context: Kernel thread context.
11099 */
11100 /* ARGSUSED */
11101 static int
11102 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
11103 {
11104 struct sd_lun *un = NULL;
11105 int secmask;
11106 int err = 0;
11107 sd_ssc_t *ssc;
11108
11109 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11110 return (ENXIO);
11111 }
11112
11113 ASSERT(!mutex_owned(SD_MUTEX(un)));
11114
11115 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11116 mutex_enter(SD_MUTEX(un));
11117 /*
11118 * Because the call to sd_ready_and_valid will issue I/O, we
11119 * must wait here if either the device is suspended or
11120 * its power level is changing.
11121 */
11122 while ((un->un_state == SD_STATE_SUSPENDED) ||
11123 (un->un_state == SD_STATE_PM_CHANGING)) {
11124 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11125 }
11126 un->un_ncmds_in_driver++;
11127 mutex_exit(SD_MUTEX(un));
11128
11129 /* Initialize sd_ssc_t for internal uscsi commands */
11130 ssc = sd_ssc_init(un);
11131 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11132 err = EIO;
11133 } else {
11134 err = 0;
11135 }
11136 sd_ssc_fini(ssc);
11137
11138 mutex_enter(SD_MUTEX(un));
11139 un->un_ncmds_in_driver--;
11140 ASSERT(un->un_ncmds_in_driver >= 0);
11141 mutex_exit(SD_MUTEX(un));
11142 if (err != 0)
11143 return (err);
11144 }
11145
11146 /*
11147 * Write requests are restricted to multiples of the system block size.
11148 */
11149 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11150 !un->un_f_enable_rmw)
11151 secmask = un->un_tgt_blocksize - 1;
11152 else
11153 secmask = DEV_BSIZE - 1;
11154
11155 if (uio->uio_loffset & ((offset_t)(secmask))) {
11156 SD_ERROR(SD_LOG_READ_WRITE, un,
11157 "sdwrite: file offset not modulo %d\n",
11158 secmask + 1);
11159 err = EINVAL;
11160 } else if (uio->uio_iov->iov_len & (secmask)) {
11161 SD_ERROR(SD_LOG_READ_WRITE, un,
11162 "sdwrite: transfer length not modulo %d\n",
11163 secmask + 1);
11164 err = EINVAL;
11165 } else {
11166 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11167 }
11168
11169 return (err);
11170 }
11171
11172
11173 /*
11174 * Function: sdaread
11175 *
11176 * Description: Driver's aread(9e) entry point function.
11177 *
11178 * Arguments: dev - device number
11179 * aio - structure pointer describing where data is to be stored
11180 * cred_p - user credential pointer
11181 *
11182 * Return Code: ENXIO
11183 * EIO
11184 * EINVAL
11185 * value returned by aphysio
11186 *
11187 * Context: Kernel thread context.
11188 */
11189 /* ARGSUSED */
11190 static int
11191 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11192 {
11193 struct sd_lun *un = NULL;
11194 struct uio *uio = aio->aio_uio;
11195 int secmask;
11196 int err = 0;
11197 sd_ssc_t *ssc;
11198
11199 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11200 return (ENXIO);
11201 }
11202
11203 ASSERT(!mutex_owned(SD_MUTEX(un)));
11204
11205 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11206 mutex_enter(SD_MUTEX(un));
11207 /*
11208 * Because the call to sd_ready_and_valid will issue I/O, we
11209 * must wait here if either the device is suspended or
11210 * its power level is changing.
11211 */
11212 while ((un->un_state == SD_STATE_SUSPENDED) ||
11213 (un->un_state == SD_STATE_PM_CHANGING)) {
11214 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11215 }
11216 un->un_ncmds_in_driver++;
11217 mutex_exit(SD_MUTEX(un));
11218
11219 /* Initialize sd_ssc_t for internal uscsi commands */
11220 ssc = sd_ssc_init(un);
11221 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11222 err = EIO;
11223 } else {
11224 err = 0;
11225 }
11226 sd_ssc_fini(ssc);
11227
11228 mutex_enter(SD_MUTEX(un));
11229 un->un_ncmds_in_driver--;
11230 ASSERT(un->un_ncmds_in_driver >= 0);
11231 mutex_exit(SD_MUTEX(un));
11232 if (err != 0)
11233 return (err);
11234 }
11235
11236 /*
11237 * Read requests are restricted to multiples of the system block size.
11238 */
11239 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11240 !un->un_f_enable_rmw)
11241 secmask = un->un_tgt_blocksize - 1;
11242 else
11243 secmask = DEV_BSIZE - 1;
11244
11245 if (uio->uio_loffset & ((offset_t)(secmask))) {
11246 SD_ERROR(SD_LOG_READ_WRITE, un,
11247 "sdaread: file offset not modulo %d\n",
11248 secmask + 1);
11249 err = EINVAL;
11250 } else if (uio->uio_iov->iov_len & (secmask)) {
11251 SD_ERROR(SD_LOG_READ_WRITE, un,
11252 "sdaread: transfer length not modulo %d\n",
11253 secmask + 1);
11254 err = EINVAL;
11255 } else {
11256 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11257 }
11258
11259 return (err);
11260 }
11261
11262
11263 /*
11264 * Function: sdawrite
11265 *
11266 * Description: Driver's awrite(9e) entry point function.
11267 *
11268 * Arguments: dev - device number
11269 * aio - structure pointer describing where data is stored
11270 * cred_p - user credential pointer
11271 *
11272 * Return Code: ENXIO
11273 * EIO
11274 * EINVAL
11275 * value returned by aphysio
11276 *
11277 * Context: Kernel thread context.
11278 */
11279 /* ARGSUSED */
11280 static int
11281 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11282 {
11283 struct sd_lun *un = NULL;
11284 struct uio *uio = aio->aio_uio;
11285 int secmask;
11286 int err = 0;
11287 sd_ssc_t *ssc;
11288
11289 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11290 return (ENXIO);
11291 }
11292
11293 ASSERT(!mutex_owned(SD_MUTEX(un)));
11294
11295 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11296 mutex_enter(SD_MUTEX(un));
11297 /*
11298 * Because the call to sd_ready_and_valid will issue I/O, we
11299 * must wait here if either the device is suspended or
11300 * its power level is changing.
11301 */
11302 while ((un->un_state == SD_STATE_SUSPENDED) ||
11303 (un->un_state == SD_STATE_PM_CHANGING)) {
11304 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11305 }
11306 un->un_ncmds_in_driver++;
11307 mutex_exit(SD_MUTEX(un));
11308
11309 /* Initialize sd_ssc_t for internal uscsi commands */
11310 ssc = sd_ssc_init(un);
11311 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11312 err = EIO;
11313 } else {
11314 err = 0;
11315 }
11316 sd_ssc_fini(ssc);
11317
11318 mutex_enter(SD_MUTEX(un));
11319 un->un_ncmds_in_driver--;
11320 ASSERT(un->un_ncmds_in_driver >= 0);
11321 mutex_exit(SD_MUTEX(un));
11322 if (err != 0)
11323 return (err);
11324 }
11325
11326 /*
11327 * Write requests are restricted to multiples of the system block size.
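 * For example, with un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR, RMW
 * disabled, and a 4KB target block size, secmask is 0xfff: an offset
 * or transfer length of 0x2000 passes the checks below, while 0x1200
 * fails with EINVAL. Otherwise secmask is DEV_BSIZE - 1 (0x1ff for
 * 512-byte system blocks).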
11328 */ 11329 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11330 !un->un_f_enable_rmw) 11331 secmask = un->un_tgt_blocksize - 1; 11332 else 11333 secmask = DEV_BSIZE - 1; 11334 11335 if (uio->uio_loffset & ((offset_t)(secmask))) { 11336 SD_ERROR(SD_LOG_READ_WRITE, un, 11337 "sdawrite: file offset not modulo %d\n", 11338 secmask + 1); 11339 err = EINVAL; 11340 } else if (uio->uio_iov->iov_len & (secmask)) { 11341 SD_ERROR(SD_LOG_READ_WRITE, un, 11342 "sdawrite: transfer length not modulo %d\n", 11343 secmask + 1); 11344 err = EINVAL; 11345 } else { 11346 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11347 } 11348 11349 return (err); 11350 } 11351 11352 11353 11354 11355 11356 /* 11357 * Driver IO processing follows the following sequence: 11358 * 11359 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11360 * | | ^ 11361 * v v | 11362 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11363 * | | | | 11364 * v | | | 11365 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11366 * | | ^ ^ 11367 * v v | | 11368 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11369 * | | | | 11370 * +---+ | +------------+ +-------+ 11371 * | | | | 11372 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11373 * | v | | 11374 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11375 * | | ^ | 11376 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11377 * | v | | 11378 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11379 * | | ^ | 11380 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11381 * | v | | 11382 * | sd_checksum_iostart() sd_checksum_iodone() | 11383 * | | ^ | 11384 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11385 * | v | | 11386 * | sd_pm_iostart() sd_pm_iodone() | 11387 * | | ^ | 11388 * | | | | 11389 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11390 * | ^ 11391 * v | 11392 * sd_core_iostart() | 11393 * | | 11394 * | +------>(*destroypkt)() 11395 * +-> sd_start_cmds() <-+ | | 11396 * | | | v 11397 * | | | scsi_destroy_pkt(9F) 11398 * | | | 11399 * +->(*initpkt)() +- sdintr() 11400 * | | | | 11401 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11402 * | +-> scsi_setup_cdb(9F) | 11403 * | | 11404 * +--> scsi_transport(9F) | 11405 * | | 11406 * +----> SCSA ---->+ 11407 * 11408 * 11409 * This code is based upon the following presumptions: 11410 * 11411 * - iostart and iodone functions operate on buf(9S) structures. These 11412 * functions perform the necessary operations on the buf(9S) and pass 11413 * them along to the next function in the chain by using the macros 11414 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11415 * (for iodone side functions). 11416 * 11417 * - The iostart side functions may sleep. The iodone side functions 11418 * are called under interrupt context and may NOT sleep. Therefore 11419 * iodone side functions also may not call iostart side functions. 11420 * (NOTE: iostart side functions should NOT sleep for memory, as 11421 * this could result in deadlock.) 11422 * 11423 * - An iostart side function may call its corresponding iodone side 11424 * function directly (if necessary). 11425 * 11426 * - In the event of an error, an iostart side function can return a buf(9S) 11427 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11428 * b_error in the usual way of course). 11429 * 11430 * - The taskq mechanism may be used by the iodone side functions to dispatch 11431 * requests to the iostart side functions. 
The iostart side functions in
11432 * this case would be called under the context of a taskq thread, so it's
11433 * OK for them to block/sleep/spin in this case.
11434 *
11435 * - iostart side functions may allocate "shadow" buf(9S) structs and
11436 * pass them along to the next function in the chain. The corresponding
11437 * iodone side functions must coalesce the "shadow" bufs and return
11438 * the "original" buf to the next higher layer.
11439 *
11440 * - The b_private field of the buf(9S) struct holds a pointer to
11441 * an sd_xbuf struct, which contains information needed to
11442 * construct the scsi_pkt for the command.
11443 *
11444 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11445 * layer must acquire & release the SD_MUTEX(un) as needed.
11446 */
11447
11448
11449 /*
11450 * Create taskq for all targets in the system. This is created at
11451 * _init(9E) and destroyed at _fini(9E).
11452 *
11453 * Note: here we set the minalloc to a reasonably high number to ensure that
11454 * we will have an adequate supply of task entries available at interrupt time.
11455 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11456 * sd_taskq_create(). Since we do not want to sleep for allocations at
11457 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11458 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11459 * requests at any one instant in time.
11460 */
11461 #define SD_TASKQ_NUMTHREADS 8
11462 #define SD_TASKQ_MINALLOC 256
11463 #define SD_TASKQ_MAXALLOC 256
11464
11465 static taskq_t *sd_tq = NULL;
11466 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11467
11468 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11469 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11470
11471 /*
11472 * The following task queue is being created for the write part of
11473 * read-modify-write of non-512 block size devices.
11474 * Limit the number of threads to 1 for now. This number has been chosen
11475 * considering that it currently applies only to DVD-RAM/MO drives, for
11476 * which performance is not the main criterion at this stage.
11477 * Note: whether a single taskq could be used instead remains to be explored.
11478 */
11479 #define SD_WMR_TASKQ_NUMTHREADS 1
11480 static taskq_t *sd_wmr_tq = NULL;
11481 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11482
11483 /*
11484 * Function: sd_taskq_create
11485 *
11486 * Description: Create taskq thread(s) and preallocate task entries
11487 *
11488 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
11489 *
11490 * Context: Can sleep. Requires blockable context.
11491 *
11492 * Notes: - The taskq() facility currently is NOT part of the DDI.
11493 * (definitely NOT recommended for 3rd-party drivers!) :-)
11494 * - taskq_create() will block for memory; also, it will panic
11495 * if it cannot create the requested number of threads.
11496 * - Currently taskq_create() creates threads that cannot be
11497 * swapped.
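 * - Illustrative dispatch sketch (not driver code; func and arg
 * are hypothetical): an iodone side function can defer work to
 * kernel thread context with something like
 *	if (taskq_dispatch(sd_tq, func, arg,
 *	    TQ_NOSLEEP) == TASKQID_INVALID)
 *		-- fail the command --
 * With maxalloc == minalloc and TASKQ_PREPOPULATE, such a
 * dispatch fails rather than sleeps when the preallocated
 * entry supply is exhausted.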
11498 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11499 * supply of taskq entries at interrupt time (i.e., so that we
11500 * do not have to sleep for memory)
11501 */
11502
11503 static void
11504 sd_taskq_create(void)
11505 {
11506 char taskq_name[TASKQ_NAMELEN];
11507
11508 ASSERT(sd_tq == NULL);
11509 ASSERT(sd_wmr_tq == NULL);
11510
11511 (void) snprintf(taskq_name, sizeof (taskq_name),
11512 "%s_drv_taskq", sd_label);
11513 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11514 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11515 TASKQ_PREPOPULATE));
11516
11517 (void) snprintf(taskq_name, sizeof (taskq_name),
11518 "%s_rmw_taskq", sd_label);
11519 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11520 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11521 TASKQ_PREPOPULATE));
11522 }
11523
11524
11525 /*
11526 * Function: sd_taskq_delete
11527 *
11528 * Description: Complementary cleanup routine for sd_taskq_create().
11529 *
11530 * Context: Kernel thread context.
11531 */
11532
11533 static void
11534 sd_taskq_delete(void)
11535 {
11536 ASSERT(sd_tq != NULL);
11537 ASSERT(sd_wmr_tq != NULL);
11538 taskq_destroy(sd_tq);
11539 taskq_destroy(sd_wmr_tq);
11540 sd_tq = NULL;
11541 sd_wmr_tq = NULL;
11542 }
11543
11544
11545 /*
11546 * Function: sdstrategy
11547 *
11548 * Description: Driver's strategy (9E) entry point function.
11549 *
11550 * Arguments: bp - pointer to buf(9S)
11551 *
11552 * Return Code: Always returns zero
11553 *
11554 * Context: Kernel thread context.
11555 */
11556
11557 static int
11558 sdstrategy(struct buf *bp)
11559 {
11560 struct sd_lun *un;
11561
11562 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11563 if (un == NULL) {
11564 bioerror(bp, EIO);
11565 bp->b_resid = bp->b_bcount;
11566 biodone(bp);
11567 return (0);
11568 }
11569
11570 /* As was done in the past, fail new cmds if state is dumping. */
11571 if (un->un_state == SD_STATE_DUMPING) {
11572 bioerror(bp, ENXIO);
11573 bp->b_resid = bp->b_bcount;
11574 biodone(bp);
11575 return (0);
11576 }
11577
11578 ASSERT(!mutex_owned(SD_MUTEX(un)));
11579
11580 /*
11581 * Commands may sneak in while we released the mutex in
11582 * DDI_SUSPEND, so we should block new commands. However, old
11583 * commands that are still in the driver at this point should
11584 * still be allowed to drain.
11585 */
11586 mutex_enter(SD_MUTEX(un));
11587 /*
11588 * Must wait here if either the device is suspended or
11589 * its power level is changing.
11590 */
11591 while ((un->un_state == SD_STATE_SUSPENDED) ||
11592 (un->un_state == SD_STATE_PM_CHANGING)) {
11593 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11594 }
11595
11596 un->un_ncmds_in_driver++;
11597
11598 /*
11599 * atapi: Since we are running the CD for now in PIO mode we need to
11600 * call bp_mapin here to avoid bp_mapin being called in interrupt
11601 * context under the HBA's init_pkt routine.
11602 */
11603 if (un->un_f_cfg_is_atapi == TRUE) {
11604 mutex_exit(SD_MUTEX(un));
11605 bp_mapin(bp);
11606 mutex_enter(SD_MUTEX(un));
11607 }
11608 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11609 un->un_ncmds_in_driver);
11610
11611 if (bp->b_flags & B_WRITE)
11612 un->un_f_sync_cache_required = TRUE;
11613
11614 mutex_exit(SD_MUTEX(un));
11615
11616 /*
11617 * This will (eventually) allocate the sd_xbuf area and
11618 * call sd_xbuf_strategy().
We just want to return the
11619 * result of ddi_xbuf_qstrategy so that we have an optimized
11620 * tail call which saves us a stack frame.
11621 */
11622 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11623 }
11624
11625
11626 /*
11627 * Function: sd_xbuf_strategy
11628 *
11629 * Description: Function for initiating IO operations via the
11630 * ddi_xbuf_qstrategy() mechanism.
11631 *
11632 * Context: Kernel thread context.
11633 */
11634
11635 static void
11636 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11637 {
11638 struct sd_lun *un = arg;
11639
11640 ASSERT(bp != NULL);
11641 ASSERT(xp != NULL);
11642 ASSERT(un != NULL);
11643 ASSERT(!mutex_owned(SD_MUTEX(un)));
11644
11645 /*
11646 * Initialize the fields in the xbuf and save a pointer to the
11647 * xbuf in bp->b_private.
11648 */
11649 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11650
11651 /* Send the buf down the iostart chain */
11652 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11653 }
11654
11655
11656 /*
11657 * Function: sd_xbuf_init
11658 *
11659 * Description: Prepare the given sd_xbuf struct for use.
11660 *
11661 * Arguments: un - ptr to softstate
11662 * bp - ptr to associated buf(9S)
11663 * xp - ptr to associated sd_xbuf
11664 * chain_type - IO chain type to use:
11665 * SD_CHAIN_NULL
11666 * SD_CHAIN_BUFIO
11667 * SD_CHAIN_USCSI
11668 * SD_CHAIN_DIRECT
11669 * SD_CHAIN_DIRECT_PRIORITY
11670 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11671 * initialization; may be NULL if none.
11672 *
11673 * Context: Kernel thread context
11674 */
11675
11676 static void
11677 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11678 uchar_t chain_type, void *pktinfop)
11679 {
11680 int index;
11681
11682 ASSERT(un != NULL);
11683 ASSERT(bp != NULL);
11684 ASSERT(xp != NULL);
11685
11686 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11687 bp, chain_type);
11688
11689 xp->xb_un = un;
11690 xp->xb_pktp = NULL;
11691 xp->xb_pktinfo = pktinfop;
11692 xp->xb_private = bp->b_private;
11693 xp->xb_blkno = (daddr_t)bp->b_blkno;
11694
11695 /*
11696 * Set up the iostart and iodone chain indexes in the xbuf, based
11697 * upon the specified chain type to use.
11698 */
11699 switch (chain_type) {
11700 case SD_CHAIN_NULL:
11701 /*
11702 * Fall through to just use the values for the buf type, even
11703 * though for the NULL chain these values will never be used.
11704 */
11705 /* FALLTHRU */
11706 case SD_CHAIN_BUFIO:
11707 index = un->un_buf_chain_type;
11708 if ((!un->un_f_has_removable_media) &&
11709 (un->un_tgt_blocksize != 0) &&
11710 (un->un_tgt_blocksize != DEV_BSIZE ||
11711 un->un_f_enable_rmw)) {
11712 int secmask = 0, blknomask = 0;
11713 if (un->un_f_enable_rmw) {
11714 blknomask =
11715 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11716 secmask = un->un_phy_blocksize - 1;
11717 } else {
11718 blknomask =
11719 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11720 secmask = un->un_tgt_blocksize - 1;
11721 }
11722
11723 if ((bp->b_lblkno & (blknomask)) ||
11724 (bp->b_bcount & (secmask))) {
11725 if ((un->un_f_rmw_type !=
11726 SD_RMW_TYPE_RETURN_ERROR) ||
11727 un->un_f_enable_rmw) {
11728 if (un->un_f_pm_is_enabled == FALSE)
11729 index =
11730 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11731 else
11732 index =
11733 SD_CHAIN_INFO_MSS_DISK;
11734 }
11735 }
11736 }
11737 break;
11738 case SD_CHAIN_USCSI:
11739 index = un->un_uscsi_chain_type;
11740 break;
11741 case SD_CHAIN_DIRECT:
11742 index = un->un_direct_chain_type;
11743 break;
11744 case SD_CHAIN_DIRECT_PRIORITY:
11745 index = un->un_priority_chain_type;
11746 break;
11747 default:
11748 /* We're really broken if we ever get here... */
11749 panic("sd_xbuf_init: illegal chain type!");
11750 /*NOTREACHED*/
11751 }
11752
11753 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11754 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11755
11756 /*
11757 * It might be a bit easier to simply bzero the entire xbuf above,
11758 * but it turns out that since we init a fair number of members anyway,
11759 * we save a fair number of cycles by doing explicit assignment of zero.
11760 */
11761 xp->xb_pkt_flags = 0;
11762 xp->xb_dma_resid = 0;
11763 xp->xb_retry_count = 0;
11764 xp->xb_victim_retry_count = 0;
11765 xp->xb_ua_retry_count = 0;
11766 xp->xb_nr_retry_count = 0;
11767 xp->xb_sense_bp = NULL;
11768 xp->xb_sense_status = 0;
11769 xp->xb_sense_state = 0;
11770 xp->xb_sense_resid = 0;
11771 xp->xb_ena = 0;
11772
11773 bp->b_private = xp;
11774 bp->b_flags &= ~(B_DONE | B_ERROR);
11775 bp->b_resid = 0;
11776 bp->av_forw = NULL;
11777 bp->av_back = NULL;
11778 bioerror(bp, 0);
11779
11780 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11781 }
11782
11783
11784 /*
11785 * Function: sd_uscsi_strategy
11786 *
11787 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11788 *
11789 * Arguments: bp - buf struct ptr
11790 *
11791 * Return Code: Always returns 0
11792 *
11793 * Context: Kernel thread context
11794 */
11795
11796 static int
11797 sd_uscsi_strategy(struct buf *bp)
11798 {
11799 struct sd_lun *un;
11800 struct sd_uscsi_info *uip;
11801 struct sd_xbuf *xp;
11802 uchar_t chain_type;
11803 uchar_t cmd;
11804
11805 ASSERT(bp != NULL);
11806
11807 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11808 if (un == NULL) {
11809 bioerror(bp, EIO);
11810 bp->b_resid = bp->b_bcount;
11811 biodone(bp);
11812 return (0);
11813 }
11814
11815 ASSERT(!mutex_owned(SD_MUTEX(un)));
11816
11817 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11818
11819 /*
11820 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11821 */
11822 ASSERT(bp->b_private != NULL);
11823 uip = (struct sd_uscsi_info *)bp->b_private;
11824 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11825
11826 mutex_enter(SD_MUTEX(un));
11827 /*
11828 * atapi: Since we are running the CD for now in PIO mode we need to
11829 * call bp_mapin here to
avoid bp_mapin being called in interrupt
11830 * context under the HBA's init_pkt routine.
11831 */
11832 if (un->un_f_cfg_is_atapi == TRUE) {
11833 mutex_exit(SD_MUTEX(un));
11834 bp_mapin(bp);
11835 mutex_enter(SD_MUTEX(un));
11836 }
11837 un->un_ncmds_in_driver++;
11838 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11839 un->un_ncmds_in_driver);
11840
11841 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11842 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11843 un->un_f_sync_cache_required = TRUE;
11844
11845 mutex_exit(SD_MUTEX(un));
11846
11847 switch (uip->ui_flags) {
11848 case SD_PATH_DIRECT:
11849 chain_type = SD_CHAIN_DIRECT;
11850 break;
11851 case SD_PATH_DIRECT_PRIORITY:
11852 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11853 break;
11854 default:
11855 chain_type = SD_CHAIN_USCSI;
11856 break;
11857 }
11858
11859 /*
11860 * We may allocate an extra buf for external USCSI commands. If the
11861 * application asks for more than 20 bytes of sense data via USCSI,
11862 * the SCSA layer will allocate a 252-byte sense buf for that command.
11863 */
11864 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11865 SENSE_LENGTH) {
11866 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11867 MAX_SENSE_LENGTH, KM_SLEEP);
11868 } else {
11869 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11870 }
11871
11872 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11873
11874 /* Use the index obtained within xbuf_init */
11875 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11876
11877 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11878
11879 return (0);
11880 }
11881
11882 /*
11883 * Function: sd_send_scsi_cmd
11884 *
11885 * Description: Runs a USCSI command for the user (when called thru
11886 * sdioctl), or for the driver.
11887 *
11888 * Arguments: dev - the dev_t for the device
11889 * incmd - ptr to a valid uscsi_cmd struct
11890 * flag - bit flag, indicating open settings, 32/64 bit type
11891 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11892 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11893 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11894 * to use the USCSI "direct" chain and bypass the normal
11895 * command waitq.
11896 *
11897 * Return Code: 0 - successful completion of the given command
11898 * EIO - scsi_uscsi_handle_command() failed
11899 * ENXIO - soft state not found for specified dev
11900 * EINVAL
11901 * EFAULT - copyin/copyout error
11902 * return code of scsi_uscsi_handle_command():
11903 * EIO
11904 * ENXIO
11905 * EACCES
11906 *
11907 * Context: Waits for command to complete. Can sleep.
11908 */
11909
11910 static int
11911 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11912 enum uio_seg dataspace, int path_flag)
11913 {
11914 struct sd_lun *un;
11915 sd_ssc_t *ssc;
11916 int rval;
11917
11918 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11919 if (un == NULL) {
11920 return (ENXIO);
11921 }
11922
11923 /*
11924 * Using sd_ssc_send to handle uscsi cmd
11925 */
11926 ssc = sd_ssc_init(un);
11927 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11928 sd_ssc_fini(ssc);
11929
11930 return (rval);
11931 }
11932
11933 /*
11934 * Function: sd_ssc_init
11935 *
11936 * Description: Uscsi end-users call this function to initialize the
11937 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs.
11938 *
11939 * The return value of sd_send_scsi_cmd will be treated as a
11940 * fault in various conditions.
Even if it is not zero, some
11941 * callers may ignore the return value. That is to say, we
11942 * cannot make an accurate assessment in sdintr, since a
11943 * command failing in sdintr does not mean the caller of
11944 * sd_send_scsi_cmd will treat it as a real failure.
11945 *
11946 * To avoid printing too many error logs for a failed uscsi
11947 * packet that the caller may not treat as a failure, the
11948 * sd driver keeps silent when handling all uscsi commands.
11949 *
11950 * During detach->attach and attach-open, for some types of
11951 * problems, the driver should be providing information about
11952 * the problem encountered. Such commands use USCSI_SILENT, which
11953 * suppresses all driver information. The result is that no
11954 * information about the problem is available. Being
11955 * completely silent during this time is inappropriate. The
11956 * driver needs a more selective filter than USCSI_SILENT, so
11957 * that information related to faults is provided.
11958 *
11959 * To make an accurate assessment, the caller of
11960 * sd_send_scsi_USCSI_CMD should take ownership and
11961 * gather the information necessary to print error messages.
11962 *
11963 * If we want to print the necessary info for a uscsi command, we
11964 * need to keep the uscsi_cmd and sd_uscsi_info until we can make
11965 * the assessment. We use sd_ssc_init to alloc the necessary
11966 * structs for sending an uscsi command, and we are also
11967 * responsible for freeing the memory by calling
11968 * sd_ssc_fini.
11969 *
11970 * The calling sequence will look like:
11971 * sd_ssc_init->
11972 *
11973 * ...
11974 *
11975 * sd_send_scsi_USCSI_CMD->
11976 * sd_ssc_send-> - - - sdintr
11977 * ...
11978 *
11979 * if we think the return value should be treated as a
11980 * failure, we make the assessment here and print out
11981 * what is necessary by retrieving uscsi_cmd and sd_uscsi_info
11982 *
11983 * ...
11984 *
11985 * sd_ssc_fini
11986 *
11987 *
11988 * Arguments: un - pointer to driver soft state (unit) structure for this
11989 * target.
11990 *
11991 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11992 * uscsi_cmd and sd_uscsi_info.
11993 * NULL - if memory for the sd_ssc_t struct cannot be allocated
11994 *
11995 * Context: Kernel Thread.
11996 */
11997 static sd_ssc_t *
11998 sd_ssc_init(struct sd_lun *un)
11999 {
12000 sd_ssc_t *ssc;
12001 struct uscsi_cmd *ucmdp;
12002 struct sd_uscsi_info *uip;
12003
12004 ASSERT(un != NULL);
12005 ASSERT(!mutex_owned(SD_MUTEX(un)));
12006
12007 /*
12008 * Allocate sd_ssc_t structure
12009 */
12010 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
12011
12012 /*
12013 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
12014 */
12015 ucmdp = scsi_uscsi_alloc();
12016
12017 /*
12018 * Allocate sd_uscsi_info structure
12019 */
12020 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
12021
12022 ssc->ssc_uscsi_cmd = ucmdp;
12023 ssc->ssc_uscsi_info = uip;
12024 ssc->ssc_un = un;
12025
12026 return (ssc);
12027 }
12028
12029 /*
12030 * Function: sd_ssc_fini
12031 *
12032 * Description: Free the sd_ssc_t struct and everything hanging off it.
12033 *
12034 * Arguments: ssc - struct pointer of sd_ssc_t.
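 *
 * Usage sketch (illustrative; this pattern appears in sdclose() and
 * sd_ready_and_valid() above): every sd_ssc_init() must be balanced
 * by exactly one sd_ssc_fini(), after any needed assessment:
 *
 *	ssc = sd_ssc_init(un);
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
 *	if (status != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);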
12035 */
12036 static void
12037 sd_ssc_fini(sd_ssc_t *ssc)
12038 {
12039 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
12040
12041 if (ssc->ssc_uscsi_info != NULL) {
12042 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
12043 ssc->ssc_uscsi_info = NULL;
12044 }
12045
12046 kmem_free(ssc, sizeof (sd_ssc_t));
12047 ssc = NULL;
12048 }
12049
12050 /*
12051 * Function: sd_ssc_send
12052 *
12053 * Description: Runs a USCSI command for the user when called through
12054 * sdioctl, or for the driver.
12055 *
12056 * Arguments: ssc - pointer to the sd_ssc_t struct, which carries the
12057 * uscsi_cmd and sd_uscsi_info.
12058 * incmd - ptr to a valid uscsi_cmd struct
12059 * flag - bit flag, indicating open settings, 32/64 bit type
12060 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
12061 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
12062 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
12063 * to use the USCSI "direct" chain and bypass the normal
12064 * command waitq.
12065 *
12066 * Return Code: 0 - successful completion of the given command
12067 * EIO - scsi_uscsi_handle_command() failed
12068 * ENXIO - soft state not found for specified dev
12069 * ECANCELED - command cancelled due to low power
12070 * EINVAL
12071 * EFAULT - copyin/copyout error
12072 * return code of scsi_uscsi_handle_command():
12073 * EIO
12074 * ENXIO
12075 * EACCES
12076 *
12077 * Context: Kernel Thread;
12078 * Waits for command to complete. Can sleep.
12079 */
12080 static int
12081 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
12082 enum uio_seg dataspace, int path_flag)
12083 {
12084 struct sd_uscsi_info *uip;
12085 struct uscsi_cmd *uscmd;
12086 struct sd_lun *un;
12087 dev_t dev;
12088
12089 int format = 0;
12090 int rval;
12091
12092 ASSERT(ssc != NULL);
12093 un = ssc->ssc_un;
12094 ASSERT(un != NULL);
12095 uscmd = ssc->ssc_uscsi_cmd;
12096 ASSERT(uscmd != NULL);
12097 ASSERT(!mutex_owned(SD_MUTEX(un)));
12098 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12099 /*
12100 * If we enter here, it indicates that the previous uscsi
12101 * command has not been processed by sd_ssc_assessment.
12102 * This violates our rules of FMA telemetry processing.
12103 * We should print out this message and the last undisposed
12104 * uscsi command.
12105 */
12106 if (uscmd->uscsi_cdb != NULL) {
12107 SD_INFO(SD_LOG_SDTEST, un,
12108 "sd_ssc_send is missing the alternative "
12109 "sd_ssc_assessment when running command 0x%x.\n",
12110 uscmd->uscsi_cdb[0]);
12111 }
12112 /*
12113 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12114 * the initial status.
12115 */
12116 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12117 }
12118
12119 /*
12120 * We need to make sure each sd_ssc_send is followed by a
12121 * sd_ssc_assessment to avoid missing FMA telemetries.
12122 */
12123 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12124
12125 /*
12126 * If USCSI_PMFAILFAST is set and un is in low power, fail the
12127 * command immediately.
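 * (Callers that must not spin up a powered-down disk set
 * USCSI_PMFAILFAST in uscsi_flags; as the code below shows, such a
 * command completes with ECANCELED instead of triggering a power-up.)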
12128 */
12129 mutex_enter(SD_MUTEX(un));
12130 mutex_enter(&un->un_pm_mutex);
12131 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
12132 SD_DEVICE_IS_IN_LOW_POWER(un)) {
12133 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
12134 "un:0x%p is in low power\n", un);
12135 mutex_exit(&un->un_pm_mutex);
12136 mutex_exit(SD_MUTEX(un));
12137 return (ECANCELED);
12138 }
12139 mutex_exit(&un->un_pm_mutex);
12140 mutex_exit(SD_MUTEX(un));
12141
12142 #ifdef SDDEBUG
12143 switch (dataspace) {
12144 case UIO_USERSPACE:
12145 SD_TRACE(SD_LOG_IO, un,
12146 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12147 break;
12148 case UIO_SYSSPACE:
12149 SD_TRACE(SD_LOG_IO, un,
12150 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12151 break;
12152 default:
12153 SD_TRACE(SD_LOG_IO, un,
12154 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12155 break;
12156 }
12157 #endif
12158
12159 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12160 SD_ADDRESS(un), &uscmd);
12161 if (rval != 0) {
12162 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: "
12163 "scsi_uscsi_alloc_and_copyin failed\n", un);
12164 return (rval);
12165 }
12166
12167 if ((uscmd->uscsi_cdb != NULL) &&
12168 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12169 mutex_enter(SD_MUTEX(un));
12170 un->un_f_format_in_progress = TRUE;
12171 mutex_exit(SD_MUTEX(un));
12172 format = 1;
12173 }
12174
12175 /*
12176 * Allocate an sd_uscsi_info struct and fill it with the info
12177 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12178 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12179 * since we allocate the buf here in this function, we do not
12180 * need to preserve the prior contents of b_private.
12181 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12182 */
12183 uip = ssc->ssc_uscsi_info;
12184 uip->ui_flags = path_flag;
12185 uip->ui_cmdp = uscmd;
12186
12187 /*
12188 * Commands sent with priority are intended for error recovery
12189 * situations, and do not have retries performed.
12190 */
12191 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12192 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12193 }
12194 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12195
12196 dev = SD_GET_DEV(un);
12197 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12198 sd_uscsi_strategy, NULL, uip);
12199
12200 /*
12201 * Mark ssc_flags right after handle_cmd to make sure
12202 * the uscsi command has been sent.
12203 */
12204 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12205
12206 #ifdef SDDEBUG
12207 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12208 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12209 uscmd->uscsi_status, uscmd->uscsi_resid);
12210 if (uscmd->uscsi_bufaddr != NULL) {
12211 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12212 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12213 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12214 if (dataspace == UIO_SYSSPACE) {
12215 SD_DUMP_MEMORY(un, SD_LOG_IO,
12216 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12217 uscmd->uscsi_buflen, SD_LOG_HEX);
12218 }
12219 }
12220 #endif
12221
12222 if (format == 1) {
12223 mutex_enter(SD_MUTEX(un));
12224 un->un_f_format_in_progress = FALSE;
12225 mutex_exit(SD_MUTEX(un));
12226 }
12227
12228 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12229
12230 return (rval);
12231 }
12232
12233 /*
12234 * Function: sd_ssc_print
12235 *
12236 * Description: Print information available to the console.
12237 *
12238 * Arguments: ssc - pointer to the sd_ssc_t struct, which carries the
12239 * uscsi_cmd and sd_uscsi_info.
12240 * sd_severity - log level.
12241 * Context: Kernel thread or interrupt context.
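 *
 * Note: sd_severity is one of the SCSI_ERR_* levels, e.g.
 * SCSI_ERR_FATAL or SCSI_ERR_INFO, as mapped from the driver
 * assessment in sd_ssc_post() below.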
12242 */
12243 static void
12244 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12245 {
12246 struct uscsi_cmd *ucmdp;
12247 struct scsi_device *devp;
12248 dev_info_t *devinfo;
12249 uchar_t *sensep;
12250 int senlen;
12251 union scsi_cdb *cdbp;
12252 uchar_t com;
12253 extern struct scsi_key_strings scsi_cmds[];
12254
12255 ASSERT(ssc != NULL);
12256 ASSERT(ssc->ssc_un != NULL);
12257
12258 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12259 return;
12260 ucmdp = ssc->ssc_uscsi_cmd;
12261 devp = SD_SCSI_DEVP(ssc->ssc_un);
12262 devinfo = SD_DEVINFO(ssc->ssc_un);
12263 ASSERT(ucmdp != NULL);
12264 ASSERT(devp != NULL);
12265 ASSERT(devinfo != NULL);
12266 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12267 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12268 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12269
12270 /* In certain cases (like DOORLOCK), the cdb could be NULL. */
12271 if (cdbp == NULL)
12272 return;
12273 /* We don't print a log if no sense data is available. */
12274 if (senlen == 0)
12275 sensep = NULL;
12276 com = cdbp->scc_cmd;
12277 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12278 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12279 }
12280
12281 /*
12282 * Function: sd_ssc_assessment
12283 *
12284 * Description: We use this function to make an assessment at the point
12285 * where the sd driver may encounter a potential error.
12286 *
12287 * Arguments: ssc - pointer to the sd_ssc_t struct, which carries the
12288 * uscsi_cmd and sd_uscsi_info.
12289 * tp_assess - a hint of strategy for ereport posting.
12290 * Possible values of tp_assess include:
12291 * SD_FMT_IGNORE - we don't post any ereport because we're
12292 * sure that it is ok to ignore the underlying problems.
12293 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now,
12294 * but it might not be correct to ignore the underlying hardware
12295 * error.
12296 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12297 * payload driver-assessment of value "fail" or
12298 * "fatal" (depending on what information we have here). This
12299 * assessment value is usually set when the sd driver thinks
12300 * a potential error occurred (typically, when the return value
12301 * of the SCSI command is EIO).
12302 * SD_FMT_STANDARD - we will post an ereport with the payload
12303 * driver-assessment of value "info". This assessment value is
12304 * set when the SCSI command returned successfully and with
12305 * sense data sent back.
12306 *
12307 * Context: Kernel thread.
12308 */
12309 static void
12310 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12311 {
12312 int senlen = 0;
12313 struct uscsi_cmd *ucmdp = NULL;
12314 struct sd_lun *un;
12315
12316 ASSERT(ssc != NULL);
12317 un = ssc->ssc_un;
12318 ASSERT(un != NULL);
12319 ucmdp = ssc->ssc_uscsi_cmd;
12320 ASSERT(ucmdp != NULL);
12321
12322 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12323 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12324 } else {
12325 /*
12326 * If we enter here, it indicates that we have a wrong
12327 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12328 * both of which should be called in pairs to avoid
12329 * losing FMA telemetries.
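 * The intended flag protocol, for reference: sd_ssc_send() sets
 * SSC_FLAGS_NEED_ASSESSMENT, and the matching sd_ssc_assessment()
 * call clears it (see the if-branch above). Arriving here with the
 * flag already clear therefore means the two were not paired
 * one-to-one.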
12330 */
12331 if (ucmdp->uscsi_cdb != NULL) {
12332 SD_INFO(SD_LOG_SDTEST, un,
12333 "sd_ssc_assessment is missing the "
12334 "alternative sd_ssc_send when running 0x%x, "
12335 "or there are superfluous sd_ssc_assessment for "
12336 "the same sd_ssc_send.\n",
12337 ucmdp->uscsi_cdb[0]);
12338 }
12339 /*
12340 * Set the ssc_flags to the initial value to avoid passing
12341 * down dirty flags to the following sd_ssc_send function.
12342 */
12343 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12344 return;
12345 }
12346
12347 /*
12348 * Only handle an issued command which is waiting for assessment.
12349 * A command which is not issued will not have
12350 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12351 */
12352 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12353 sd_ssc_print(ssc, SCSI_ERR_INFO);
12354 return;
12355 } else {
12356 /*
12357 * For an issued command, we should clear this flag in
12358 * order to allow the sd_ssc_t structure to be reused
12359 * for multiple uscsi commands.
12360 */
12361 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12362 }
12363
12364 /*
12365 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
12366 * commands here. And we should clear the ssc_flags before return.
12367 */
12368 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12369 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12370 return;
12371 }
12372
12373 switch (tp_assess) {
12374 case SD_FMT_IGNORE:
12375 case SD_FMT_IGNORE_COMPROMISE:
12376 break;
12377 case SD_FMT_STATUS_CHECK:
12378 /*
12379 * For a failed command (including a succeeded command
12380 * with invalid data sent back).
12381 */
12382 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12383 break;
12384 case SD_FMT_STANDARD:
12385 /*
12386 * Always for succeeded commands, possibly with sense
12387 * data sent back.
12388 * Limitation:
12389 * We can only handle a succeeded command with sense
12390 * data sent back when auto-request-sense is enabled.
12391 */
12392 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12393 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12394 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12395 (un->un_f_arq_enabled == TRUE) &&
12396 senlen > 0 &&
12397 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12398 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12399 }
12400 break;
12401 default:
12402 /*
12403 * We should not see any other type of assessment.
12404 */
12405 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12406 "sd_ssc_assessment got wrong "
12407 "sd_type_assessment %d.\n", tp_assess);
12408 break;
12409 }
12410 /*
12411 * Clear the ssc_flags before returning.
12412 */
12413 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12414 }
12415
12416 /*
12417 * Function: sd_ssc_post
12418 *
12419 * Description: 1. Read the driver property to get the fm-scsi-log flag.
12420 * 2. Print the log if fm_log_capable is non-zero.
12421 * 3. Call sd_ssc_ereport_post to post an ereport if possible.
12422 *
12423 * Context: May be called from kernel thread or interrupt context.
12424 */
12425 static void
12426 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12427 {
12428 struct sd_lun *un;
12429 int sd_severity;
12430
12431 ASSERT(ssc != NULL);
12432 un = ssc->ssc_un;
12433 ASSERT(un != NULL);
12434
12435 /*
12436 * We may enter here from sd_ssc_assessment (for a USCSI command)
12437 * or directly from sdintr context.
12438 * We don't handle a non-disk drive (CD-ROM, removable media).
12439 * Clear the ssc_flags before returning in case we've set
12440 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12441 * drive.
12442 */
12443 if (ISCD(un) || un->un_f_has_removable_media) {
12444 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12445 return;
12446 }
12447
12448 switch (sd_assess) {
12449 case SD_FM_DRV_FATAL:
12450 sd_severity = SCSI_ERR_FATAL;
12451 break;
12452 case SD_FM_DRV_RECOVERY:
12453 sd_severity = SCSI_ERR_RECOVERED;
12454 break;
12455 case SD_FM_DRV_RETRY:
12456 sd_severity = SCSI_ERR_RETRYABLE;
12457 break;
12458 case SD_FM_DRV_NOTICE:
12459 sd_severity = SCSI_ERR_INFO;
12460 break;
12461 default:
12462 sd_severity = SCSI_ERR_UNKNOWN;
12463 }
12464 /* print log */
12465 sd_ssc_print(ssc, sd_severity);
12466
12467 /* always post ereport */
12468 sd_ssc_ereport_post(ssc, sd_assess);
12469 }
12470
12471 /*
12472 * Function: sd_ssc_set_info
12473 *
12474 * Description: Mark ssc_flags and set ssc_info, which will be the
12475 * payload of the uderr ereport. This function will cause
12476 * sd_ssc_ereport_post to post the uderr ereport only.
12477 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI),
12478 * the function will also call SD_ERROR or scsi_log for a
12479 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12480 *
12481 * Arguments: ssc - pointer to the sd_ssc_t struct, which carries the
12482 * uscsi_cmd and sd_uscsi_info.
12483 * ssc_flags - indicates the sub-category of a uderr.
12484 * comp - this argument is meaningful only when
12485 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12486 * values include:
12487 * > 0, SD_ERROR is used with comp as the driver logging
12488 * component;
12489 * = 0, scsi-log is used to log error telemetries;
12490 * < 0, no log available for this telemetry.
12491 *
12492 * Context: Kernel thread or interrupt context
12493 */
12494 static void
12495 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12496 {
12497 va_list ap;
12498
12499 ASSERT(ssc != NULL);
12500 ASSERT(ssc->ssc_un != NULL);
12501
12502 ssc->ssc_flags |= ssc_flags;
12503 va_start(ap, fmt);
12504 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12505 va_end(ap);
12506
12507 /*
12508 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12509 * with invalid data sent back. For a non-uscsi command, the
12510 * following code will be bypassed.
12511 */
12512 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12513 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12514 /*
12515 * If the error belongs to a certain component and we
12516 * do not want it to show up on the console, we
12517 * will use SD_ERROR; otherwise scsi_log is
12518 * preferred.
12519 */
12520 if (comp > 0) {
12521 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12522 } else if (comp == 0) {
12523 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12524 CE_WARN, ssc->ssc_info);
12525 }
12526 }
12527 }
12528 }
12529
12530 /*
12531 * Function: sd_buf_iodone
12532 *
12533 * Description: Frees the sd_xbuf & returns the buf to its originator.
12534 *
12535 * Context: May be called from interrupt context.
12536 */
12537 /* ARGSUSED */
12538 static void
12539 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12540 {
12541 struct sd_xbuf *xp;
12542
12543 ASSERT(un != NULL);
12544 ASSERT(bp != NULL);
12545 ASSERT(!mutex_owned(SD_MUTEX(un)));
12546
12547 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12548
12549 xp = SD_GET_XBUF(bp);
12550 ASSERT(xp != NULL);
12551
12552 /* xbuf is gone after this */
12553 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12554 mutex_enter(SD_MUTEX(un));
12555
12556 /*
12557 * Grab the time when the cmd completed.
12558 * This is used to determine if the system has been
12559 * idle long enough to be considered idle by the PM framework.
12560 * This is for lowering the overhead, and therefore improving
12561 * performance per I/O operation.
12562 */
12563 un->un_pm_idle_time = gethrtime();
12564
12565 un->un_ncmds_in_driver--;
12566 ASSERT(un->un_ncmds_in_driver >= 0);
12567 SD_INFO(SD_LOG_IO, un,
12568 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12569 un->un_ncmds_in_driver);
12570
12571 mutex_exit(SD_MUTEX(un));
12572 }
12573
12574 biodone(bp); /* bp is gone after this */
12575
12576 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12577 }
12578
12579
12580 /*
12581 * Function: sd_uscsi_iodone
12582 *
12583 * Description: Frees the sd_xbuf & returns the buf to its originator.
12584 *
12585 * Context: May be called from interrupt context.
12586 */
12587 /* ARGSUSED */
12588 static void
12589 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12590 {
12591 struct sd_xbuf *xp;
12592
12593 ASSERT(un != NULL);
12594 ASSERT(bp != NULL);
12595
12596 xp = SD_GET_XBUF(bp);
12597 ASSERT(xp != NULL);
12598 ASSERT(!mutex_owned(SD_MUTEX(un)));
12599
12600 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12601
12602 bp->b_private = xp->xb_private;
12603
12604 mutex_enter(SD_MUTEX(un));
12605
12606 /*
12607 * Grab the time when the cmd completed.
12608 * This is used to determine if the system has been
12609 * idle long enough to be considered idle by the PM framework.
12610 * This is for lowering the overhead, and therefore improving
12611 * performance per I/O operation.
12612 */
12613 un->un_pm_idle_time = gethrtime();
12614
12615 un->un_ncmds_in_driver--;
12616 ASSERT(un->un_ncmds_in_driver >= 0);
12617 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12618 un->un_ncmds_in_driver);
12619
12620 mutex_exit(SD_MUTEX(un));
12621
12622 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12623 SENSE_LENGTH) {
12624 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12625 MAX_SENSE_LENGTH);
12626 } else {
12627 kmem_free(xp, sizeof (struct sd_xbuf));
12628 }
12629
12630 biodone(bp);
12631
12632 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12633 }
12634
12635
12636 /*
12637 * Function: sd_mapblockaddr_iostart
12638 *
12639 * Description: Verify that the request lies within the partition limits
12640 * for the indicated minor device. Issue an "overrun" buf if
12641 * the request would exceed the partition range. Converts the
12642 * partition-relative block address to absolute.
12643 *
12644 * Upon exit of this function:
12645 * 1. I/O is aligned
12646 * xp->xb_blkno represents the absolute sector address
12647 * 2. I/O is misaligned
12648 * xp->xb_blkno represents the absolute logical block address
12649 * based on DEV_BSIZE. The logical block address will be
12650 * converted to a physical sector address in
12651 * sd_mapblocksize_iostart.
12652 * 3. I/O is misaligned but is aligned in the "overrun" buf
12653 * xp->xb_blkno represents the absolute logical block address
12654 * based on DEV_BSIZE. The logical block address will be
12655 * converted to a physical sector address in
12656 * sd_mapblocksize_iostart. But no RMW will be issued in this case.
12657 *
12658 * Context: Can sleep
12659 *
12660 * Issues: This follows what the old code did, in terms of accessing
12661 * some of the partition info in the unit struct without holding
12662 * the mutex. This is a general issue: if the partition info
12663 * can be altered while IO is in progress...
as soon as we send
12664 * a buf, its partitioning can be invalid before it gets to the
12665 * device. Probably the right fix is to move partitioning out
12666 * of the driver entirely.
12667 */
12668
12669 static void
12670 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12671 {
12672 diskaddr_t nblocks; /* #blocks in the given partition */
12673 daddr_t blocknum; /* Block number specified by the buf */
12674 size_t requested_nblocks;
12675 size_t available_nblocks;
12676 int partition;
12677 diskaddr_t partition_offset;
12678 struct sd_xbuf *xp;
12679 int secmask = 0, blknomask = 0;
12680 ushort_t is_aligned = TRUE;
12681
12682 ASSERT(un != NULL);
12683 ASSERT(bp != NULL);
12684 ASSERT(!mutex_owned(SD_MUTEX(un)));
12685
12686 SD_TRACE(SD_LOG_IO_PARTITION, un,
12687 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12688
12689 xp = SD_GET_XBUF(bp);
12690 ASSERT(xp != NULL);
12691
12692 /*
12693 * If the geometry is not indicated as valid, attempt to access
12694 * the unit & verify the geometry/label. This can be the case for
12695 * removable-media devices, or if the device was opened in
12696 * NDELAY/NONBLOCK mode.
12697 */
12698 partition = SDPART(bp->b_edev);
12699
12700 if (!SD_IS_VALID_LABEL(un)) {
12701 sd_ssc_t *ssc;
12702 /*
12703 * Initialize sd_ssc_t for internal uscsi commands.
12704 * To avoid a potential performance issue, we allocate
12705 * this memory only when the label is invalid.
12706 */
12707 ssc = sd_ssc_init(un);
12708
12709 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12710 /*
12711 * For removable devices it is possible to start an
12712 * I/O without a media by opening the device in nodelay
12713 * mode. Also for writable CDs there can be many
12714 * scenarios where there is no geometry yet but the
12715 * volume manager is trying to issue a read() just
12716 * because it can see a TOC on the CD. So do not print
12717 * a message for removables.
12718 */
12719 if (!un->un_f_has_removable_media) {
12720 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12721 "i/o to invalid geometry\n");
12722 }
12723 bioerror(bp, EIO);
12724 bp->b_resid = bp->b_bcount;
12725 SD_BEGIN_IODONE(index, un, bp);
12726
12727 sd_ssc_fini(ssc);
12728 return;
12729 }
12730 sd_ssc_fini(ssc);
12731 }
12732
12733 nblocks = 0;
12734 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12735 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12736
12737 if (un->un_f_enable_rmw) {
12738 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12739 secmask = un->un_phy_blocksize - 1;
12740 } else {
12741 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12742 secmask = un->un_tgt_blocksize - 1;
12743 }
12744
12745 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12746 is_aligned = FALSE;
12747 }
12748
12749 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12750 /*
12751 * If the I/O is aligned, there is no need to involve RMW
12752 * (Read Modify Write). Convert the logical block number to
12753 * the target's physical sector number.
12754 */
12755 if (is_aligned) {
12756 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12757 } else {
12758 /*
12759 * There is no RMW if we're just reading, so don't
12760 * warn or error out because of it.
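 *
 * Worked example (hypothetical geometry): with un_tgt_blocksize =
 * 4096 and DEV_BSIZE = 512, the masks computed above are
 * blknomask = (4096 / 512) - 1 = 7 and secmask = 4095, so a write
 * of b_bcount = 1024 at b_lblkno = 3 is misaligned and lands here,
 * while a read of the same extent falls through silently.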
12761 */
12762 if (bp->b_flags & B_READ) {
12763 /*EMPTY*/
12764 } else if (!un->un_f_enable_rmw &&
12765 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12766 bp->b_flags |= B_ERROR;
12767 goto error_exit;
12768 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12769 mutex_enter(SD_MUTEX(un));
12770 if (!un->un_f_enable_rmw &&
12771 un->un_rmw_msg_timeid == NULL) {
12772 scsi_log(SD_DEVINFO(un), sd_label,
12773 CE_WARN, "I/O request is not "
12774 "aligned with the %d-byte disk sector size. "
12775 "It is handled through Read Modify "
12776 "Write, but the performance is "
12777 "very low.\n",
12778 un->un_tgt_blocksize);
12779 un->un_rmw_msg_timeid =
12780 timeout(sd_rmw_msg_print_handler,
12781 un, SD_RMW_MSG_PRINT_TIMEOUT);
12782 } else {
12783 un->un_rmw_incre_count++;
12784 }
12785 mutex_exit(SD_MUTEX(un));
12786 }
12787
12788 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12789 partition_offset = SD_TGT2SYSBLOCK(un,
12790 partition_offset);
12791 }
12792 }
12793
12794 /*
12795 * blocknum is the starting block number of the request. At this
12796 * point it is still relative to the start of the minor device.
12797 */
12798 blocknum = xp->xb_blkno;
12799
12800 /*
12801 * Legacy: If the starting block number is one past the last block
12802 * in the partition, do not set B_ERROR in the buf.
12803 */
12804 if (blocknum == nblocks) {
12805 goto error_exit;
12806 }
12807
12808 /*
12809 * Confirm that the first block of the request lies within the
12810 * partition limits. Also, the requested number of bytes must be
12811 * a multiple of the system block size.
12812 */
12813 if ((blocknum < 0) || (blocknum >= nblocks) ||
12814 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12815 bp->b_flags |= B_ERROR;
12816 goto error_exit;
12817 }
12818
12819 /*
12820 * If the requested # blocks exceeds the available # blocks, that
12821 * is an overrun of the partition.
12822 */
12823 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12824 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12825 } else {
12826 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12827 }
12828
12829 available_nblocks = (size_t)(nblocks - blocknum);
12830 ASSERT(nblocks >= blocknum);
12831
12832 if (requested_nblocks > available_nblocks) {
12833 size_t resid;
12834
12835 /*
12836 * Allocate an "overrun" buf to allow the request to proceed
12837 * for the amount of space available in the partition. The
12838 * amount not transferred will be added into the b_resid
12839 * when the operation is complete. The overrun buf
12840 * replaces the original buf here, and the original buf
12841 * is saved inside the overrun buf, for later use.
12842 */
12843 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12844 resid = SD_TGTBLOCKS2BYTES(un,
12845 (offset_t)(requested_nblocks - available_nblocks));
12846 } else {
12847 resid = SD_SYSBLOCKS2BYTES(
12848 (offset_t)(requested_nblocks - available_nblocks));
12849 }
12850
12851 size_t count = bp->b_bcount - resid;
12852 /*
12853 * Note: count is an unsigned entity, thus it will NEVER
12854 * be less than 0, so ASSERT that the original values are
12855 * correct.
12856 */
12857 ASSERT(bp->b_bcount >= resid);
12858
12859 bp = sd_bioclone_alloc(bp, count, blocknum,
12860 (int (*)(struct buf *))(uintptr_t)sd_mapblockaddr_iodone);
12861 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12862 ASSERT(xp != NULL);
12863 }
12864
12865 /* At this point there should be no residual for this buf. */
12866 ASSERT(bp->b_resid == 0);
12867
12868 /* Convert the block number to an absolute address.
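 * For example (illustrative values), an I/O at partition-relative
 * block 100 on a partition that cmlb_partinfo reports at offset
 * 2048 leaves here with xp->xb_blkno = 2148.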
*/
12869 xp->xb_blkno += partition_offset;
12870
12871 SD_NEXT_IOSTART(index, un, bp);
12872
12873 SD_TRACE(SD_LOG_IO_PARTITION, un,
12874 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12875
12876 return;
12877
12878 error_exit:
12879 bp->b_resid = bp->b_bcount;
12880 SD_BEGIN_IODONE(index, un, bp);
12881 SD_TRACE(SD_LOG_IO_PARTITION, un,
12882 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12883 }
12884
12885
12886 /*
12887 * Function: sd_mapblockaddr_iodone
12888 *
12889 * Description: Completion-side processing for partition management.
12890 *
12891 * Context: May be called under interrupt context
12892 */
12893
12894 static void
12895 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12896 {
12897 /* int partition; */ /* Not used, see below. */
12898 ASSERT(un != NULL);
12899 ASSERT(bp != NULL);
12900 ASSERT(!mutex_owned(SD_MUTEX(un)));
12901
12902 SD_TRACE(SD_LOG_IO_PARTITION, un,
12903 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12904
12905 if ((uintptr_t)bp->b_iodone == (uintptr_t)sd_mapblockaddr_iodone) {
12906 /*
12907 * We have an "overrun" buf to deal with...
12908 */
12909 struct sd_xbuf *xp;
12910 struct buf *obp; /* ptr to the original buf */
12911
12912 xp = SD_GET_XBUF(bp);
12913 ASSERT(xp != NULL);
12914
12915 /* Retrieve the pointer to the original buf */
12916 obp = (struct buf *)xp->xb_private;
12917 ASSERT(obp != NULL);
12918
12919 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12920 bioerror(obp, bp->b_error);
12921
12922 sd_bioclone_free(bp);
12923
12924 /*
12925 * Get back the original buf.
12926 * Note that since the restoration of xb_blkno below
12927 * was removed, the sd_xbuf is not needed.
12928 */
12929 bp = obp;
12930 /*
12931 * xp = SD_GET_XBUF(bp);
12932 * ASSERT(xp != NULL);
12933 */
12934 }
12935
12936 /*
12937 * Convert xp->xb_blkno back to a minor-device relative value.
12938 * Note: this has been commented out, as it is not needed in the
12939 * current implementation of the driver (i.e., since this function
12940 * is at the top of the layering chain, the info will be
12941 * discarded) and it is in the "hot" IO path.
12942 *
12943 * partition = getminor(bp->b_edev) & SDPART_MASK;
12944 * xp->xb_blkno -= un->un_offset[partition];
12945 */
12946
12947 SD_NEXT_IODONE(index, un, bp);
12948
12949 SD_TRACE(SD_LOG_IO_PARTITION, un,
12950 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12951 }
12952
12953
12954 /*
12955 * Function: sd_mapblocksize_iostart
12956 *
12957 * Description: Convert between system block size (un->un_sys_blocksize)
12958 * and target block size (un->un_tgt_blocksize).
12959 *
12960 * Context: Can sleep to allocate resources.
12961 *
12962 * Assumptions: A higher layer has already performed any partition validation,
12963 * and converted the xp->xb_blkno to an absolute value relative
12964 * to the start of the device.
12965 *
12966 * It is also assumed that the higher layer has implemented
12967 * an "overrun" mechanism for the case where the request would
12968 * read/write beyond the end of a partition. In this case we
12969 * assume (and ASSERT) that bp->b_resid == 0.
12970 *
12971 * Note: The implementation for this routine assumes the target
12972 * block size remains constant between allocation and transport.
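 *
 * Worked example (illustrative values): a 1024-byte request at
 * DEV_BSIZE block 5 on a 2048-byte-sector device gives
 *   first_byte  = 5 * 512 = 2560
 *   start_block = 2560 / 2048 = 1
 *   end_block   = (2560 + 1024 + 2047) / 2048 = 2
 * so one full 2048-byte target block is read into a shadow buf and
 * the user data begins at byte 2560 - 2048 = 512 within it.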
12973 */
12974
12975 static void
12976 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12977 {
12978 struct sd_mapblocksize_info *bsp;
12979 struct sd_xbuf *xp;
12980 offset_t first_byte;
12981 daddr_t start_block, end_block;
12982 daddr_t request_bytes;
12983 ushort_t is_aligned = FALSE;
12984
12985 ASSERT(un != NULL);
12986 ASSERT(bp != NULL);
12987 ASSERT(!mutex_owned(SD_MUTEX(un)));
12988 ASSERT(bp->b_resid == 0);
12989
12990 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12991 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12992
12993 /*
12994 * For a non-writable CD, a write request is an error
12995 */
12996 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12997 (un->un_f_mmc_writable_media == FALSE)) {
12998 bioerror(bp, EIO);
12999 bp->b_resid = bp->b_bcount;
13000 SD_BEGIN_IODONE(index, un, bp);
13001 return;
13002 }
13003
13004 /*
13005 * We do not need a shadow buf if the device is using
13006 * un->un_sys_blocksize as its block size or if bcount == 0.
13007 * In this case there is no layer-private data block allocated.
13008 */
13009 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13010 (bp->b_bcount == 0)) {
13011 goto done;
13012 }
13013
13014 #if defined(__i386) || defined(__amd64)
13015 /* We do not support non-block-aligned transfers for ROD devices */
13016 ASSERT(!ISROD(un));
13017 #endif
13018
13019 xp = SD_GET_XBUF(bp);
13020 ASSERT(xp != NULL);
13021
13022 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
13023 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
13024 un->un_tgt_blocksize, DEV_BSIZE);
13025 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
13026 "request start block:0x%x\n", xp->xb_blkno);
13027 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
13028 "request len:0x%x\n", bp->b_bcount);
13029
13030 /*
13031 * Allocate the layer-private data area for the mapblocksize layer.
13032 * Layers are allowed to use the xb_private member of the sd_xbuf
13033 * struct to store the pointer to their layer-private data block, but
13034 * each layer also has the responsibility of restoring the prior
13035 * contents of xb_private before returning the buf/xbuf to the
13036 * higher layer that sent it.
13037 *
13038 * Here we save the prior contents of xp->xb_private into the
13039 * bsp->mbs_oprivate field of our layer-private data area. This value
13040 * is restored by sd_mapblocksize_iodone() just prior to freeing up
13041 * the layer-private area and returning the buf/xbuf to the layer
13042 * that sent it.
13043 *
13044 * Note that here we use kmem_zalloc for the allocation as there are
13045 * parts of the mapblocksize code that expect certain fields to be
13046 * zero unless explicitly set to a required value.
13047 */
13048 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13049 bsp->mbs_oprivate = xp->xb_private;
13050 xp->xb_private = bsp;
13051
13052 /*
13053 * This treats the data on the disk (target) as an array of bytes.
13054 * first_byte is the byte offset, from the beginning of the device,
13055 * to the location of the request. This is converted from a
13056 * un->un_sys_blocksize block address to a byte offset, and then back
13057 * to a block address based upon a un->un_tgt_blocksize block size.
13058 *
13059 * xp->xb_blkno should be absolute upon entry into this function,
13060 * but it is based upon partitions that use the "system"
13061 * block size. It must be adjusted to reflect the block size of
13062 * the target.
*
13064 * Note that end_block is actually the block that follows the last
13065 * block of the request, but that's what is needed for the computation.
13066 */
13067 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13068 if (un->un_f_enable_rmw) {
13069 start_block = xp->xb_blkno =
13070 (first_byte / un->un_phy_blocksize) *
13071 (un->un_phy_blocksize / DEV_BSIZE);
13072 end_block = ((first_byte + bp->b_bcount +
13073 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
13074 (un->un_phy_blocksize / DEV_BSIZE);
13075 } else {
13076 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
13077 end_block = (first_byte + bp->b_bcount +
13078 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
13079 }
13080
13081 /* request_bytes is rounded up to a multiple of the target block size */
13082 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
13083
13084 /*
13085 * See if the starting address of the request and the request
13086 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
13087 * then we do not need to allocate a shadow buf to handle the request.
13088 */
13089 if (un->un_f_enable_rmw) {
13090 if (((first_byte % un->un_phy_blocksize) == 0) &&
13091 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
13092 is_aligned = TRUE;
13093 }
13094 } else {
13095 if (((first_byte % un->un_tgt_blocksize) == 0) &&
13096 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
13097 is_aligned = TRUE;
13098 }
13099 }
13100
13101 if ((bp->b_flags & B_READ) == 0) {
13102 /*
13103 * Lock the range for a write operation. An aligned request is
13104 * considered a simple write; otherwise the request must be a
13105 * read-modify-write.
13106 */
13107 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
13108 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
13109 }
13110
13111 /*
13112 * Alloc a shadow buf if the request is not aligned. Also, this is
13113 * where the READ command is generated for a read-modify-write. (The
13114 * write phase is deferred until after the read completes.)
13115 */
13116 if (is_aligned == FALSE) {
13117
13118 struct sd_mapblocksize_info *shadow_bsp;
13119 struct sd_xbuf *shadow_xp;
13120 struct buf *shadow_bp;
13121
13122 /*
13123 * Allocate the shadow buf and its associated xbuf. Note that
13124 * after this call the xb_blkno value in both the original
13125 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
13126 * same: absolute (relative to the start of the device) and
13127 * adjusted for the target block size. The b_blkno in the
13128 * shadow buf will also be set to this value. We should never
13129 * change b_blkno in the original bp, however.
13130 *
13131 * Note also that the shadow buf will always need to be a
13132 * READ command, regardless of whether the incoming command
13133 * is a READ or a WRITE.
13134 */
13135 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13136 xp->xb_blkno,
13137 (int (*)(struct buf *))(uintptr_t)sd_mapblocksize_iodone);
13138
13139 shadow_xp = SD_GET_XBUF(shadow_bp);
13140
13141 /*
13142 * Allocate the layer-private data for the shadow buf.
13143 * (No need to preserve xb_private in the shadow xbuf.)
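 *
 * For the worked example in the function header, the shadow buf
 * allocated above is request_bytes = 2048 bytes and is issued as a
 * READ even when the original request is a WRITE; the WRITE phase
 * is dispatched later from sd_mapblocksize_iodone().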
13144 */ 13145 shadow_xp->xb_private = shadow_bsp = 13146 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 13147 13148 /* 13149 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 13150 * to figure out where the start of the user data is (based upon 13151 * the system block size) in the data returned by the READ 13152 * command (which will be based upon the target blocksize). Note 13153 * that this is only really used if the request is unaligned. 13154 */ 13155 if (un->un_f_enable_rmw) { 13156 bsp->mbs_copy_offset = (ssize_t)(first_byte - 13157 ((offset_t)xp->xb_blkno * un->un_sys_blocksize)); 13158 ASSERT((bsp->mbs_copy_offset >= 0) && 13159 (bsp->mbs_copy_offset < un->un_phy_blocksize)); 13160 } else { 13161 bsp->mbs_copy_offset = (ssize_t)(first_byte - 13162 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 13163 ASSERT((bsp->mbs_copy_offset >= 0) && 13164 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 13165 } 13166 13167 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 13168 13169 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 13170 13171 /* Transfer the wmap (if any) to the shadow buf */ 13172 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 13173 bsp->mbs_wmp = NULL; 13174 13175 /* 13176 * The shadow buf goes on from here in place of the 13177 * original buf. 13178 */ 13179 shadow_bsp->mbs_orig_bp = bp; 13180 bp = shadow_bp; 13181 } 13182 13183 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13184 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 13185 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13186 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 13187 request_bytes); 13188 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13189 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 13190 13191 done: 13192 SD_NEXT_IOSTART(index, un, bp); 13193 13194 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 13195 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 13196 } 13197 13198 13199 /* 13200 * Function: sd_mapblocksize_iodone 13201 * 13202 * Description: Completion side processing for block-size mapping. 13203 * 13204 * Context: May be called under interrupt context 13205 */ 13206 13207 static void 13208 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 13209 { 13210 struct sd_mapblocksize_info *bsp; 13211 struct sd_xbuf *xp; 13212 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 13213 struct buf *orig_bp; /* ptr to the original buf */ 13214 offset_t shadow_end; 13215 offset_t request_end; 13216 offset_t shadow_start; 13217 ssize_t copy_offset; 13218 size_t copy_length; 13219 size_t shortfall; 13220 uint_t is_write; /* TRUE if this bp is a WRITE */ 13221 uint_t has_wmap; /* TRUE is this bp has a wmap */ 13222 13223 ASSERT(un != NULL); 13224 ASSERT(bp != NULL); 13225 13226 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 13227 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 13228 13229 /* 13230 * There is no shadow buf or layer-private data if the target is 13231 * using un->un_sys_blocksize as its block size or if bcount == 0. 13232 */ 13233 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || 13234 (bp->b_bcount == 0)) { 13235 goto exit; 13236 } 13237 13238 xp = SD_GET_XBUF(bp); 13239 ASSERT(xp != NULL); 13240 13241 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 13242 bsp = xp->xb_private; 13243 13244 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 13245 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 13246 13247 if (is_write) { 13248 /* 13249 * For a WRITE request we must free up the block range that 13250 * we have locked up. 
This holds regardless of whether this is
13251 * an aligned write request or a read-modify-write request.
13252 */
13253 sd_range_unlock(un, bsp->mbs_wmp);
13254 bsp->mbs_wmp = NULL;
13255 }
13256
13257 if ((uintptr_t)bp->b_iodone != (uintptr_t)sd_mapblocksize_iodone) {
13258 /*
13259 * An aligned read or write command will have no shadow buf;
13260 * there is not much else to do with it.
13261 */
13262 goto done;
13263 }
13264
13265 orig_bp = bsp->mbs_orig_bp;
13266 ASSERT(orig_bp != NULL);
13267 orig_xp = SD_GET_XBUF(orig_bp);
13268 ASSERT(orig_xp != NULL);
13269 ASSERT(!mutex_owned(SD_MUTEX(un)));
13270
13271 if (!is_write && has_wmap) {
13272 /*
13273 * A READ with a wmap means this is the READ phase of a
13274 * read-modify-write. If an error occurred on the READ then
13275 * we do not proceed with the WRITE phase or copy any data.
13276 * Just release the write maps and return with an error.
13277 */
13278 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13279 orig_bp->b_resid = orig_bp->b_bcount;
13280 bioerror(orig_bp, bp->b_error);
13281 sd_range_unlock(un, bsp->mbs_wmp);
13282 goto freebuf_done;
13283 }
13284 }
13285
13286 /*
13287 * Here is where we set up to copy the data from the shadow buf
13288 * into the space associated with the original buf.
13289 *
13290 * To deal with the conversion between block sizes, these
13291 * computations treat the data as an array of bytes, with the
13292 * first byte (byte 0) corresponding to the first byte in the
13293 * first block on the disk.
13294 */
13295
13296 /*
13297 * shadow_start and shadow_end delimit the location of the
13298 * data returned with the shadow IO request.
13299 */
13300 if (un->un_f_enable_rmw) {
13301 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13302 } else {
13303 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13304 }
13305 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13306
13307 /*
13308 * copy_offset gives the offset (in bytes) from the start of the first
13309 * block of the READ request to the beginning of the data. We retrieve
13310 * this value from the layer-private data of the ORIGINAL xbuf, as it
13311 * was saved there by sd_mapblocksize_iostart(). copy_length gives the
13312 * amount of data to be copied (in bytes).
13313 */
13314 copy_offset = bsp->mbs_copy_offset;
13315 if (un->un_f_enable_rmw) {
13316 ASSERT((copy_offset >= 0) &&
13317 (copy_offset < un->un_phy_blocksize));
13318 } else {
13319 ASSERT((copy_offset >= 0) &&
13320 (copy_offset < un->un_tgt_blocksize));
13321 }
13322
13323 copy_length = orig_bp->b_bcount;
13324 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13325
13326 /*
13327 * Set up the resid and error fields of orig_bp as appropriate.
13328 */
13329 if (shadow_end >= request_end) {
13330 /* We got all the requested data; set resid to zero */
13331 orig_bp->b_resid = 0;
13332 } else {
13333 /*
13334 * We failed to get enough data to fully satisfy the original
13335 * request. Just copy back whatever data we got and set
13336 * up the residual and error code as required.
13337 *
13338 * 'shortfall' is the amount by which the data received with the
13339 * shadow buf has "fallen short" of the requested amount.
13340 */
13341 shortfall = (size_t)(request_end - shadow_end);
13342
13343 if (shortfall > orig_bp->b_bcount) {
13344 /*
13345 * We did not get enough data to even partially
13346 * fulfill the original request. The residual is
13347 * equal to the amount requested.
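 *
 * Hypothetical numbers: with shadow_start = 2048, copy_offset = 512
 * and orig_bp->b_bcount = 1024, request_end = 3584; if the shadow
 * READ returns only 256 of its 2048 bytes, shadow_end = 2304 and
 * shortfall = 1280 > 1024, so the whole original count is counted
 * as residual.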
13348 */ 13349 orig_bp->b_resid = orig_bp->b_bcount; 13350 } else { 13351 /* 13352 * We did not get all the data that we requested 13353 * from the device, but we will try to return what 13354 * portion we did get. 13355 */ 13356 orig_bp->b_resid = shortfall; 13357 } 13358 ASSERT(copy_length >= orig_bp->b_resid); 13359 copy_length -= orig_bp->b_resid; 13360 } 13361 13362 /* Propagate the error code from the shadow buf to the original buf */ 13363 bioerror(orig_bp, bp->b_error); 13364 13365 if (is_write) { 13366 goto freebuf_done; /* No data copying for a WRITE */ 13367 } 13368 13369 if (has_wmap) { 13370 /* 13371 * This is a READ command from the READ phase of a 13372 * read-modify-write request. We have to copy the data given 13373 * by the user OVER the data returned by the READ command, 13374 * then convert the command from a READ to a WRITE and send 13375 * it back to the target. 13376 */ 13377 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 13378 copy_length); 13379 13380 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 13381 13382 /* 13383 * Dispatch the WRITE command to the taskq thread, which 13384 * will in turn send the command to the target. When the 13385 * WRITE command completes, we (sd_mapblocksize_iodone()) 13386 * will get called again as part of the iodone chain 13387 * processing for it. Note that we will still be dealing 13388 * with the shadow buf at that point. 13389 */ 13390 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 13391 KM_NOSLEEP) != TASKQID_INVALID) { 13392 /* 13393 * Dispatch was successful so we are done. Return 13394 * without going any higher up the iodone chain. Do 13395 * not free up any layer-private data until after the 13396 * WRITE completes. 13397 */ 13398 return; 13399 } 13400 13401 /* 13402 * Dispatch of the WRITE command failed; set up the error 13403 * condition and send this IO back up the iodone chain. 13404 */ 13405 bioerror(orig_bp, EIO); 13406 orig_bp->b_resid = orig_bp->b_bcount; 13407 13408 } else { 13409 /* 13410 * This is a regular READ request (ie, not a RMW). Copy the 13411 * data from the shadow buf into the original buf. The 13412 * copy_offset compensates for any "misalignment" between the 13413 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13414 * original buf (with its un->un_sys_blocksize blocks). 13415 */ 13416 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13417 copy_length); 13418 } 13419 13420 freebuf_done: 13421 13422 /* 13423 * At this point we still have both the shadow buf AND the original 13424 * buf to deal with, as well as the layer-private data area in each. 13425 * Local variables are as follows: 13426 * 13427 * bp -- points to shadow buf 13428 * xp -- points to xbuf of shadow buf 13429 * bsp -- points to layer-private data area of shadow buf 13430 * orig_bp -- points to original buf 13431 * 13432 * First free the shadow buf and its associated xbuf, then free the 13433 * layer-private data area from the shadow buf. There is no need to 13434 * restore xb_private in the shadow xbuf. 13435 */ 13436 sd_shadow_buf_free(bp); 13437 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13438 13439 /* 13440 * Now update the local variables to point to the original buf, xbuf, 13441 * and layer-private area. 
13442 */ 13443 bp = orig_bp; 13444 xp = SD_GET_XBUF(bp); 13445 ASSERT(xp != NULL); 13446 ASSERT(xp == orig_xp); 13447 bsp = xp->xb_private; 13448 ASSERT(bsp != NULL); 13449 13450 done: 13451 /* 13452 * Restore xb_private to whatever it was set to by the next higher 13453 * layer in the chain, then free the layer-private data area. 13454 */ 13455 xp->xb_private = bsp->mbs_oprivate; 13456 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13457 13458 exit: 13459 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13460 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13461 13462 SD_NEXT_IODONE(index, un, bp); 13463 } 13464 13465 13466 /* 13467 * Function: sd_checksum_iostart 13468 * 13469 * Description: A stub function for a layer that's currently not used. 13470 * For now just a placeholder. 13471 * 13472 * Context: Kernel thread context 13473 */ 13474 13475 static void 13476 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13477 { 13478 ASSERT(un != NULL); 13479 ASSERT(bp != NULL); 13480 ASSERT(!mutex_owned(SD_MUTEX(un))); 13481 SD_NEXT_IOSTART(index, un, bp); 13482 } 13483 13484 13485 /* 13486 * Function: sd_checksum_iodone 13487 * 13488 * Description: A stub function for a layer that's currently not used. 13489 * For now just a placeholder. 13490 * 13491 * Context: May be called under interrupt context 13492 */ 13493 13494 static void 13495 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13496 { 13497 ASSERT(un != NULL); 13498 ASSERT(bp != NULL); 13499 ASSERT(!mutex_owned(SD_MUTEX(un))); 13500 SD_NEXT_IODONE(index, un, bp); 13501 } 13502 13503 13504 /* 13505 * Function: sd_checksum_uscsi_iostart 13506 * 13507 * Description: A stub function for a layer that's currently not used. 13508 * For now just a placeholder. 13509 * 13510 * Context: Kernel thread context 13511 */ 13512 13513 static void 13514 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13515 { 13516 ASSERT(un != NULL); 13517 ASSERT(bp != NULL); 13518 ASSERT(!mutex_owned(SD_MUTEX(un))); 13519 SD_NEXT_IOSTART(index, un, bp); 13520 } 13521 13522 13523 /* 13524 * Function: sd_checksum_uscsi_iodone 13525 * 13526 * Description: A stub function for a layer that's currently not used. 13527 * For now just a placeholder. 13528 * 13529 * Context: May be called under interrupt context 13530 */ 13531 13532 static void 13533 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13534 { 13535 ASSERT(un != NULL); 13536 ASSERT(bp != NULL); 13537 ASSERT(!mutex_owned(SD_MUTEX(un))); 13538 SD_NEXT_IODONE(index, un, bp); 13539 } 13540 13541 13542 /* 13543 * Function: sd_pm_iostart 13544 * 13545 * Description: iostart-side routine for Power mangement. 13546 * 13547 * Context: Kernel thread context 13548 */ 13549 13550 static void 13551 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13552 { 13553 ASSERT(un != NULL); 13554 ASSERT(bp != NULL); 13555 ASSERT(!mutex_owned(SD_MUTEX(un))); 13556 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13557 13558 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13559 13560 if (sd_pm_entry(un) != DDI_SUCCESS) { 13561 /* 13562 * Set up to return the failed buf back up the 'iodone' 13563 * side of the calling chain. 
13564 */
13565 bioerror(bp, EIO);
13566 bp->b_resid = bp->b_bcount;
13567
13568 SD_BEGIN_IODONE(index, un, bp);
13569
13570 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13571 return;
13572 }
13573
13574 SD_NEXT_IOSTART(index, un, bp);
13575
13576 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13577 }
13578
13579
13580 /*
13581 * Function: sd_pm_iodone
13582 *
13583 * Description: iodone-side routine for power management.
13584 *
13585 * Context: may be called from interrupt context
13586 */
13587
13588 static void
13589 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13590 {
13591 ASSERT(un != NULL);
13592 ASSERT(bp != NULL);
13593 ASSERT(!mutex_owned(&un->un_pm_mutex));
13594
13595 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13596
13597 /*
13598 * After attach the following flag is only read, so don't
13599 * take the penalty of acquiring a mutex for it.
13600 */
13601 if (un->un_f_pm_is_enabled == TRUE) {
13602 sd_pm_exit(un);
13603 }
13604
13605 SD_NEXT_IODONE(index, un, bp);
13606
13607 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13608 }
13609
13610
13611 /*
13612 * Function: sd_core_iostart
13613 *
13614 * Description: Primary driver function for enqueuing buf(9S) structs from
13615 * the system and initiating IO to the target device.
13616 *
13617 * Context: Kernel thread context. Can sleep.
13618 *
13619 * Assumptions: - The given xp->xb_blkno is absolute
13620 * (ie, relative to the start of the device).
13621 * - The IO is to be done using the native blocksize of
13622 * the device, as specified in un->un_tgt_blocksize.
13623 */
13624 /* ARGSUSED */
13625 static void
13626 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13627 {
13628 struct sd_xbuf *xp;
13629
13630 ASSERT(un != NULL);
13631 ASSERT(bp != NULL);
13632 ASSERT(!mutex_owned(SD_MUTEX(un)));
13633 ASSERT(bp->b_resid == 0);
13634
13635 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13636
13637 xp = SD_GET_XBUF(bp);
13638 ASSERT(xp != NULL);
13639
13640 mutex_enter(SD_MUTEX(un));
13641
13642 /*
13643 * If we are currently in the failfast state, fail any new IO
13644 * that has B_FAILFAST set, then return.
13645 */
13646 if ((bp->b_flags & B_FAILFAST) &&
13647 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13648 mutex_exit(SD_MUTEX(un));
13649 bioerror(bp, EIO);
13650 bp->b_resid = bp->b_bcount;
13651 SD_BEGIN_IODONE(index, un, bp);
13652 return;
13653 }
13654
13655 if (SD_IS_DIRECT_PRIORITY(xp)) {
13656 /*
13657 * Priority command -- transport it immediately.
13658 *
13659 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13660 * because all direct priority commands should be associated
13661 * with error recovery actions which we don't want to retry.
13662 */
13663 sd_start_cmds(un, bp);
13664 } else {
13665 /*
13666 * Normal command -- add it to the wait queue, then start
13667 * transporting commands from the wait queue.
13668 */
13669 sd_add_buf_to_waitq(un, bp);
13670 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13671 sd_start_cmds(un, NULL);
13672 }
13673
13674 mutex_exit(SD_MUTEX(un));
13675
13676 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13677 }
13678
13679
13680 /*
13681 * Function: sd_init_cdb_limits
13682 *
13683 * Description: This handles scsi_pkt initialization differences
13684 * between the driver platforms.
13685 * 13686 * Legacy behaviors: 13687 * 13688 * If the block number or the sector count exceeds the 13689 * capabilities of a Group 0 command, shift over to a 13690 * Group 1 command. We don't blindly use Group 1 13691 * commands because a) some drives (CDC Wren IVs) get a 13692 * bit confused, and b) there is probably a fair amount 13693 * of speed difference for a target to receive and decode 13694 * a 10 byte command instead of a 6 byte command. 13695 * 13696 * The xfer time difference of 6 vs 10 byte CDBs is 13697 * still significant so this code is still worthwhile. 13698 * 10 byte CDBs are very inefficient with the fas HBA driver 13699 * and older disks. Each CDB byte took 1 usec with some 13700 * popular disks. 13701 * 13702 * Context: Must be called at attach time 13703 */ 13704 13705 static void 13706 sd_init_cdb_limits(struct sd_lun *un) 13707 { 13708 int hba_cdb_limit; 13709 13710 /* 13711 * Use CDB_GROUP1 commands for most devices except for 13712 * parallel SCSI fixed drives in which case we get better 13713 * performance using CDB_GROUP0 commands (where applicable). 13714 */ 13715 un->un_mincdb = SD_CDB_GROUP1; 13716 #if !defined(__fibre) 13717 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13718 !un->un_f_has_removable_media) { 13719 un->un_mincdb = SD_CDB_GROUP0; 13720 } 13721 #endif 13722 13723 /* 13724 * Try to read the max-cdb-length supported by HBA. 13725 */ 13726 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13727 if (0 >= un->un_max_hba_cdb) { 13728 un->un_max_hba_cdb = CDB_GROUP4; 13729 hba_cdb_limit = SD_CDB_GROUP4; 13730 } else if (0 < un->un_max_hba_cdb && 13731 un->un_max_hba_cdb < CDB_GROUP1) { 13732 hba_cdb_limit = SD_CDB_GROUP0; 13733 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13734 un->un_max_hba_cdb < CDB_GROUP5) { 13735 hba_cdb_limit = SD_CDB_GROUP1; 13736 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13737 un->un_max_hba_cdb < CDB_GROUP4) { 13738 hba_cdb_limit = SD_CDB_GROUP5; 13739 } else { 13740 hba_cdb_limit = SD_CDB_GROUP4; 13741 } 13742 13743 /* 13744 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13745 * commands for fixed disks unless we are building for a 32 bit 13746 * kernel. 13747 */ 13748 #ifdef _LP64 13749 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13750 min(hba_cdb_limit, SD_CDB_GROUP4); 13751 #else 13752 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13753 min(hba_cdb_limit, SD_CDB_GROUP1); 13754 #endif 13755 13756 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13757 ? sizeof (struct scsi_arq_status) : 1); 13758 if (!ISCD(un)) 13759 un->un_cmd_timeout = (ushort_t)sd_io_time; 13760 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 13761 } 13762 13763 13764 /* 13765 * Function: sd_initpkt_for_buf 13766 * 13767 * Description: Allocate and initialize for transport a scsi_pkt struct, 13768 * based upon the info specified in the given buf struct. 13769 * 13770 * Assumes the xb_blkno in the request is absolute (ie, 13771 * relative to the start of the device (NOT partition!). 13772 * Also assumes that the request is using the native block 13773 * size of the device (as returned by the READ CAPACITY 13774 * command). 13775 * 13776 * Return Code: SD_PKT_ALLOC_SUCCESS 13777 * SD_PKT_ALLOC_FAILURE 13778 * SD_PKT_ALLOC_FAILURE_NO_DMA 13779 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13780 * 13781 * Context: Kernel thread and may be called from software interrupt context 13782 * as part of a sdrunout callback. 
This function may not block or
13783 * call routines that block.
13784 */
13785
13786 static int
13787 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13788 {
13789 struct sd_xbuf *xp;
13790 struct scsi_pkt *pktp = NULL;
13791 struct sd_lun *un;
13792 size_t blockcount;
13793 daddr_t startblock;
13794 int rval;
13795 int cmd_flags;
13796
13797 ASSERT(bp != NULL);
13798 ASSERT(pktpp != NULL);
13799 xp = SD_GET_XBUF(bp);
13800 ASSERT(xp != NULL);
13801 un = SD_GET_UN(bp);
13802 ASSERT(un != NULL);
13803 ASSERT(mutex_owned(SD_MUTEX(un)));
13804 ASSERT(bp->b_resid == 0);
13805
13806 SD_TRACE(SD_LOG_IO_CORE, un,
13807 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13808
13809 mutex_exit(SD_MUTEX(un));
13810
13811 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13812 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13813 /*
13814 * Already have a scsi_pkt -- just need DMA resources.
13815 * We must recompute the CDB in case the mapping returns
13816 * a nonzero pkt_resid.
13817 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13818 * that is being retried, the unmap/remap of the DMA resources
13819 * will result in the entire transfer starting over again
13820 * from the very first block.
13821 */
13822 ASSERT(xp->xb_pktp != NULL);
13823 pktp = xp->xb_pktp;
13824 } else {
13825 pktp = NULL;
13826 }
13827 #endif /* __i386 || __amd64 */
13828
13829 startblock = xp->xb_blkno; /* Absolute block num. */
13830 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13831
13832 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13833
13834 /*
13835 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13836 * call scsi_init_pkt, and build the CDB.
13837 */
13838 rval = sd_setup_rw_pkt(un, &pktp, bp,
13839 cmd_flags, sdrunout, (caddr_t)un,
13840 startblock, blockcount);
13841
13842 if (rval == 0) {
13843 /*
13844 * Success.
13845 *
13846 * If partial DMA is being used and required for this transfer,
13847 * set it up here.
13848 */
13849 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13850 (pktp->pkt_resid != 0)) {
13851
13852 /*
13853 * Save the CDB length and pkt_resid for the
13854 * next xfer
13855 */
13856 xp->xb_dma_resid = pktp->pkt_resid;
13857
13858 /* rezero resid */
13859 pktp->pkt_resid = 0;
13860
13861 } else {
13862 xp->xb_dma_resid = 0;
13863 }
13864
13865 pktp->pkt_flags = un->un_tagflags;
13866 pktp->pkt_time = un->un_cmd_timeout;
13867 pktp->pkt_comp = sdintr;
13868
13869 pktp->pkt_private = bp;
13870 *pktpp = pktp;
13871
13872 SD_TRACE(SD_LOG_IO_CORE, un,
13873 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13874
13875 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13876 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13877 #endif
13878
13879 mutex_enter(SD_MUTEX(un));
13880 return (SD_PKT_ALLOC_SUCCESS);
13881
13882 }
13883
13884 /*
13885 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13886 * from sd_setup_rw_pkt.
13887 */
13888 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13889
13890 if (rval == SD_PKT_ALLOC_FAILURE) {
13891 *pktpp = NULL;
13892 /*
13893 * Set the driver state to RWAIT to indicate the driver
13894 * is waiting on resource allocations. The driver will not
13895 * suspend, pm_suspend, or detach while the state is RWAIT.
13896 */
13897 mutex_enter(SD_MUTEX(un));
13898 New_state(un, SD_STATE_RWAIT);
13899
13900 SD_ERROR(SD_LOG_IO_CORE, un,
13901 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp);
13902
13903 if ((bp->b_flags & B_ERROR) != 0) {
13904 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13905 }
13906 return (SD_PKT_ALLOC_FAILURE);
13907 } else {
13908 /*
13909 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13910 *
13911 * This should never happen. Maybe someone messed with the
13912 * kernel's minphys?
13913 */
13914 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13915 "Request rejected: too large for CDB: "
13916 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13917 SD_ERROR(SD_LOG_IO_CORE, un,
13918 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13919 mutex_enter(SD_MUTEX(un));
13920 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13921
13922 }
13923 }
13924
13925
13926 /*
13927 * Function: sd_destroypkt_for_buf
13928 *
13929 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13930 *
13931 * Context: Kernel thread or interrupt context
13932 */
13933
13934 static void
13935 sd_destroypkt_for_buf(struct buf *bp)
13936 {
13937 ASSERT(bp != NULL);
13938 ASSERT(SD_GET_UN(bp) != NULL);
13939
13940 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13941 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13942
13943 ASSERT(SD_GET_PKTP(bp) != NULL);
13944 scsi_destroy_pkt(SD_GET_PKTP(bp));
13945
13946 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13947 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13948 }
13949
13950 /*
13951 * Function: sd_setup_rw_pkt
13952 *
13953 * Description: Determines appropriate CDB group for the requested LBA
13954 * and transfer length, calls scsi_init_pkt, and builds
13955 * the CDB. Do not use for partial DMA transfers except
13956 * for the initial transfer since the CDB size must
13957 * remain constant.
13958 *
13959 * Context: Kernel thread and may be called from software interrupt
13960 * context as part of a sdrunout callback. This function may not
13961 * block or call routines that block.
13962 */
13963
13964
13965 int
13966 sd_setup_rw_pkt(struct sd_lun *un,
13967 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13968 int (*callback)(caddr_t), caddr_t callback_arg,
13969 diskaddr_t lba, uint32_t blockcount)
13970 {
13971 struct scsi_pkt *return_pktp;
13972 union scsi_cdb *cdbp;
13973 struct sd_cdbinfo *cp = NULL;
13974 int i;
13975
13976 /*
13977 * See which size CDB to use, based upon the request.
13978 */
13979 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13980
13981 /*
13982 * Check lba and block count against sd_cdbtab limits.
13983 * In the partial DMA case, we have to use the same size
13984 * CDB for all the transfers. Check lba + blockcount
13985 * against the max LBA so we know that segment of the
13986 * transfer can use the CDB we select.
13987 */
13988 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13989 (blockcount <= sd_cdbtab[i].sc_maxlen)) {

13990
13991 /*
13992 * The command will fit into the CDB type
13993 * specified by sd_cdbtab[i].
13994 */
13995 cp = sd_cdbtab + i;
13996
13997 /*
13998 * Call scsi_init_pkt so we can fill in the
13999 * CDB.
14000 */
14001 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
14002 bp, cp->sc_grpcode, un->un_status_len, 0,
14003 flags, callback, callback_arg);
14004
14005 if (return_pktp != NULL) {
14006
14007 /*
14008 * Return new value of pkt
14009 */
14010 *pktpp = return_pktp;
14011
14012 /*
14013 * To be safe, zero the CDB ensuring there is
14014 * no leftover data from a previous command.
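 * (Illustrative limits for the groups selected above: a 6-byte
 * Group 0 CDB carries a 21-bit LBA and an 8-bit count, while a
 * 10-byte Group 1 CDB carries a 32-bit LBA and a 16-bit count, so
 * e.g. lba = 0x200000 already forces Group 1 or larger.)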
14015 */ 14016 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 14017 14018 /* 14019 * Handle partial DMA mapping 14020 */ 14021 if (return_pktp->pkt_resid != 0) { 14022 14023 /* 14024 * Not going to xfer as many blocks as 14025 * originally expected 14026 */ 14027 blockcount -= 14028 SD_BYTES2TGTBLOCKS(un, 14029 return_pktp->pkt_resid); 14030 } 14031 14032 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 14033 14034 /* 14035 * Set command byte based on the CDB 14036 * type we matched. 14037 */ 14038 cdbp->scc_cmd = cp->sc_grpmask | 14039 ((bp->b_flags & B_READ) ? 14040 SCMD_READ : SCMD_WRITE); 14041 14042 SD_FILL_SCSI1_LUN(un, return_pktp); 14043 14044 /* 14045 * Fill in LBA and length 14046 */ 14047 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 14048 (cp->sc_grpcode == CDB_GROUP4) || 14049 (cp->sc_grpcode == CDB_GROUP0) || 14050 (cp->sc_grpcode == CDB_GROUP5)); 14051 14052 if (cp->sc_grpcode == CDB_GROUP1) { 14053 FORMG1ADDR(cdbp, lba); 14054 FORMG1COUNT(cdbp, blockcount); 14055 return (0); 14056 } else if (cp->sc_grpcode == CDB_GROUP4) { 14057 FORMG4LONGADDR(cdbp, lba); 14058 FORMG4COUNT(cdbp, blockcount); 14059 return (0); 14060 } else if (cp->sc_grpcode == CDB_GROUP0) { 14061 FORMG0ADDR(cdbp, lba); 14062 FORMG0COUNT(cdbp, blockcount); 14063 return (0); 14064 } else if (cp->sc_grpcode == CDB_GROUP5) { 14065 FORMG5ADDR(cdbp, lba); 14066 FORMG5COUNT(cdbp, blockcount); 14067 return (0); 14068 } 14069 14070 /* 14071 * It should be impossible to not match one 14072 * of the CDB types above, so we should never 14073 * reach this point. Set the CDB command byte 14074 * to test-unit-ready to avoid writing 14075 * to somewhere we don't intend. 14076 */ 14077 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 14078 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 14079 } else { 14080 /* 14081 * Couldn't get scsi_pkt 14082 */ 14083 return (SD_PKT_ALLOC_FAILURE); 14084 } 14085 } 14086 } 14087 14088 /* 14089 * None of the available CDB types were suitable. This really 14090 * should never happen: on a 64 bit system we support 14091 * READ16/WRITE16 which will hold an entire 64 bit disk address 14092 * and on a 32 bit system we will refuse to bind to a device 14093 * larger than 2TB so addresses will never be larger than 32 bits. 14094 */ 14095 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 14096 } 14097 14098 /* 14099 * Function: sd_setup_next_rw_pkt 14100 * 14101 * Description: Setup packet for partial DMA transfers, except for the 14102 * initial transfer. sd_setup_rw_pkt should be used for 14103 * the initial transfer. 14104 * 14105 * Context: Kernel thread and may be called from interrupt context. 14106 */ 14107 14108 int 14109 sd_setup_next_rw_pkt(struct sd_lun *un, 14110 struct scsi_pkt *pktp, struct buf *bp, 14111 diskaddr_t lba, uint32_t blockcount) 14112 { 14113 uchar_t com; 14114 union scsi_cdb *cdbp; 14115 uchar_t cdb_group_id; 14116 14117 ASSERT(pktp != NULL); 14118 ASSERT(pktp->pkt_cdbp != NULL); 14119 14120 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 14121 com = cdbp->scc_cmd; 14122 cdb_group_id = CDB_GROUPID(com); 14123 14124 ASSERT((cdb_group_id == CDB_GROUPID_0) || 14125 (cdb_group_id == CDB_GROUPID_1) || 14126 (cdb_group_id == CDB_GROUPID_4) || 14127 (cdb_group_id == CDB_GROUPID_5)); 14128 14129 /* 14130 * Move pkt to the next portion of the xfer. 14131 * func is NULL_FUNC so we do not have to release 14132 * the disk mutex here. 14133 */ 14134 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 14135 NULL_FUNC, NULL) == pktp) { 14136 /* Success. 
Handle partial DMA */
14137 if (pktp->pkt_resid != 0) {
14138 blockcount -=
14139 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14140 }
14141
14142 cdbp->scc_cmd = com;
14143 SD_FILL_SCSI1_LUN(un, pktp);
14144 if (cdb_group_id == CDB_GROUPID_1) {
14145 FORMG1ADDR(cdbp, lba);
14146 FORMG1COUNT(cdbp, blockcount);
14147 return (0);
14148 } else if (cdb_group_id == CDB_GROUPID_4) {
14149 FORMG4LONGADDR(cdbp, lba);
14150 FORMG4COUNT(cdbp, blockcount);
14151 return (0);
14152 } else if (cdb_group_id == CDB_GROUPID_0) {
14153 FORMG0ADDR(cdbp, lba);
14154 FORMG0COUNT(cdbp, blockcount);
14155 return (0);
14156 } else if (cdb_group_id == CDB_GROUPID_5) {
14157 FORMG5ADDR(cdbp, lba);
14158 FORMG5COUNT(cdbp, blockcount);
14159 return (0);
14160 }
14161
14162 /* Unreachable */
14163 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14164 }
14165
14166 /*
14167 * Error setting up next portion of cmd transfer.
14168 * Something is definitely very wrong and this
14169 * should not happen.
14170 */
14171 return (SD_PKT_ALLOC_FAILURE);
14172 }
14173
14174 /*
14175 * Function: sd_initpkt_for_uscsi
14176 *
14177 * Description: Allocate and initialize for transport a scsi_pkt struct,
14178 * based upon the info specified in the given uscsi_cmd struct.
14179 *
14180 * Return Code: SD_PKT_ALLOC_SUCCESS
14181 * SD_PKT_ALLOC_FAILURE
14182 * SD_PKT_ALLOC_FAILURE_NO_DMA
14183 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14184 *
14185 * Context: Kernel thread and may be called from software interrupt context
14186 * as part of a sdrunout callback. This function may not block or
14187 * call routines that block.
14188 */
14189
14190 static int
14191 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14192 {
14193 struct uscsi_cmd *uscmd;
14194 struct sd_xbuf *xp;
14195 struct scsi_pkt *pktp;
14196 struct sd_lun *un;
14197 uint32_t flags = 0;
14198
14199 ASSERT(bp != NULL);
14200 ASSERT(pktpp != NULL);
14201 xp = SD_GET_XBUF(bp);
14202 ASSERT(xp != NULL);
14203 un = SD_GET_UN(bp);
14204 ASSERT(un != NULL);
14205 ASSERT(mutex_owned(SD_MUTEX(un)));
14206
14207 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14208 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14209 ASSERT(uscmd != NULL);
14210
14211 SD_TRACE(SD_LOG_IO_CORE, un,
14212 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14213
14214 /*
14215 * Allocate the scsi_pkt for the command.
14216 *
14217 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14218 * during scsi_init_pkt time and will continue to use the
14219 * same path as long as the same scsi_pkt is used without
14220 * intervening scsi_dmafree(). Since the uscsi path does
14221 * not call scsi_dmafree() before retrying a failed command, it
14222 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT
14223 * set, so that scsi_vhci can use another available path for the
14224 * retry. Besides, a uscsi command does not allow DMA breakup,
14225 * so there is no need to set the PKT_DMA_PARTIAL flag.
14226 *
14227 * More fundamentally, we can't support breaking up this DMA into
14228 * multiple windows on x86. There is, in general, no guarantee
14229 * that arbitrary SCSI commands are idempotent, which is required
14230 * if we want to use multiple windows for a given command.
14231 */
14232 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14233 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14234 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen,
14235 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14236 - sizeof (struct scsi_extended_sense)), 0,
14237 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14238 sdrunout, (caddr_t)un);
14239 } else {
14240 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14241 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14242 sizeof (struct scsi_arq_status), 0,
14243 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14244 sdrunout, (caddr_t)un);
14245 }
14246
14247 if (pktp == NULL) {
14248 *pktpp = NULL;
14249 /*
14250 * Set the driver state to RWAIT to indicate the driver
14251 * is waiting on resource allocations. The driver will not
14252 * suspend, pm_suspend, or detach while the state is RWAIT.
14253 */
14254 New_state(un, SD_STATE_RWAIT);
14255
14256 SD_ERROR(SD_LOG_IO_CORE, un,
14257 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14258
14259 if ((bp->b_flags & B_ERROR) != 0) {
14260 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14261 }
14262 return (SD_PKT_ALLOC_FAILURE);
14263 }
14264
14265 /*
14266 * We do not do DMA breakup for USCSI commands, so return failure
14267 * here if all the needed DMA resources were not allocated.
14268 */
14269 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14270 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14271 scsi_destroy_pkt(pktp);
14272 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14273 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14274 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14275 }
14276
14277 /* Init the cdb from the given uscsi struct */
14278 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14279 uscmd->uscsi_cdb[0], 0, 0, 0);
14280
14281 SD_FILL_SCSI1_LUN(un, pktp);
14282
14283 /*
14284 * Set up the optional USCSI flags. See the uscsi(7I) man page
14285 * for a listing of the supported flags.
14286 */
14287
14288 if (uscmd->uscsi_flags & USCSI_SILENT) {
14289 flags |= FLAG_SILENT;
14290 }
14291
14292 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14293 flags |= FLAG_DIAGNOSE;
14294 }
14295
14296 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14297 flags |= FLAG_ISOLATE;
14298 }
14299
14300 if (un->un_f_is_fibre == FALSE) {
14301 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14302 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14303 }
14304 }
14305
14306 /*
14307 * Set the pkt flags here so we save time later.
14308 * Note: These flags are NOT in the uscsi man page!!!
14309 */
14310 if (uscmd->uscsi_flags & USCSI_HEAD) {
14311 flags |= FLAG_HEAD;
14312 }
14313
14314 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14315 flags |= FLAG_NOINTR;
14316 }
14317
14318 /*
14319 * For tagged queueing, things get a bit complicated.
14320 * Check first for head of queue and last for ordered queue.
14321 * If neither head nor ordered, use the default driver tag flags.
14322 */
14323 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14324 if (uscmd->uscsi_flags & USCSI_HTAG) {
14325 flags |= FLAG_HTAG;
14326 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14327 flags |= FLAG_OTAG;
14328 } else {
14329 flags |= un->un_tagflags & FLAG_TAGMASK;
14330 }
14331 }
14332
14333 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14334 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14335 }
14336
14337 pktp->pkt_flags = flags;
14338
14339 /* Transfer uscsi information to scsi_pkt */
14340 (void) scsi_uscsi_pktinit(uscmd, pktp);
14341
14342 /* Copy the caller's CDB into the pkt...
*/
14343 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14344
14345 if (uscmd->uscsi_timeout == 0) {
14346 pktp->pkt_time = un->un_uscsi_timeout;
14347 } else {
14348 pktp->pkt_time = uscmd->uscsi_timeout;
14349 }
14350
14351 /* need it later to identify USCSI request in sdintr */
14352 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14353
14354 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14355
14356 pktp->pkt_private = bp;
14357 pktp->pkt_comp = sdintr;
14358 *pktpp = pktp;
14359
14360 SD_TRACE(SD_LOG_IO_CORE, un,
14361 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14362
14363 return (SD_PKT_ALLOC_SUCCESS);
14364 }
14365
14366
14367 /*
14368 * Function: sd_destroypkt_for_uscsi
14369 *
14370 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14371 * IOs. Also saves relevant info into the associated uscsi_cmd
14372 * struct.
14373 *
14374 * Context: May be called under interrupt context
14375 */
14376
14377 static void
14378 sd_destroypkt_for_uscsi(struct buf *bp)
14379 {
14380 struct uscsi_cmd *uscmd;
14381 struct sd_xbuf *xp;
14382 struct scsi_pkt *pktp;
14383 struct sd_lun *un;
14384 struct sd_uscsi_info *suip;
14385
14386 ASSERT(bp != NULL);
14387 xp = SD_GET_XBUF(bp);
14388 ASSERT(xp != NULL);
14389 un = SD_GET_UN(bp);
14390 ASSERT(un != NULL);
14391 ASSERT(!mutex_owned(SD_MUTEX(un)));
14392 pktp = SD_GET_PKTP(bp);
14393 ASSERT(pktp != NULL);
14394
14395 SD_TRACE(SD_LOG_IO_CORE, un,
14396 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14397
14398 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14399 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14400 ASSERT(uscmd != NULL);
14401
14402 /* Save the status and the residual into the uscsi_cmd struct */
14403 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14404 uscmd->uscsi_resid = bp->b_resid;
14405
14406 /* Transfer scsi_pkt information to uscsi */
14407 (void) scsi_uscsi_pktfini(pktp, uscmd);
14408
14409 /*
14410 * If enabled, copy any saved sense data into the area specified
14411 * by the uscsi command.
14412 */
14413 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14414 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14415 /*
14416 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14417 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14418 */
14419 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14420 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14421 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14422 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14423 MAX_SENSE_LENGTH);
14424 } else {
14425 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14426 SENSE_LENGTH);
14427 }
14428 }
14429 /*
14430 * The following assignments are for SCSI FMA.
14431 */
14432 ASSERT(xp->xb_private != NULL);
14433 suip = (struct sd_uscsi_info *)xp->xb_private;
14434 suip->ui_pkt_reason = pktp->pkt_reason;
14435 suip->ui_pkt_state = pktp->pkt_state;
14436 suip->ui_pkt_statistics = pktp->pkt_statistics;
14437 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14438
14439 /* We are done with the scsi_pkt; free it now */
14440 ASSERT(SD_GET_PKTP(bp) != NULL);
14441 scsi_destroy_pkt(SD_GET_PKTP(bp));
14442
14443 SD_TRACE(SD_LOG_IO_CORE, un,
14444 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14445 }
14446
14447
14448 /*
14449 * Function: sd_bioclone_alloc
14450 *
14451 * Description: Allocate a buf(9S) and init it as per the given buf
14452 * and the various arguments. The associated sd_xbuf
14453 * struct is (nearly) duplicated.
The struct buf *bp 14454 * argument is saved in new_xp->xb_private. 14455 * 14456 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14457 * datalen - size of data area for the shadow bp 14458 * blkno - starting LBA 14459 * func - function pointer for b_iodone in the shadow buf. (May 14460 * be NULL if none.) 14461 * 14462 * Return Code: Pointer to the allocated buf(9S) struct 14463 * 14464 * Context: Can sleep. 14465 */ 14466 14467 static struct buf * 14468 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno, 14469 int (*func)(struct buf *)) 14470 { 14471 struct sd_lun *un; 14472 struct sd_xbuf *xp; 14473 struct sd_xbuf *new_xp; 14474 struct buf *new_bp; 14475 14476 ASSERT(bp != NULL); 14477 xp = SD_GET_XBUF(bp); 14478 ASSERT(xp != NULL); 14479 un = SD_GET_UN(bp); 14480 ASSERT(un != NULL); 14481 ASSERT(!mutex_owned(SD_MUTEX(un))); 14482 14483 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14484 NULL, KM_SLEEP); 14485 14486 new_bp->b_lblkno = blkno; 14487 14488 /* 14489 * Allocate an xbuf for the shadow bp and copy the contents of the 14490 * original xbuf into it. 14491 */ 14492 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14493 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14494 14495 /* 14496 * The given bp is automatically saved in the xb_private member 14497 * of the new xbuf. Callers are allowed to depend on this. 14498 */ 14499 new_xp->xb_private = bp; 14500 14501 new_bp->b_private = new_xp; 14502 14503 return (new_bp); 14504 } 14505 14506 /* 14507 * Function: sd_shadow_buf_alloc 14508 * 14509 * Description: Allocate a buf(9S) and init it as per the given buf 14510 * and the various arguments. The associated sd_xbuf 14511 * struct is (nearly) duplicated. The struct buf *bp 14512 * argument is saved in new_xp->xb_private. 14513 * 14514 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14515 * datalen - size of data area for the shadow bp 14516 * bflags - B_READ or B_WRITE (pseudo flag) 14517 * blkno - starting LBA 14518 * func - function pointer for b_iodone in the shadow buf. (May 14519 * be NULL if none.) 14520 * 14521 * Return Code: Pointer to the allocated buf(9S) struct 14522 * 14523 * Context: Can sleep. 
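 *
 * Usage sketch (illustrative only; the names below are hypothetical,
 * not functions of this driver): a caller shadowing an unaligned
 * write with one aligned, target-block-sized buffer might do
 *
 *	shadow_bp = sd_shadow_buf_alloc(orig_bp, aligned_len,
 *	    B_WRITE, aligned_blkno, my_rmw_iodone);
 *
 * and my_rmw_iodone() can later recover orig_bp from
 * ((struct sd_xbuf *)shadow_bp->b_private)->xb_private in order to
 * copy data between the shadow and the original buf.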
14524 */ 14525 14526 static struct buf * 14527 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14528 daddr_t blkno, int (*func)(struct buf *)) 14529 { 14530 struct sd_lun *un; 14531 struct sd_xbuf *xp; 14532 struct sd_xbuf *new_xp; 14533 struct buf *new_bp; 14534 14535 ASSERT(bp != NULL); 14536 xp = SD_GET_XBUF(bp); 14537 ASSERT(xp != NULL); 14538 un = SD_GET_UN(bp); 14539 ASSERT(un != NULL); 14540 ASSERT(!mutex_owned(SD_MUTEX(un))); 14541 14542 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14543 bp_mapin(bp); 14544 } 14545 14546 bflags &= (B_READ | B_WRITE); 14547 #if defined(__i386) || defined(__amd64) 14548 new_bp = getrbuf(KM_SLEEP); 14549 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14550 new_bp->b_bcount = datalen; 14551 new_bp->b_flags = bflags | 14552 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14553 #else 14554 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14555 datalen, bflags, SLEEP_FUNC, NULL); 14556 #endif 14557 new_bp->av_forw = NULL; 14558 new_bp->av_back = NULL; 14559 new_bp->b_dev = bp->b_dev; 14560 new_bp->b_blkno = blkno; 14561 new_bp->b_iodone = func; 14562 new_bp->b_edev = bp->b_edev; 14563 new_bp->b_resid = 0; 14564 14565 /* We need to preserve the B_FAILFAST flag */ 14566 if (bp->b_flags & B_FAILFAST) { 14567 new_bp->b_flags |= B_FAILFAST; 14568 } 14569 14570 /* 14571 * Allocate an xbuf for the shadow bp and copy the contents of the 14572 * original xbuf into it. 14573 */ 14574 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14575 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14576 14577 /* Need later to copy data between the shadow buf & original buf! */ 14578 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14579 14580 /* 14581 * The given bp is automatically saved in the xb_private member 14582 * of the new xbuf. Callers are allowed to depend on this. 14583 */ 14584 new_xp->xb_private = bp; 14585 14586 new_bp->b_private = new_xp; 14587 14588 return (new_bp); 14589 } 14590 14591 /* 14592 * Function: sd_bioclone_free 14593 * 14594 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14595 * in the larger than partition operation. 14596 * 14597 * Context: May be called under interrupt context 14598 */ 14599 14600 static void 14601 sd_bioclone_free(struct buf *bp) 14602 { 14603 struct sd_xbuf *xp; 14604 14605 ASSERT(bp != NULL); 14606 xp = SD_GET_XBUF(bp); 14607 ASSERT(xp != NULL); 14608 14609 /* 14610 * Call bp_mapout() before freeing the buf, in case a lower 14611 * layer or HBA had done a bp_mapin(). we must do this here 14612 * as we are the "originator" of the shadow buf. 14613 */ 14614 bp_mapout(bp); 14615 14616 /* 14617 * Null out b_iodone before freeing the bp, to ensure that the driver 14618 * never gets confused by a stale value in this field. (Just a little 14619 * extra defensiveness here.) 14620 */ 14621 bp->b_iodone = NULL; 14622 14623 freerbuf(bp); 14624 14625 kmem_free(xp, sizeof (struct sd_xbuf)); 14626 } 14627 14628 /* 14629 * Function: sd_shadow_buf_free 14630 * 14631 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14632 * 14633 * Context: May be called under interrupt context 14634 */ 14635 14636 static void 14637 sd_shadow_buf_free(struct buf *bp) 14638 { 14639 struct sd_xbuf *xp; 14640 14641 ASSERT(bp != NULL); 14642 xp = SD_GET_XBUF(bp); 14643 ASSERT(xp != NULL); 14644 14645 #if defined(__sparc) 14646 /* 14647 * Call bp_mapout() before freeing the buf, in case a lower 14648 * layer or HBA had done a bp_mapin(). 
we must do this here 14649 * as we are the "originator" of the shadow buf. 14650 */ 14651 bp_mapout(bp); 14652 #endif 14653 14654 /* 14655 * Null out b_iodone before freeing the bp, to ensure that the driver 14656 * never gets confused by a stale value in this field. (Just a little 14657 * extra defensiveness here.) 14658 */ 14659 bp->b_iodone = NULL; 14660 14661 #if defined(__i386) || defined(__amd64) 14662 kmem_free(bp->b_un.b_addr, bp->b_bcount); 14663 freerbuf(bp); 14664 #else 14665 scsi_free_consistent_buf(bp); 14666 #endif 14667 14668 kmem_free(xp, sizeof (struct sd_xbuf)); 14669 } 14670 14671 14672 /* 14673 * Function: sd_print_transport_rejected_message 14674 * 14675 * Description: This implements the ludicrously complex rules for printing 14676 * a "transport rejected" message. This is to address the 14677 * specific problem of having a flood of this error message 14678 * produced when a failover occurs. 14679 * 14680 * Context: Any. 14681 */ 14682 14683 static void 14684 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 14685 int code) 14686 { 14687 ASSERT(un != NULL); 14688 ASSERT(mutex_owned(SD_MUTEX(un))); 14689 ASSERT(xp != NULL); 14690 14691 /* 14692 * Print the "transport rejected" message under the following 14693 * conditions: 14694 * 14695 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 14696 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 14697 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 14698 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 14699 * scsi_transport(9F) (which indicates that the target might have 14700 * gone off-line). This uses the un->un_tran_fatal_count 14701 * count, which is incremented whenever a TRAN_FATAL_ERROR is 14702 * received, and reset to zero whenever a TRAN_ACCEPT is returned 14703 * from scsi_transport(). 14704 * 14705 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 14706 * the preceding cases in order for the message to be printed. 14707 */ 14708 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) && 14709 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) { 14710 if ((sd_level_mask & SD_LOGMASK_DIAG) || 14711 (code != TRAN_FATAL_ERROR) || 14712 (un->un_tran_fatal_count == 1)) { 14713 switch (code) { 14714 case TRAN_BADPKT: 14715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14716 "transport rejected bad packet\n"); 14717 break; 14718 case TRAN_FATAL_ERROR: 14719 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14720 "transport rejected fatal error\n"); 14721 break; 14722 default: 14723 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14724 "transport rejected (%d)\n", code); 14725 break; 14726 } 14727 } 14728 } 14729 } 14730 14731 14732 /* 14733 * Function: sd_add_buf_to_waitq 14734 * 14735 * Description: Add the given buf(9S) struct to the wait queue for the 14736 * instance. If sorting is enabled, then the buf is added 14737 * to the queue via an elevator sort algorithm (a la 14738 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 14739 * If sorting is not enabled, then the buf is just added 14740 * to the end of the wait queue. 14741 * 14742 * Return Code: void 14743 * 14744 * Context: Does not sleep/block, therefore technically can be called 14745 * from any context. However if sorting is enabled then the 14746 * execution time is indeterminate, and may take a long time if 14747 * the wait queue grows large. 
14748 */ 14749 14750 static void 14751 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14752 { 14753 struct buf *ap; 14754 14755 ASSERT(bp != NULL); 14756 ASSERT(un != NULL); 14757 ASSERT(mutex_owned(SD_MUTEX(un))); 14758 14759 /* If the queue is empty, add the buf as the only entry & return. */ 14760 if (un->un_waitq_headp == NULL) { 14761 ASSERT(un->un_waitq_tailp == NULL); 14762 un->un_waitq_headp = un->un_waitq_tailp = bp; 14763 bp->av_forw = NULL; 14764 return; 14765 } 14766 14767 ASSERT(un->un_waitq_tailp != NULL); 14768 14769 /* 14770 * If sorting is disabled, just add the buf to the tail end of 14771 * the wait queue and return. 14772 */ 14773 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) { 14774 un->un_waitq_tailp->av_forw = bp; 14775 un->un_waitq_tailp = bp; 14776 bp->av_forw = NULL; 14777 return; 14778 } 14779 14780 /* 14781 * Sort thru the list of requests currently on the wait queue 14782 * and add the new buf request at the appropriate position. 14783 * 14784 * The un->un_waitq_headp is an activity chain pointer on which 14785 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14786 * first queue holds those requests which are positioned after 14787 * the current SD_GET_BLKNO() (in the first request); the second holds 14788 * requests which came in after their SD_GET_BLKNO() number was passed. 14789 * Thus we implement a one way scan, retracting after reaching 14790 * the end of the drive to the first request on the second 14791 * queue, at which time it becomes the first queue. 14792 * A one-way scan is natural because of the way UNIX read-ahead 14793 * blocks are allocated. 14794 * 14795 * If we lie after the first request, then we must locate the 14796 * second request list and add ourselves to it. 14797 */ 14798 ap = un->un_waitq_headp; 14799 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14800 while (ap->av_forw != NULL) { 14801 /* 14802 * Look for an "inversion" in the (normally 14803 * ascending) block numbers. This indicates 14804 * the start of the second request list. 14805 */ 14806 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14807 /* 14808 * Search the second request list for the 14809 * first request at a larger block number. 14810 * We go before that; however if there is 14811 * no such request, we go at the end. 14812 */ 14813 do { 14814 if (SD_GET_BLKNO(bp) < 14815 SD_GET_BLKNO(ap->av_forw)) { 14816 goto insert; 14817 } 14818 ap = ap->av_forw; 14819 } while (ap->av_forw != NULL); 14820 goto insert; /* after last */ 14821 } 14822 ap = ap->av_forw; 14823 } 14824 14825 /* 14826 * No inversions... we will go after the last, and 14827 * be the first request in the second request list. 14828 */ 14829 goto insert; 14830 } 14831 14832 /* 14833 * Request is at/after the current request... 14834 * sort in the first request list. 14835 */ 14836 while (ap->av_forw != NULL) { 14837 /* 14838 * We want to go after the current request (1) if 14839 * there is an inversion after it (i.e. it is the end 14840 * of the first request list), or (2) if the next 14841 * request is a larger block no. than our request. 14842 */ 14843 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14844 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14845 goto insert; 14846 } 14847 ap = ap->av_forw; 14848 } 14849 14850 /* 14851 * Neither a second list nor a larger request, therefore 14852 * we go at the end of the first list (which is the same 14853 * as the end of the whole schebang). 
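 *
 * Worked example (hypothetical block numbers): with the head request
 * at block 100 and a waitq of 100 -> 300 -> 700 -> 50 -> 80, the
 * inversion at 700 -> 50 marks the start of the second list. A new
 * buf for block 500 is inserted between 300 and 700 (first list),
 * while a new buf for block 60 sorts into the second list between
 * 50 and 80, to be serviced after the sweep passes block 700.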
14854 */ 14855 insert: 14856 bp->av_forw = ap->av_forw; 14857 ap->av_forw = bp; 14858 14859 /* 14860 * If we inserted onto the tail end of the waitq, make sure the 14861 * tail pointer is updated. 14862 */ 14863 if (ap == un->un_waitq_tailp) { 14864 un->un_waitq_tailp = bp; 14865 } 14866 } 14867 14868 14869 /* 14870 * Function: sd_start_cmds 14871 * 14872 * Description: Remove and transport cmds from the driver queues. 14873 * 14874 * Arguments: un - pointer to the unit (soft state) struct for the target. 14875 * 14876 * immed_bp - ptr to a buf to be transported immediately. Only 14877 * the immed_bp is transported; bufs on the waitq are not 14878 * processed and the un_retry_bp is not checked. If immed_bp is 14879 * NULL, then normal queue processing is performed. 14880 * 14881 * Context: May be called from kernel thread context, interrupt context, 14882 * or runout callback context. This function may not block or 14883 * call routines that block. 14884 */ 14885 14886 static void 14887 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14888 { 14889 struct sd_xbuf *xp; 14890 struct buf *bp; 14891 void (*statp)(kstat_io_t *); 14892 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14893 void (*saved_statp)(kstat_io_t *); 14894 #endif 14895 int rval; 14896 struct sd_fm_internal *sfip = NULL; 14897 14898 ASSERT(un != NULL); 14899 ASSERT(mutex_owned(SD_MUTEX(un))); 14900 ASSERT(un->un_ncmds_in_transport >= 0); 14901 ASSERT(un->un_throttle >= 0); 14902 14903 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14904 14905 do { 14906 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14907 saved_statp = NULL; 14908 #endif 14909 14910 /* 14911 * If we are syncing or dumping, fail the command to 14912 * avoid recursively calling back into scsi_transport(). 14913 * The dump I/O itself uses a separate code path so this 14914 * only prevents non-dump I/O from being sent while dumping. 14915 * File system sync takes place before dumping begins. 14916 * During panic, filesystem I/O is allowed provided 14917 * un_in_callback is <= 1. This is to prevent recursion 14918 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14919 * sd_start_cmds and so on. See panic.c for more information 14920 * about the states the system can be in during panic. 14921 */ 14922 if ((un->un_state == SD_STATE_DUMPING) || 14923 (ddi_in_panic() && (un->un_in_callback > 1))) { 14924 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14925 "sd_start_cmds: panicking\n"); 14926 goto exit; 14927 } 14928 14929 if ((bp = immed_bp) != NULL) { 14930 /* 14931 * We have a bp that must be transported immediately. 14932 * It's OK to transport the immed_bp here without doing 14933 * the throttle limit check because the immed_bp is 14934 * always used in a retry/recovery case. This means 14935 * that we know we are not at the throttle limit by 14936 * virtue of the fact that to get here we must have 14937 * already gotten a command back via sdintr(). This also 14938 * relies on (1) the command on un_retry_bp preventing 14939 * further commands from the waitq from being issued; 14940 * and (2) the code in sd_retry_command checking the 14941 * throttle limit before issuing a delayed or immediate 14942 * retry. This holds even if the throttle limit is 14943 * currently ratcheted down from its maximum value. 
*/ 14945 statp = kstat_runq_enter; 14946 if (bp == un->un_retry_bp) { 14947 ASSERT((un->un_retry_statp == NULL) || 14948 (un->un_retry_statp == kstat_waitq_enter) || 14949 (un->un_retry_statp == 14950 kstat_runq_back_to_waitq)); 14951 /* 14952 * If the waitq kstat was incremented when 14953 * sd_set_retry_bp() queued this bp for a retry, 14954 * then we must set up statp so that the waitq 14955 * count will get decremented correctly below. 14956 * Also we must clear un->un_retry_statp to 14957 * ensure that we do not act on a stale value 14958 * in this field. 14959 */ 14960 if ((un->un_retry_statp == kstat_waitq_enter) || 14961 (un->un_retry_statp == 14962 kstat_runq_back_to_waitq)) { 14963 statp = kstat_waitq_to_runq; 14964 } 14965 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14966 saved_statp = un->un_retry_statp; 14967 #endif 14968 un->un_retry_statp = NULL; 14969 14970 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14971 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14972 "un_throttle:%d un_ncmds_in_transport:%d\n", 14973 un, un->un_retry_bp, un->un_throttle, 14974 un->un_ncmds_in_transport); 14975 } else { 14976 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14977 "processing priority bp:0x%p\n", bp); 14978 } 14979 14980 } else if ((bp = un->un_waitq_headp) != NULL) { 14981 /* 14982 * A command on the waitq is ready to go, but do not 14983 * send it if: 14984 * 14985 * (1) the throttle limit has been reached, or 14986 * (2) a retry is pending, or 14987 * (3) a START_STOP_UNIT callback is pending, or 14988 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14989 * command is pending. 14990 * 14991 * For all of these conditions, IO processing will 14992 * restart after the condition is cleared. 14993 */ 14994 if (un->un_ncmds_in_transport >= un->un_throttle) { 14995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14996 "sd_start_cmds: exiting, " 14997 "throttle limit reached!\n"); 14998 goto exit; 14999 } 15000 if (un->un_retry_bp != NULL) { 15001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15002 "sd_start_cmds: exiting, retry pending!\n"); 15003 goto exit; 15004 } 15005 if (un->un_startstop_timeid != NULL) { 15006 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15007 "sd_start_cmds: exiting, " 15008 "START_STOP pending!\n"); 15009 goto exit; 15010 } 15011 if (un->un_direct_priority_timeid != NULL) { 15012 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15013 "sd_start_cmds: exiting, " 15014 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 15015 goto exit; 15016 } 15017 15018 /* Dequeue the command */ 15019 un->un_waitq_headp = bp->av_forw; 15020 if (un->un_waitq_headp == NULL) { 15021 un->un_waitq_tailp = NULL; 15022 } 15023 bp->av_forw = NULL; 15024 statp = kstat_waitq_to_runq; 15025 SD_TRACE(SD_LOG_IO_CORE, un, 15026 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 15027 15028 } else { 15029 /* No work to do so bail out now */ 15030 SD_TRACE(SD_LOG_IO_CORE, un, 15031 "sd_start_cmds: no more work, exiting!\n"); 15032 goto exit; 15033 } 15034 15035 /* 15036 * Reset the state to normal. This is the mechanism by which 15037 * the state transitions from either SD_STATE_RWAIT or 15038 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 15039 * If state is SD_STATE_PM_CHANGING then this command is 15040 * part of the device power control and the state must 15041 * not be put back to normal. Doing so would 15042 * allow new commands to proceed when they shouldn't, as 15043 * the device may be going off. 
15044 */ 15045 if ((un->un_state != SD_STATE_SUSPENDED) && 15046 (un->un_state != SD_STATE_PM_CHANGING)) { 15047 New_state(un, SD_STATE_NORMAL); 15048 } 15049 15050 xp = SD_GET_XBUF(bp); 15051 ASSERT(xp != NULL); 15052 15053 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15054 /* 15055 * Allocate the scsi_pkt if we need one, or attach DMA 15056 * resources if we have a scsi_pkt that needs them. The 15057 * latter should only occur for commands that are being 15058 * retried. 15059 */ 15060 if ((xp->xb_pktp == NULL) || 15061 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 15062 #else 15063 if (xp->xb_pktp == NULL) { 15064 #endif 15065 /* 15066 * There is no scsi_pkt allocated for this buf. Call 15067 * the initpkt function to allocate & init one. 15068 * 15069 * The scsi_init_pkt runout callback functionality is 15070 * implemented as follows: 15071 * 15072 * 1) The initpkt function always calls 15073 * scsi_init_pkt(9F) with sdrunout specified as the 15074 * callback routine. 15075 * 2) A successful packet allocation is initialized and 15076 * the I/O is transported. 15077 * 3) The I/O associated with an allocation resource 15078 * failure is left on its queue to be retried via 15079 * runout or the next I/O. 15080 * 4) The I/O associated with a DMA error is removed 15081 * from the queue and failed with EIO. Processing of 15082 * the transport queues is also halted to be 15083 * restarted via runout or the next I/O. 15084 * 5) The I/O associated with a CDB size or packet 15085 * size error is removed from the queue and failed 15086 * with EIO. Processing of the transport queues is 15087 * continued. 15088 * 15089 * Note: there is no interface for canceling a runout 15090 * callback. To prevent the driver from detaching or 15091 * suspending while a runout is pending the driver 15092 * state is set to SD_STATE_RWAIT 15093 * 15094 * Note: using the scsi_init_pkt callback facility can 15095 * result in an I/O request persisting at the head of 15096 * the list which cannot be satisfied even after 15097 * multiple retries. In the future the driver may 15098 * implement some kind of maximum runout count before 15099 * failing an I/O. 15100 * 15101 * Note: the use of funcp below may seem superfluous, 15102 * but it helps warlock figure out the correct 15103 * initpkt function calls (see [s]sd.wlcmd). 15104 */ 15105 struct scsi_pkt *pktp; 15106 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 15107 15108 ASSERT(bp != un->un_rqs_bp); 15109 15110 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 15111 switch ((*funcp)(bp, &pktp)) { 15112 case SD_PKT_ALLOC_SUCCESS: 15113 xp->xb_pktp = pktp; 15114 SD_TRACE(SD_LOG_IO_CORE, un, 15115 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 15116 pktp); 15117 goto got_pkt; 15118 15119 case SD_PKT_ALLOC_FAILURE: 15120 /* 15121 * Temporary (hopefully) resource depletion. 15122 * Since retries and RQS commands always have a 15123 * scsi_pkt allocated, these cases should never 15124 * get here. So the only cases this needs to 15125 * handle is a bp from the waitq (which we put 15126 * back onto the waitq for sdrunout), or a bp 15127 * sent as an immed_bp (which we just fail). 15128 */ 15129 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15130 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 15131 15132 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15133 15134 if (bp == immed_bp) { 15135 /* 15136 * If SD_XB_DMA_FREED is clear, then 15137 * this is a failure to allocate a 15138 * scsi_pkt, and we must fail the 15139 * command. 
15140 */ 15141 if ((xp->xb_pkt_flags & 15142 SD_XB_DMA_FREED) == 0) { 15143 break; 15144 } 15145 15146 /* 15147 * If this immediate command is NOT our 15148 * un_retry_bp, then we must fail it. 15149 */ 15150 if (bp != un->un_retry_bp) { 15151 break; 15152 } 15153 15154 /* 15155 * We get here if this cmd is our 15156 * un_retry_bp that was DMAFREED, but 15157 * scsi_init_pkt() failed to reallocate 15158 * DMA resources when we attempted to 15159 * retry it. This can happen when an 15160 * mpxio failover is in progress, but 15161 * we don't want to just fail the 15162 * command in this case. 15163 * 15164 * Use timeout(9F) to restart it after 15165 * a 100ms delay. We don't want to 15166 * let sdrunout() restart it, because 15167 * sdrunout() is just supposed to start 15168 * commands that are sitting on the 15169 * wait queue. The un_retry_bp stays 15170 * set until the command completes, but 15171 * sdrunout can be called many times 15172 * before that happens. Since sdrunout 15173 * cannot tell if the un_retry_bp is 15174 * already in the transport, it could 15175 * end up calling scsi_transport() for 15176 * the un_retry_bp multiple times. 15177 * 15178 * Also: don't schedule the callback 15179 * if some other callback is already 15180 * pending. 15181 */ 15182 if (un->un_retry_statp == NULL) { 15183 /* 15184 * restore the kstat pointer to 15185 * keep kstat counts coherent 15186 * when we do retry the command. 15187 */ 15188 un->un_retry_statp = 15189 saved_statp; 15190 } 15191 15192 if ((un->un_startstop_timeid == NULL) && 15193 (un->un_retry_timeid == NULL) && 15194 (un->un_direct_priority_timeid == 15195 NULL)) { 15196 15197 un->un_retry_timeid = 15198 timeout( 15199 sd_start_retry_command, 15200 un, SD_RESTART_TIMEOUT); 15201 } 15202 goto exit; 15203 } 15204 15205 #else 15206 if (bp == immed_bp) { 15207 break; /* Just fail the command */ 15208 } 15209 #endif 15210 15211 /* Add the buf back to the head of the waitq */ 15212 bp->av_forw = un->un_waitq_headp; 15213 un->un_waitq_headp = bp; 15214 if (un->un_waitq_tailp == NULL) { 15215 un->un_waitq_tailp = bp; 15216 } 15217 goto exit; 15218 15219 case SD_PKT_ALLOC_FAILURE_NO_DMA: 15220 /* 15221 * HBA DMA resource failure. Fail the command 15222 * and continue processing of the queues. 15223 */ 15224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15225 "sd_start_cmds: " 15226 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 15227 break; 15228 15229 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 15230 /* 15231 * Note:x86: Partial DMA mapping not supported 15232 * for USCSI commands, and all the needed DMA 15233 * resources were not allocated. 15234 */ 15235 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15236 "sd_start_cmds: " 15237 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 15238 break; 15239 15240 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 15241 /* 15242 * Note:x86: Request cannot fit into CDB based 15243 * on lba and len. 15244 */ 15245 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15246 "sd_start_cmds: " 15247 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 15248 break; 15249 15250 default: 15251 /* Should NEVER get here! */ 15252 panic("scsi_initpkt error"); 15253 /*NOTREACHED*/ 15254 } 15255 15256 /* 15257 * Fatal error in allocating a scsi_pkt for this buf. 15258 * Update kstats & return the buf with an error code. 15259 * We must use sd_return_failed_command_no_restart() to 15260 * avoid a recursive call back into sd_start_cmds(). 15261 * However this also means that we must keep processing 15262 * the waitq here in order to avoid stalling. 
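 *
 * (Bookkeeping note: statp == kstat_waitq_to_runq below means this
 * buf was counted onto the wait queue but never reached the run
 * queue, so only kstat_waitq_exit is applied before the command is
 * failed.)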
15263 */ 15264 if (statp == kstat_waitq_to_runq) { 15265 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 15266 } 15267 sd_return_failed_command_no_restart(un, bp, EIO); 15268 if (bp == immed_bp) { 15269 /* immed_bp is gone by now, so clear this */ 15270 immed_bp = NULL; 15271 } 15272 continue; 15273 } 15274 got_pkt: 15275 if (bp == immed_bp) { 15276 /* goto the head of the class.... */ 15277 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15278 } 15279 15280 un->un_ncmds_in_transport++; 15281 SD_UPDATE_KSTATS(un, statp, bp); 15282 15283 /* 15284 * Call scsi_transport() to send the command to the target. 15285 * According to SCSA architecture, we must drop the mutex here 15286 * before calling scsi_transport() in order to avoid deadlock. 15287 * Note that the scsi_pkt's completion routine can be executed 15288 * (from interrupt context) even before the call to 15289 * scsi_transport() returns. 15290 */ 15291 SD_TRACE(SD_LOG_IO_CORE, un, 15292 "sd_start_cmds: calling scsi_transport()\n"); 15293 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 15294 15295 mutex_exit(SD_MUTEX(un)); 15296 rval = scsi_transport(xp->xb_pktp); 15297 mutex_enter(SD_MUTEX(un)); 15298 15299 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15300 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15301 15302 switch (rval) { 15303 case TRAN_ACCEPT: 15304 /* Clear this with every pkt accepted by the HBA */ 15305 un->un_tran_fatal_count = 0; 15306 break; /* Success; try the next cmd (if any) */ 15307 15308 case TRAN_BUSY: 15309 un->un_ncmds_in_transport--; 15310 ASSERT(un->un_ncmds_in_transport >= 0); 15311 15312 /* 15313 * Don't retry request sense, the sense data 15314 * is lost when another request is sent. 15315 * Free up the rqs buf and retry 15316 * the original failed cmd. Update kstat. 15317 */ 15318 if (bp == un->un_rqs_bp) { 15319 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15320 bp = sd_mark_rqs_idle(un, xp); 15321 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15322 NULL, NULL, EIO, un->un_busy_timeout / 500, 15323 kstat_waitq_enter); 15324 goto exit; 15325 } 15326 15327 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15328 /* 15329 * Free the DMA resources for the scsi_pkt. This will 15330 * allow mpxio to select another path the next time 15331 * we call scsi_transport() with this scsi_pkt. 15332 * See sdintr() for the rationalization behind this. 15333 */ 15334 if ((un->un_f_is_fibre == TRUE) && 15335 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15336 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15337 scsi_dmafree(xp->xb_pktp); 15338 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15339 } 15340 #endif 15341 15342 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15343 /* 15344 * Commands that are SD_PATH_DIRECT_PRIORITY 15345 * are for error recovery situations. These do 15346 * not use the normal command waitq, so if they 15347 * get a TRAN_BUSY we cannot put them back onto 15348 * the waitq for later retry. One possible 15349 * problem is that there could already be some 15350 * other command on un_retry_bp that is waiting 15351 * for this one to complete, so we would be 15352 * deadlocked if we put this command back onto 15353 * the waitq for later retry (since un_retry_bp 15354 * must complete before the driver gets back to 15355 * commands on the waitq). 15356 * 15357 * To avoid deadlock we must schedule a callback 15358 * that will restart this command after a set 15359 * interval. 
This should keep retrying for as 15360 * long as the underlying transport keeps 15361 * returning TRAN_BUSY (just like for other 15362 * commands). Use the same timeout interval as 15363 * for the ordinary TRAN_BUSY retry. 15364 */ 15365 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15366 "sd_start_cmds: scsi_transport() returned " 15367 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15368 15369 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15370 un->un_direct_priority_timeid = 15371 timeout(sd_start_direct_priority_command, 15372 bp, un->un_busy_timeout / 500); 15373 15374 goto exit; 15375 } 15376 15377 /* 15378 * For TRAN_BUSY, we want to reduce the throttle value, 15379 * unless we are retrying a command. 15380 */ 15381 if (bp != un->un_retry_bp) { 15382 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15383 } 15384 15385 /* 15386 * Set up the bp to be tried again 10 ms later. 15387 * Note:x86: Is there a timeout value in the sd_lun 15388 * for this condition? 15389 */ 15390 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15391 kstat_runq_back_to_waitq); 15392 goto exit; 15393 15394 case TRAN_FATAL_ERROR: 15395 un->un_tran_fatal_count++; 15396 /* FALLTHRU */ 15397 15398 case TRAN_BADPKT: 15399 default: 15400 un->un_ncmds_in_transport--; 15401 ASSERT(un->un_ncmds_in_transport >= 0); 15402 15403 /* 15404 * If this is our REQUEST SENSE command with a 15405 * transport error, we must get back the pointers 15406 * to the original buf, and mark the REQUEST 15407 * SENSE command as "available". 15408 */ 15409 if (bp == un->un_rqs_bp) { 15410 bp = sd_mark_rqs_idle(un, xp); 15411 xp = SD_GET_XBUF(bp); 15412 } else { 15413 /* 15414 * Legacy behavior: do not update transport 15415 * error count for request sense commands. 15416 */ 15417 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15418 } 15419 15420 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15421 sd_print_transport_rejected_message(un, xp, rval); 15422 15423 /* 15424 * This command will be terminated by SD driver due 15425 * to a fatal transport error. We should post 15426 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15427 * of "fail" for any command to indicate this 15428 * situation. 15429 */ 15430 if (xp->xb_ena > 0) { 15431 ASSERT(un->un_fm_private != NULL); 15432 sfip = un->un_fm_private; 15433 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15434 sd_ssc_extract_info(&sfip->fm_ssc, un, 15435 xp->xb_pktp, bp, xp); 15436 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15437 } 15438 15439 /* 15440 * We must use sd_return_failed_command_no_restart() to 15441 * avoid a recursive call back into sd_start_cmds(). 15442 * However this also means that we must keep processing 15443 * the waitq here in order to avoid stalling. 15444 */ 15445 sd_return_failed_command_no_restart(un, bp, EIO); 15446 15447 /* 15448 * Notify any threads waiting in sd_ddi_suspend() that 15449 * a command completion has occurred. 15450 */ 15451 if (un->un_state == SD_STATE_SUSPENDED) { 15452 cv_broadcast(&un->un_disk_busy_cv); 15453 } 15454 15455 if (bp == immed_bp) { 15456 /* immed_bp is gone by now, so clear this */ 15457 immed_bp = NULL; 15458 } 15459 break; 15460 } 15461 15462 } while (immed_bp == NULL); 15463 15464 exit: 15465 ASSERT(mutex_owned(SD_MUTEX(un))); 15466 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15467 } 15468 15469 15470 /* 15471 * Function: sd_return_command 15472 * 15473 * Description: Returns a command to its originator (with or without an 15474 * error). Also starts commands waiting to be transported 15475 * to the target. 
15476 * 15477 * Context: May be called from interrupt, kernel, or timeout context 15478 */ 15479 15480 static void 15481 sd_return_command(struct sd_lun *un, struct buf *bp) 15482 { 15483 struct sd_xbuf *xp; 15484 struct scsi_pkt *pktp; 15485 struct sd_fm_internal *sfip; 15486 15487 ASSERT(bp != NULL); 15488 ASSERT(un != NULL); 15489 ASSERT(mutex_owned(SD_MUTEX(un))); 15490 ASSERT(bp != un->un_rqs_bp); 15491 xp = SD_GET_XBUF(bp); 15492 ASSERT(xp != NULL); 15493 15494 pktp = SD_GET_PKTP(bp); 15495 sfip = (struct sd_fm_internal *)un->un_fm_private; 15496 ASSERT(sfip != NULL); 15497 15498 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15499 15500 /* 15501 * Note: check for the "sdrestart failed" case. 15502 */ 15503 if ((un->un_partial_dma_supported == 1) && 15504 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15505 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15506 (xp->xb_pktp->pkt_resid == 0)) { 15507 15508 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15509 /* 15510 * Successfully set up next portion of cmd 15511 * transfer, try sending it 15512 */ 15513 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15514 NULL, NULL, 0, (clock_t)0, NULL); 15515 sd_start_cmds(un, NULL); 15516 return; /* Note:x86: need a return here? */ 15517 } 15518 } 15519 15520 /* 15521 * If this is the failfast bp, clear it from un_failfast_bp. This 15522 * can happen if upon being re-tried the failfast bp either 15523 * succeeded or encountered another error (possibly even a different 15524 * error than the one that precipitated the failfast state, but in 15525 * that case it would have had to exhaust retries as well). Regardless, 15526 * this should not occur whenever the instance is in the active 15527 * failfast state. 15528 */ 15529 if (bp == un->un_failfast_bp) { 15530 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15531 un->un_failfast_bp = NULL; 15532 } 15533 15534 /* 15535 * Clear the failfast state upon successful completion of ANY cmd. 15536 */ 15537 if (bp->b_error == 0) { 15538 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15539 /* 15540 * If this is a successful command, but used to be retried, 15541 * we will take it as a recovered command and post an 15542 * ereport with driver-assessment of "recovered". 15543 */ 15544 if (xp->xb_ena > 0) { 15545 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15546 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15547 } 15548 } else { 15549 /* 15550 * If this is a failed non-USCSI command we will post an 15551 * ereport with driver-assessment set accordingly("fail" or 15552 * "fatal"). 15553 */ 15554 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15555 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15556 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15557 } 15558 } 15559 15560 /* 15561 * This is used if the command was retried one or more times. Show that 15562 * we are done with it, and allow processing of the waitq to resume. 15563 */ 15564 if (bp == un->un_retry_bp) { 15565 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15566 "sd_return_command: un:0x%p: " 15567 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15568 un->un_retry_bp = NULL; 15569 un->un_retry_statp = NULL; 15570 } 15571 15572 SD_UPDATE_RDWR_STATS(un, bp); 15573 SD_UPDATE_PARTITION_STATS(un, bp); 15574 15575 switch (un->un_state) { 15576 case SD_STATE_SUSPENDED: 15577 /* 15578 * Notify any threads waiting in sd_ddi_suspend() that 15579 * a command completion has occurred. 
*/ 15581 cv_broadcast(&un->un_disk_busy_cv); 15582 break; 15583 default: 15584 sd_start_cmds(un, NULL); 15585 break; 15586 } 15587 15588 /* Return this command up the iodone chain to its originator. */ 15589 mutex_exit(SD_MUTEX(un)); 15590 15591 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15592 xp->xb_pktp = NULL; 15593 15594 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15595 15596 ASSERT(!mutex_owned(SD_MUTEX(un))); 15597 mutex_enter(SD_MUTEX(un)); 15598 15599 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15600 } 15601 15602 15603 /* 15604 * Function: sd_return_failed_command 15605 * 15606 * Description: Command completion when an error occurred. 15607 * 15608 * Context: May be called from interrupt context 15609 */ 15610 15611 static void 15612 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15613 { 15614 ASSERT(bp != NULL); 15615 ASSERT(un != NULL); 15616 ASSERT(mutex_owned(SD_MUTEX(un))); 15617 15618 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15619 "sd_return_failed_command: entry\n"); 15620 15621 /* 15622 * b_resid could already be nonzero due to a partial data 15623 * transfer, so do not change it here. 15624 */ 15625 SD_BIOERROR(bp, errcode); 15626 15627 sd_return_command(un, bp); 15628 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15629 "sd_return_failed_command: exit\n"); 15630 } 15631 15632 15633 /* 15634 * Function: sd_return_failed_command_no_restart 15635 * 15636 * Description: Same as sd_return_failed_command, but ensures that no 15637 * call back into sd_start_cmds will be issued. 15638 * 15639 * Context: May be called from interrupt context 15640 */ 15641 15642 static void 15643 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15644 int errcode) 15645 { 15646 struct sd_xbuf *xp; 15647 15648 ASSERT(bp != NULL); 15649 ASSERT(un != NULL); 15650 ASSERT(mutex_owned(SD_MUTEX(un))); 15651 xp = SD_GET_XBUF(bp); 15652 ASSERT(xp != NULL); 15653 ASSERT(errcode != 0); 15654 15655 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15656 "sd_return_failed_command_no_restart: entry\n"); 15657 15658 /* 15659 * b_resid could already be nonzero due to a partial data 15660 * transfer, so do not change it here. 15661 */ 15662 SD_BIOERROR(bp, errcode); 15663 15664 /* 15665 * If this is the failfast bp, clear it. This can happen if the 15666 * failfast bp encountered a fatal error when we attempted to 15667 * re-try it (such as a scsi_transport(9F) failure). However 15668 * we should NOT be in an active failfast state if the failfast 15669 * bp is not NULL. 15670 */ 15671 if (bp == un->un_failfast_bp) { 15672 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15673 un->un_failfast_bp = NULL; 15674 } 15675 15676 if (bp == un->un_retry_bp) { 15677 /* 15678 * This command was retried one or more times. Show that we are 15679 * done with it, and allow processing of the waitq to resume. 
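 *
 * (Unlike sd_return_failed_command(), this variant never calls back
 * into sd_start_cmds(); the scsi_transport() failure paths in
 * sd_start_cmds() rely on that and keep draining the waitq
 * themselves to avoid stalling.)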
*/ 15681 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15682 "sd_return_failed_command_no_restart: " 15683 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15684 un->un_retry_bp = NULL; 15685 un->un_retry_statp = NULL; 15686 } 15687 15688 SD_UPDATE_RDWR_STATS(un, bp); 15689 SD_UPDATE_PARTITION_STATS(un, bp); 15690 15691 mutex_exit(SD_MUTEX(un)); 15692 15693 if (xp->xb_pktp != NULL) { 15694 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15695 xp->xb_pktp = NULL; 15696 } 15697 15698 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15699 15700 mutex_enter(SD_MUTEX(un)); 15701 15702 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15703 "sd_return_failed_command_no_restart: exit\n"); 15704 } 15705 15706 15707 /* 15708 * Function: sd_retry_command 15709 * 15710 * Description: Queue up a command for retry, or (optionally) fail it 15711 * if retry counts are exhausted. 15712 * 15713 * Arguments: un - Pointer to the sd_lun struct for the target. 15714 * 15715 * bp - Pointer to the buf for the command to be retried. 15716 * 15717 * retry_check_flag - Flag to see which (if any) of the retry 15718 * counts should be decremented/checked. If the indicated 15719 * retry count is exhausted, then the command will not be 15720 * retried; it will be failed instead. This should use a 15721 * value equal to one of the following: 15722 * 15723 * SD_RETRIES_NOCHECK 15724 * SD_RETRIES_STANDARD 15725 * SD_RETRIES_VICTIM 15726 * 15727 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 15728 * if the check should be made to see if FLAG_ISOLATE is set 15729 * in the pkt. If FLAG_ISOLATE is set, then the command is 15730 * not retried; it is simply failed. 15731 * 15732 * user_funcp - Ptr to function to call before dispatching the 15733 * command. May be NULL if no action needs to be performed. 15734 * (Primarily intended for printing messages.) 15735 * 15736 * user_arg - Optional argument to be passed along to 15737 * the user_funcp call. 15738 * 15739 * failure_code - errno return code to set in the bp if the 15740 * command is going to be failed. 15741 * 15742 * retry_delay - Retry delay interval in (clock_t) units. May 15743 * be zero which indicates that the retry should be issued 15744 * immediately (i.e., without an intervening delay). 15745 * 15746 * statp - Ptr to kstat function to be updated if the command 15747 * is queued for a delayed retry. May be NULL if no kstat 15748 * update is desired. 15749 * 15750 * Context: May be called from interrupt context. 15751 */ 15752 15753 static void 15754 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 15755 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code), 15756 void *user_arg, int failure_code, clock_t retry_delay, 15757 void (*statp)(kstat_io_t *)) 15758 { 15759 struct sd_xbuf *xp; 15760 struct scsi_pkt *pktp; 15761 struct sd_fm_internal *sfip; 15762 15763 ASSERT(un != NULL); 15764 ASSERT(mutex_owned(SD_MUTEX(un))); 15765 ASSERT(bp != NULL); 15766 xp = SD_GET_XBUF(bp); 15767 ASSERT(xp != NULL); 15768 pktp = SD_GET_PKTP(bp); 15769 ASSERT(pktp != NULL); 15770 15771 sfip = (struct sd_fm_internal *)un->un_fm_private; 15772 ASSERT(sfip != NULL); 15773 15774 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15775 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 15776 15777 /* 15778 * If we are syncing or dumping, fail the command to avoid 15779 * recursively calling back into scsi_transport(). 
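 *
 * For reference, a typical delayed-retry invocation elsewhere in
 * this file (the request sense path) looks like
 *
 *	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
 *	    un->un_busy_timeout, kstat_waitq_enter);
 *
 * i.e. no user function, no retry count checked, and a busy-timeout
 * delay before the command is started again.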
*/ 15781 if (ddi_in_panic()) { 15782 goto fail_command_no_log; 15783 } 15784 15785 /* 15786 * We should never be retrying a command with FLAG_DIAGNOSE set, so 15787 * log an error and fail the command. 15788 */ 15789 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15790 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15791 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 15792 sd_dump_memory(un, SD_LOG_IO, "CDB", 15793 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15794 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15795 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15796 goto fail_command; 15797 } 15798 15799 /* 15800 * If we are suspended, then put the command onto the head of the 15801 * wait queue since we don't want to start more commands, and 15802 * clear the un_retry_bp. The next time we are resumed, the 15803 * commands on the wait queue will be handled. 15804 */ 15805 switch (un->un_state) { 15806 case SD_STATE_SUSPENDED: 15807 case SD_STATE_DUMPING: 15808 bp->av_forw = un->un_waitq_headp; 15809 un->un_waitq_headp = bp; 15810 if (un->un_waitq_tailp == NULL) { 15811 un->un_waitq_tailp = bp; 15812 } 15813 if (bp == un->un_retry_bp) { 15814 un->un_retry_bp = NULL; 15815 un->un_retry_statp = NULL; 15816 } 15817 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15818 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15819 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15820 return; 15821 default: 15822 break; 15823 } 15824 15825 /* 15826 * If the caller wants us to check FLAG_ISOLATE, then see if that 15827 * is set; if it is then we do not want to retry the command. 15828 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15829 */ 15830 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15831 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15832 goto fail_command; 15833 } 15834 } 15835 15836 15837 /* 15838 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15839 * command timeout or a selection timeout has occurred. This means 15840 * that we were unable to establish any kind of communication with 15841 * the target, and subsequent retries and/or commands are likely 15842 * to encounter similar results and take a long time to complete. 15843 * 15844 * If this is a failfast error condition, we need to update the 15845 * failfast state, even if this bp does not have B_FAILFAST set. 15846 */ 15847 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15848 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15849 ASSERT(un->un_failfast_bp == NULL); 15850 /* 15851 * If we are already in the active failfast state, and 15852 * another failfast error condition has been detected, 15853 * then fail this command if it has B_FAILFAST set. 15854 * If B_FAILFAST is clear, then maintain the legacy 15855 * behavior of retrying heroically, even though this will 15856 * take a lot more time to fail the command. 15857 */ 15858 if (bp->b_flags & B_FAILFAST) { 15859 goto fail_command; 15860 } 15861 } else { 15862 /* 15863 * We're not in the active failfast state, but we 15864 * have a failfast error condition, so we must begin 15865 * transition to the next state. We do this regardless 15866 * of whether or not this bp has B_FAILFAST set. 15867 */ 15868 if (un->un_failfast_bp == NULL) { 15869 /* 15870 * This is the first bp to meet a failfast 15871 * condition so save it on un_failfast_bp & 15872 * do normal retry processing. Do not enter 15873 * active failfast state yet. This marks 15874 * entry into the "failfast pending" state. 
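 *
 * Illustrative walk-through of the resulting state machine
 * (hypothetical bufs bpA and bpB):
 *
 *	1st failfast error on bpA:  un_failfast_bp = bpA (pending)
 *	failfast error on bpB:      normal retry; still pending
 *	2nd failfast error on bpA:  SD_FAILFAST_ACTIVE; queues flushed
 *	success or non-failfast:    back to SD_FAILFAST_INACTIVE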
*/ 15876 un->un_failfast_bp = bp; 15877 15878 } else if (un->un_failfast_bp == bp) { 15879 /* 15880 * This is the second time *this* bp has 15881 * encountered a failfast error condition, 15882 * so enter active failfast state & flush 15883 * queues as appropriate. 15884 */ 15885 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15886 un->un_failfast_bp = NULL; 15887 sd_failfast_flushq(un); 15888 15889 /* 15890 * Fail this bp now if B_FAILFAST set; 15891 * otherwise continue with retries. (It would 15892 * be pretty ironic if this bp succeeded on a 15893 * subsequent retry after we just flushed all 15894 * the queues). 15895 */ 15896 if (bp->b_flags & B_FAILFAST) { 15897 goto fail_command; 15898 } 15899 15900 #if !defined(lint) && !defined(__lint) 15901 } else { 15902 /* 15903 * If neither of the preceding conditionals 15904 * was true, it means that there is some 15905 * *other* bp that has met an initial failfast 15906 * condition and is currently either being 15907 * retried or is waiting to be retried. In 15908 * that case we should perform normal retry 15909 * processing on *this* bp, since there is a 15910 * chance that the current failfast condition 15911 * is transient and recoverable. If that does 15912 * not turn out to be the case, then retries 15913 * will be cleared when the wait queue is 15914 * flushed anyway. 15915 */ 15916 #endif 15917 } 15918 } 15919 } else { 15920 /* 15921 * SD_RETRIES_FAILFAST is clear, which indicates that we 15922 * likely were able to at least establish some level of 15923 * communication with the target and subsequent commands 15924 * and/or retries are likely to get through to the target. 15925 * In this case we want to be aggressive about clearing 15926 * the failfast state. Note that this does not affect 15927 * the "failfast pending" condition. 15928 */ 15929 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15930 } 15931 15932 15933 /* 15934 * Check the specified retry count to see if we can still do 15935 * any retries with this pkt before we should fail it. 15936 */ 15937 switch (retry_check_flag & SD_RETRIES_MASK) { 15938 case SD_RETRIES_VICTIM: 15939 /* 15940 * Check the victim retry count. If exhausted, then fall 15941 * thru & check against the standard retry count. 15942 */ 15943 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15944 /* Increment count & proceed with the retry */ 15945 xp->xb_victim_retry_count++; 15946 break; 15947 } 15948 /* Victim retries exhausted, fall back to std. retries... */ 15949 /* FALLTHRU */ 15950 15951 case SD_RETRIES_STANDARD: 15952 if (xp->xb_retry_count >= un->un_retry_count) { 15953 /* Retries exhausted, fail the command */ 15954 SD_TRACE(SD_LOG_IO_CORE, un, 15955 "sd_retry_command: retries exhausted!\n"); 15956 /* 15957 * Update b_resid for failed SCMD_READ & SCMD_WRITE 15958 * commands with nonzero pkt_resid. 
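 *
 * (Worked example, hypothetical sizes: a 64KB SCMD_READ that moved
 * all but 16KB before exhausting its retries completes with
 * pkt_resid at 16KB; SD_UPDATE_B_RESID below propagates that
 * residual into b_resid so the caller sees the partial transfer.)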
15959 */ 15960 if ((pktp->pkt_reason == CMD_CMPLT) && 15961 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15962 (pktp->pkt_resid != 0)) { 15963 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15964 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15965 SD_UPDATE_B_RESID(bp, pktp); 15966 } 15967 } 15968 goto fail_command; 15969 } 15970 xp->xb_retry_count++; 15971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15972 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15973 break; 15974 15975 case SD_RETRIES_UA: 15976 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15977 /* Retries exhausted, fail the command */ 15978 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15979 "Unit Attention retries exhausted. " 15980 "Check the target.\n"); 15981 goto fail_command; 15982 } 15983 xp->xb_ua_retry_count++; 15984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15985 "sd_retry_command: retry count:%d\n", 15986 xp->xb_ua_retry_count); 15987 break; 15988 15989 case SD_RETRIES_BUSY: 15990 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15991 /* Retries exhausted, fail the command */ 15992 SD_TRACE(SD_LOG_IO_CORE, un, 15993 "sd_retry_command: retries exhausted!\n"); 15994 goto fail_command; 15995 } 15996 xp->xb_retry_count++; 15997 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15998 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15999 break; 16000 16001 case SD_RETRIES_NOCHECK: 16002 default: 16003 /* No retry count to check. Just proceed with the retry */ 16004 break; 16005 } 16006 16007 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 16008 16009 /* 16010 * If this is a non-USCSI command being retried 16011 * during execution last time, we should post an ereport with 16012 * driver-assessment of the value "retry". 16013 * For partial DMA, request sense and STATUS_QFULL, there are no 16014 * hardware errors, we bypass ereport posting. 16015 */ 16016 if (failure_code != 0) { 16017 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16018 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 16019 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 16020 } 16021 } 16022 16023 /* 16024 * If we were given a zero timeout, we must attempt to retry the 16025 * command immediately (ie, without a delay). 16026 */ 16027 if (retry_delay == 0) { 16028 /* 16029 * Check some limiting conditions to see if we can actually 16030 * do the immediate retry. If we cannot, then we must 16031 * fall back to queueing up a delayed retry. 16032 */ 16033 if (un->un_ncmds_in_transport >= un->un_throttle) { 16034 /* 16035 * We are at the throttle limit for the target, 16036 * fall back to delayed retry. 16037 */ 16038 retry_delay = un->un_busy_timeout; 16039 statp = kstat_waitq_enter; 16040 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16041 "sd_retry_command: immed. retry hit " 16042 "throttle!\n"); 16043 } else { 16044 /* 16045 * We're clear to proceed with the immediate retry. 16046 * First call the user-provided function (if any) 16047 */ 16048 if (user_funcp != NULL) { 16049 (*user_funcp)(un, bp, user_arg, 16050 SD_IMMEDIATE_RETRY_ISSUED); 16051 #ifdef __lock_lint 16052 sd_print_incomplete_msg(un, bp, user_arg, 16053 SD_IMMEDIATE_RETRY_ISSUED); 16054 sd_print_cmd_incomplete_msg(un, bp, user_arg, 16055 SD_IMMEDIATE_RETRY_ISSUED); 16056 sd_print_sense_failed_msg(un, bp, user_arg, 16057 SD_IMMEDIATE_RETRY_ISSUED); 16058 #endif 16059 } 16060 16061 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16062 "sd_retry_command: issuing immediate retry\n"); 16063 16064 /* 16065 * Call sd_start_cmds() to transport the command to 16066 * the target. 
16067 */ 16068 sd_start_cmds(un, bp); 16069 16070 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16071 "sd_retry_command exit\n"); 16072 return; 16073 } 16074 } 16075 16076 /* 16077 * Set up to retry the command after a delay. 16078 * First call the user-provided function (if any) 16079 */ 16080 if (user_funcp != NULL) { 16081 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 16082 } 16083 16084 sd_set_retry_bp(un, bp, retry_delay, statp); 16085 16086 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 16087 return; 16088 16089 fail_command: 16090 16091 if (user_funcp != NULL) { 16092 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 16093 } 16094 16095 fail_command_no_log: 16096 16097 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16098 "sd_retry_command: returning failed command\n"); 16099 16100 sd_return_failed_command(un, bp, failure_code); 16101 16102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 16103 } 16104 16105 16106 /* 16107 * Function: sd_set_retry_bp 16108 * 16109 * Description: Set up the given bp for retry. 16110 * 16111 * Arguments: un - ptr to associated softstate 16112 * bp - ptr to buf(9S) for the command 16113 * retry_delay - time interval before issuing retry (may be 0) 16114 * statp - optional pointer to kstat function 16115 * 16116 * Context: May be called under interrupt context 16117 */ 16118 16119 static void 16120 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 16121 void (*statp)(kstat_io_t *)) 16122 { 16123 ASSERT(un != NULL); 16124 ASSERT(mutex_owned(SD_MUTEX(un))); 16125 ASSERT(bp != NULL); 16126 16127 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16128 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 16129 16130 /* 16131 * Indicate that the command is being retried. This will not allow any 16132 * other commands on the wait queue to be transported to the target 16133 * until this command has been completed (success or failure). The 16134 * "retry command" is not transported to the target until the given 16135 * time delay expires, unless the user specified a 0 retry_delay. 16136 * 16137 * Note: the timeout(9F) callback routine is what actually calls 16138 * sd_start_cmds() to transport the command, with the exception of a 16139 * zero retry_delay. The only current implementor of a zero retry delay 16140 * is the case where a START_STOP_UNIT is sent to spin-up a device. 16141 */ 16142 if (un->un_retry_bp == NULL) { 16143 ASSERT(un->un_retry_statp == NULL); 16144 un->un_retry_bp = bp; 16145 16146 /* 16147 * If the user has not specified a delay the command should 16148 * be queued and no timeout should be scheduled. 16149 */ 16150 if (retry_delay == 0) { 16151 /* 16152 * Save the kstat pointer that will be used in the 16153 * call to SD_UPDATE_KSTATS() below, so that 16154 * sd_start_cmds() can correctly decrement the waitq 16155 * count when it is time to transport this command. 16156 */ 16157 un->un_retry_statp = statp; 16158 goto done; 16159 } 16160 } 16161 16162 if (un->un_retry_bp == bp) { 16163 /* 16164 * Save the kstat pointer that will be used in the call to 16165 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 16166 * correctly decrement the waitq count when it is time to 16167 * transport this command. 16168 */ 16169 un->un_retry_statp = statp; 16170 16171 /* 16172 * Schedule a timeout if: 16173 * 1) The user has specified a delay. 16174 * 2) There is not a START_STOP_UNIT callback pending. 
16175 * 16176 * If no delay has been specified, then it is up to the caller 16177 * to ensure that IO processing continues without stalling. 16178 * Effectively, this means that the caller will issue the 16179 * required call to sd_start_cmds(). The START_STOP_UNIT 16180 * callback does this after the START STOP UNIT command has 16181 * completed. In either of these cases we should not schedule 16182 * a timeout callback here. Also don't schedule the timeout if 16183 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 16184 */ 16185 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 16186 (un->un_direct_priority_timeid == NULL)) { 16187 un->un_retry_timeid = 16188 timeout(sd_start_retry_command, un, retry_delay); 16189 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16190 "sd_set_retry_bp: setting timeout: un: 0x%p" 16191 " bp:0x%p un_retry_timeid:0x%p\n", 16192 un, bp, un->un_retry_timeid); 16193 } 16194 } else { 16195 /* 16196 * We only get in here if there is already another command 16197 * waiting to be retried. In this case, we just put the 16198 * given command onto the wait queue, so it can be transported 16199 * after the current retry command has completed. 16200 * 16201 * Also we have to make sure that if the command at the head 16202 * of the wait queue is the un_failfast_bp, that we do not 16203 * put ahead of it any other commands that are to be retried. 16204 */ 16205 if ((un->un_failfast_bp != NULL) && 16206 (un->un_failfast_bp == un->un_waitq_headp)) { 16207 /* 16208 * Enqueue this command AFTER the first command on 16209 * the wait queue (which is also un_failfast_bp). 16210 */ 16211 bp->av_forw = un->un_waitq_headp->av_forw; 16212 un->un_waitq_headp->av_forw = bp; 16213 if (un->un_waitq_headp == un->un_waitq_tailp) { 16214 un->un_waitq_tailp = bp; 16215 } 16216 } else { 16217 /* Enqueue this command at the head of the waitq. */ 16218 bp->av_forw = un->un_waitq_headp; 16219 un->un_waitq_headp = bp; 16220 if (un->un_waitq_tailp == NULL) { 16221 un->un_waitq_tailp = bp; 16222 } 16223 } 16224 16225 if (statp == NULL) { 16226 statp = kstat_waitq_enter; 16227 } 16228 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16229 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 16230 } 16231 16232 done: 16233 if (statp != NULL) { 16234 SD_UPDATE_KSTATS(un, statp, bp); 16235 } 16236 16237 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16238 "sd_set_retry_bp: exit un:0x%p\n", un); 16239 } 16240 16241 16242 /* 16243 * Function: sd_start_retry_command 16244 * 16245 * Description: Start the command that has been waiting on the target's 16246 * retry queue. Called from timeout(9F) context after the 16247 * retry delay interval has expired. 16248 * 16249 * Arguments: arg - pointer to associated softstate for the device. 16250 * 16251 * Context: timeout(9F) thread context. May not sleep. 
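 *
 * (The pending timeout is armed in sd_set_retry_bp() above via
 *
 *	un->un_retry_timeid =
 *	    timeout(sd_start_retry_command, un, retry_delay);
 *
 * and un_retry_timeid is cleared again on entry below, so a single
 * field tracks the one outstanding retry timeout per instance.)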
16252 */ 16253 16254 static void 16255 sd_start_retry_command(void *arg) 16256 { 16257 struct sd_lun *un = arg; 16258 16259 ASSERT(un != NULL); 16260 ASSERT(!mutex_owned(SD_MUTEX(un))); 16261 16262 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16263 "sd_start_retry_command: entry\n"); 16264 16265 mutex_enter(SD_MUTEX(un)); 16266 16267 un->un_retry_timeid = NULL; 16268 16269 if (un->un_retry_bp != NULL) { 16270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16271 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 16272 un, un->un_retry_bp); 16273 sd_start_cmds(un, un->un_retry_bp); 16274 } 16275 16276 mutex_exit(SD_MUTEX(un)); 16277 16278 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16279 "sd_start_retry_command: exit\n"); 16280 } 16281 16282 /* 16283 * Function: sd_rmw_msg_print_handler 16284 * 16285 * Description: If RMW mode is enabled and the warning message is 16286 * triggered, print the I/O count during a fixed interval. 16287 * 16288 * Arguments: arg - pointer to associated softstate for the device. 16289 * 16290 * Context: timeout(9F) thread context. May not sleep. 16291 */ 16292 static void 16293 sd_rmw_msg_print_handler(void *arg) 16294 { 16295 struct sd_lun *un = arg; 16296 16297 ASSERT(un != NULL); 16298 ASSERT(!mutex_owned(SD_MUTEX(un))); 16299 16300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16301 "sd_rmw_msg_print_handler: entry\n"); 16302 16303 mutex_enter(SD_MUTEX(un)); 16304 16305 if (un->un_rmw_incre_count > 0) { 16306 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16307 "%"PRIu64" I/O requests are not aligned with %d disk " 16308 "sector size in %ld seconds. They are handled through " 16309 "Read Modify Write but the performance is very low!\n", 16310 un->un_rmw_incre_count, un->un_tgt_blocksize, 16311 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 16312 un->un_rmw_incre_count = 0; 16313 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 16314 un, SD_RMW_MSG_PRINT_TIMEOUT); 16315 } else { 16316 un->un_rmw_msg_timeid = NULL; 16317 } 16318 16319 mutex_exit(SD_MUTEX(un)); 16320 16321 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16322 "sd_rmw_msg_print_handler: exit\n"); 16323 } 16324 16325 /* 16326 * Function: sd_start_direct_priority_command 16327 * 16328 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 16329 * received TRAN_BUSY when we called scsi_transport() to send it 16330 * to the underlying HBA. This function is called from timeout(9F) 16331 * context after the delay interval has expired. 16332 * 16333 * Arguments: arg - pointer to associated buf(9S) to be restarted. 16334 * 16335 * Context: timeout(9F) thread context. May not sleep. 16336 */ 16337 16338 static void 16339 sd_start_direct_priority_command(void *arg) 16340 { 16341 struct buf *priority_bp = arg; 16342 struct sd_lun *un; 16343 16344 ASSERT(priority_bp != NULL); 16345 un = SD_GET_UN(priority_bp); 16346 ASSERT(un != NULL); 16347 ASSERT(!mutex_owned(SD_MUTEX(un))); 16348 16349 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16350 "sd_start_direct_priority_command: entry\n"); 16351 16352 mutex_enter(SD_MUTEX(un)); 16353 un->un_direct_priority_timeid = NULL; 16354 sd_start_cmds(un, priority_bp); 16355 mutex_exit(SD_MUTEX(un)); 16356 16357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16358 "sd_start_direct_priority_command: exit\n"); 16359 } 16360 16361 16362 /* 16363 * Function: sd_send_request_sense_command 16364 * 16365 * Description: Sends a REQUEST SENSE command to the target 16366 * 16367 * Context: May be called from interrupt context.
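 *
 *		Note: A condensed sketch of how sdintr() reaches this path on
 *		a check condition when auto-request sense is not in play (see
 *		the FLAG_DIAGNOSE handling there; not a verbatim excerpt):
 *
 *		    if ((pktp->pkt_reason == CMD_CMPLT) &&
 *			(SD_GET_PKT_STATUS(pktp) == STATUS_CHECK))
 *			    sd_send_request_sense_command(un, bp, pktp);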
16368 */ 16369 16370 static void 16371 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 16372 struct scsi_pkt *pktp) 16373 { 16374 ASSERT(bp != NULL); 16375 ASSERT(un != NULL); 16376 ASSERT(mutex_owned(SD_MUTEX(un))); 16377 16378 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 16379 "entry: buf:0x%p\n", bp); 16380 16381 /* 16382 * If we are syncing or dumping, then fail the command to avoid a 16383 * recursive callback into scsi_transport(). Also fail the command 16384 * if we are suspended (legacy behavior). 16385 */ 16386 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 16387 (un->un_state == SD_STATE_DUMPING)) { 16388 sd_return_failed_command(un, bp, EIO); 16389 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16390 "sd_send_request_sense_command: syncing/dumping, exit\n"); 16391 return; 16392 } 16393 16394 /* 16395 * Retry the failed command and don't issue the request sense if: 16396 * 1) the sense buf is busy 16397 * 2) we have 1 or more outstanding commands on the target 16398 * (the sense data will be cleared or invalidated anyway) 16399 * 16400 * Note: There could be an issue with not checking a retry limit here; 16401 * the problem is determining which retry limit to check. 16402 */ 16403 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 16404 /* Don't retry if the command is flagged as non-retryable */ 16405 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16406 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16407 NULL, NULL, 0, un->un_busy_timeout, 16408 kstat_waitq_enter); 16409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16410 "sd_send_request_sense_command: " 16411 "at full throttle, retrying exit\n"); 16412 } else { 16413 sd_return_failed_command(un, bp, EIO); 16414 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16415 "sd_send_request_sense_command: " 16416 "at full throttle, non-retryable exit\n"); 16417 } 16418 return; 16419 } 16420 16421 sd_mark_rqs_busy(un, bp); 16422 sd_start_cmds(un, un->un_rqs_bp); 16423 16424 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16425 "sd_send_request_sense_command: exit\n"); 16426 } 16427 16428 16429 /* 16430 * Function: sd_mark_rqs_busy 16431 * 16432 * Description: Indicate that the request sense bp for this instance is 16433 * in use. 16434 * 16435 * Context: May be called under interrupt context 16436 */ 16437 16438 static void 16439 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16440 { 16441 struct sd_xbuf *sense_xp; 16442 16443 ASSERT(un != NULL); 16444 ASSERT(bp != NULL); 16445 ASSERT(mutex_owned(SD_MUTEX(un))); 16446 ASSERT(un->un_sense_isbusy == 0); 16447 16448 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16449 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16450 16451 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16452 ASSERT(sense_xp != NULL); 16453 16454 SD_INFO(SD_LOG_IO, un, 16455 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16456 16457 ASSERT(sense_xp->xb_pktp != NULL); 16458 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16459 == (FLAG_SENSING | FLAG_HEAD)); 16460 16461 un->un_sense_isbusy = 1; 16462 un->un_rqs_bp->b_resid = 0; 16463 sense_xp->xb_pktp->pkt_resid = 0; 16464 sense_xp->xb_pktp->pkt_reason = 0; 16465 16466 /* So we can get back the bp at interrupt time! */ 16467 sense_xp->xb_sense_bp = bp; 16468 16469 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16470 16471 /* 16472 * Mark this buf as awaiting sense data. (This is already set in 16473 * the pkt_flags for the RQS packet.)
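 *
 * FLAG_SENSING is also what sdintr() keys off to route the REQUEST
 * SENSE completion back to this command (via xb_sense_bp, saved
 * above); sd_mark_rqs_idle() clears the flag once the sense data has
 * been claimed.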
16474 */ 16475 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16476 16477 /* Request sense down same path */ 16478 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16479 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16480 sense_xp->xb_pktp->pkt_path_instance = 16481 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16482 16483 sense_xp->xb_retry_count = 0; 16484 sense_xp->xb_victim_retry_count = 0; 16485 sense_xp->xb_ua_retry_count = 0; 16486 sense_xp->xb_nr_retry_count = 0; 16487 sense_xp->xb_dma_resid = 0; 16488 16489 /* Clean up the fields for auto-request sense */ 16490 sense_xp->xb_sense_status = 0; 16491 sense_xp->xb_sense_state = 0; 16492 sense_xp->xb_sense_resid = 0; 16493 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16494 16495 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16496 } 16497 16498 16499 /* 16500 * Function: sd_mark_rqs_idle 16501 * 16502 * Description: SD_MUTEX must be held continuously through this routine 16503 * to prevent reuse of the rqs struct before the caller can 16504 * complete its processing. 16505 * 16506 * Return Code: Pointer to the RQS buf 16507 * 16508 * Context: May be called under interrupt context 16509 */ 16510 16511 static struct buf * 16512 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16513 { 16514 struct buf *bp; 16515 ASSERT(un != NULL); 16516 ASSERT(sense_xp != NULL); 16517 ASSERT(mutex_owned(SD_MUTEX(un))); 16518 ASSERT(un->un_sense_isbusy != 0); 16519 16520 un->un_sense_isbusy = 0; 16521 bp = sense_xp->xb_sense_bp; 16522 sense_xp->xb_sense_bp = NULL; 16523 16524 /* This pkt is no longer interested in getting sense data */ 16525 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16526 16527 return (bp); 16528 } 16529 16530 16531 16532 /* 16533 * Function: sd_alloc_rqs 16534 * 16535 * Description: Set up the unit to receive auto request sense data 16536 * 16537 * Return Code: DDI_SUCCESS or DDI_FAILURE 16538 * 16539 * Context: Called under attach(9E) context 16540 */ 16541 16542 static int 16543 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16544 { 16545 struct sd_xbuf *xp; 16546 16547 ASSERT(un != NULL); 16548 ASSERT(!mutex_owned(SD_MUTEX(un))); 16549 ASSERT(un->un_rqs_bp == NULL); 16550 ASSERT(un->un_rqs_pktp == NULL); 16551 16552 /* 16553 * First allocate the required buf and scsi_pkt structs, then set up 16554 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16555 */ 16556 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16557 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16558 if (un->un_rqs_bp == NULL) { 16559 return (DDI_FAILURE); 16560 } 16561 16562 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16563 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16564 16565 if (un->un_rqs_pktp == NULL) { 16566 sd_free_rqs(un); 16567 return (DDI_FAILURE); 16568 } 16569 16570 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16571 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16572 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16573 16574 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16575 16576 /* Set up the other needed members in the ARQ scsi_pkt. */ 16577 un->un_rqs_pktp->pkt_comp = sdintr; 16578 un->un_rqs_pktp->pkt_time = sd_io_time; 16579 un->un_rqs_pktp->pkt_flags |= 16580 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16581 16582 /* 16583 * Allocate & init the sd_xbuf struct for the RQS command.
Do not 16584 * provide any initpkt, destroypkt routines as we take care of 16585 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 16586 */ 16587 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 16588 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 16589 xp->xb_pktp = un->un_rqs_pktp; 16590 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16591 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 16592 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 16593 16594 /* 16595 * Save the pointer to the request sense private bp so it can 16596 * be retrieved in sdintr. 16597 */ 16598 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 16599 ASSERT(un->un_rqs_bp->b_private == xp); 16600 16601 /* 16602 * See if the HBA supports auto-request sense for the specified 16603 * target/lun. If it does, then try to enable it (if not already 16604 * enabled). 16605 * 16606 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 16607 * failure, while for other HBAs (pln) scsi_ifsetcap will always 16608 * return success. However, in both of these cases ARQ is always 16609 * enabled and scsi_ifgetcap will always return true. The best approach 16610 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 16611 * 16612 * The 3rd case is an HBA (adp) that always returns enabled on 16613 * scsi_ifgetcap even when it is not enabled; the best approach 16614 * is to issue a scsi_ifsetcap then a scsi_ifgetcap. 16615 * Note: this case is to circumvent the Adaptec bug. (x86 only) 16616 */ 16617 16618 if (un->un_f_is_fibre == TRUE) { 16619 un->un_f_arq_enabled = TRUE; 16620 } else { 16621 #if defined(__i386) || defined(__amd64) 16622 /* 16623 * Circumvent the Adaptec bug, remove this code when 16624 * the bug is fixed 16625 */ 16626 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 16627 #endif 16628 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 16629 case 0: 16630 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16631 "sd_alloc_rqs: HBA supports ARQ\n"); 16632 /* 16633 * ARQ is supported by this HBA but currently is not 16634 * enabled. Attempt to enable it and if successful then 16635 * mark this instance as ARQ enabled. 16636 */ 16637 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 16638 == 1) { 16639 /* Successfully enabled ARQ in the HBA */ 16640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16641 "sd_alloc_rqs: ARQ enabled\n"); 16642 un->un_f_arq_enabled = TRUE; 16643 } else { 16644 /* Could not enable ARQ in the HBA */ 16645 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16646 "sd_alloc_rqs: failed ARQ enable\n"); 16647 un->un_f_arq_enabled = FALSE; 16648 } 16649 break; 16650 case 1: 16651 /* 16652 * ARQ is supported by this HBA and is already enabled. 16653 * Just mark ARQ as enabled for this instance. 16654 */ 16655 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16656 "sd_alloc_rqs: ARQ already enabled\n"); 16657 un->un_f_arq_enabled = TRUE; 16658 break; 16659 default: 16660 /* 16661 * ARQ is not supported by this HBA; disable it for this 16662 * instance. 16663 */ 16664 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16665 "sd_alloc_rqs: HBA does not support ARQ\n"); 16666 un->un_f_arq_enabled = FALSE; 16667 break; 16668 } 16669 } 16670 16671 return (DDI_SUCCESS); 16672 } 16673 16674 16675 /* 16676 * Function: sd_free_rqs 16677 * 16678 * Description: Cleanup for the per-instance RQS command.
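 *
 *		A sketch of the teardown order required by the body
 *		below (destroy the pkt before freeing the consistent
 *		buffer; see the comment there for why):
 *
 *		    scsi_destroy_pkt(un->un_rqs_pktp);
 *		    scsi_free_consistent_buf(un->un_rqs_bp);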
16679 * 16680 * Context: Kernel thread context 16681 */ 16682 16683 static void 16684 sd_free_rqs(struct sd_lun *un) 16685 { 16686 ASSERT(un != NULL); 16687 16688 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16689 16690 /* 16691 * If consistent memory is bound to a scsi_pkt, the pkt 16692 * has to be destroyed *before* freeing the consistent memory. 16693 * Don't change the sequence of these operations: 16694 * scsi_destroy_pkt() might otherwise access memory after it 16695 * has been freed by scsi_free_consistent_buf(), which isn't allowed. 16696 */ 16697 if (un->un_rqs_pktp != NULL) { 16698 scsi_destroy_pkt(un->un_rqs_pktp); 16699 un->un_rqs_pktp = NULL; 16700 } 16701 16702 if (un->un_rqs_bp != NULL) { 16703 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16704 if (xp != NULL) { 16705 kmem_free(xp, sizeof (struct sd_xbuf)); 16706 } 16707 scsi_free_consistent_buf(un->un_rqs_bp); 16708 un->un_rqs_bp = NULL; 16709 } 16710 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16711 } 16712 16713 16714 16715 /* 16716 * Function: sd_reduce_throttle 16717 * 16718 * Description: Reduces the maximum # of outstanding commands on a 16719 * target to the current number of outstanding commands. 16720 * Queues a timeout(9F) callback to restore the limit 16721 * after a specified interval has elapsed. 16722 * Typically used when we get a TRAN_BUSY return code 16723 * back from scsi_transport(). 16724 * 16725 * Arguments: un - ptr to the sd_lun softstate struct 16726 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 16727 * 16728 * Context: May be called from interrupt context 16729 */ 16730 16731 static void 16732 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 16733 { 16734 ASSERT(un != NULL); 16735 ASSERT(mutex_owned(SD_MUTEX(un))); 16736 ASSERT(un->un_ncmds_in_transport >= 0); 16737 16738 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16739 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 16740 un, un->un_throttle, un->un_ncmds_in_transport); 16741 16742 if (un->un_throttle > 1) { 16743 if (un->un_f_use_adaptive_throttle == TRUE) { 16744 switch (throttle_type) { 16745 case SD_THROTTLE_TRAN_BUSY: 16746 if (un->un_busy_throttle == 0) { 16747 un->un_busy_throttle = un->un_throttle; 16748 } 16749 break; 16750 case SD_THROTTLE_QFULL: 16751 un->un_busy_throttle = 0; 16752 break; 16753 default: 16754 ASSERT(FALSE); 16755 } 16756 16757 if (un->un_ncmds_in_transport > 0) { 16758 un->un_throttle = un->un_ncmds_in_transport; 16759 } 16760 16761 } else { 16762 if (un->un_ncmds_in_transport == 0) { 16763 un->un_throttle = 1; 16764 } else { 16765 un->un_throttle = un->un_ncmds_in_transport; 16766 } 16767 } 16768 } 16769 16770 /* Reschedule the timeout if none is currently active */ 16771 if (un->un_reset_throttle_timeid == NULL) { 16772 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 16773 un, SD_THROTTLE_RESET_INTERVAL); 16774 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16775 "sd_reduce_throttle: timeout scheduled!\n"); 16776 } 16777 16778 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16779 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16780 } 16781 16782 16783 16784 /* 16785 * Function: sd_restore_throttle 16786 * 16787 * Description: Callback function for timeout(9F). Resets the current 16788 * value of un->un_throttle to its default. 16789 * 16790 * Arguments: arg - pointer to associated softstate for the device.
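 *
 *		Note: An illustrative walk-through of the QFULL ramp-up coded
 *		below, assuming sd_qfull_throttle_enable is set: each pass
 *		adds max(un_throttle / 10, 1) (integer division), so a
 *		throttle of 8 with a saved value of 16 steps
 *		8 -> 9 -> 10 -> ... -> 16, with the callback rescheduling
 *		itself at SD_QFULL_THROTTLE_RESET_INTERVAL until the saved
 *		value is reached.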
16791 * 16792 * Context: May be called from interrupt context 16793 */ 16794 16795 static void 16796 sd_restore_throttle(void *arg) 16797 { 16798 struct sd_lun *un = arg; 16799 16800 ASSERT(un != NULL); 16801 ASSERT(!mutex_owned(SD_MUTEX(un))); 16802 16803 mutex_enter(SD_MUTEX(un)); 16804 16805 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16806 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16807 16808 un->un_reset_throttle_timeid = NULL; 16809 16810 if (un->un_f_use_adaptive_throttle == TRUE) { 16811 /* 16812 * If un_busy_throttle is nonzero, then it contains the 16813 * value that un_throttle was when we got a TRAN_BUSY back 16814 * from scsi_transport(). We want to revert to this 16815 * value. 16816 * 16817 * In the QFULL case, the throttle limit will incrementally 16818 * increase until it reaches max throttle. 16819 */ 16820 if (un->un_busy_throttle > 0) { 16821 un->un_throttle = un->un_busy_throttle; 16822 un->un_busy_throttle = 0; 16823 } else { 16824 /* 16825 * Increase the throttle by 10% to open the gate 16826 * slowly; schedule another restore if the saved 16827 * throttle has not been reached. 16828 */ 16829 short throttle; 16830 if (sd_qfull_throttle_enable) { 16831 throttle = un->un_throttle + 16832 max((un->un_throttle / 10), 1); 16833 un->un_throttle = 16834 (throttle < un->un_saved_throttle) ? 16835 throttle : un->un_saved_throttle; 16836 if (un->un_throttle < un->un_saved_throttle) { 16837 un->un_reset_throttle_timeid = 16838 timeout(sd_restore_throttle, 16839 un, 16840 SD_QFULL_THROTTLE_RESET_INTERVAL); 16841 } 16842 } 16843 } 16844 16845 /* 16846 * If un_throttle has fallen below the low-water mark, we 16847 * restore the maximum value here (and allow it to ratchet 16848 * down again if necessary). 16849 */ 16850 if (un->un_throttle < un->un_min_throttle) { 16851 un->un_throttle = un->un_saved_throttle; 16852 } 16853 } else { 16854 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16855 "restoring limit from 0x%x to 0x%x\n", 16856 un->un_throttle, un->un_saved_throttle); 16857 un->un_throttle = un->un_saved_throttle; 16858 } 16859 16860 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16861 "sd_restore_throttle: calling sd_start_cmds!\n"); 16862 16863 sd_start_cmds(un, NULL); 16864 16865 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16866 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16867 un, un->un_throttle); 16868 16869 mutex_exit(SD_MUTEX(un)); 16870 16871 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16872 } 16873 16874 /* 16875 * Function: sdrunout 16876 * 16877 * Description: Callback routine for scsi_init_pkt when a resource allocation 16878 * fails. 16879 * 16880 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16881 * soft state instance. 16882 * 16883 * Return Code: The scsi_init_pkt routine allows for the callback function to 16884 * return a 0 indicating the callback should be rescheduled or a 1 16885 * indicating not to reschedule. This routine always returns 1 16886 * because the driver always provides a callback function to 16887 * scsi_init_pkt. This results in a callback always being scheduled 16888 * (via the scsi_init_pkt callback implementation) if a resource 16889 * failure occurs. 16890 * 16891 * Context: This callback function may not block or call routines that block 16892 * 16893 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16894 * request persisting at the head of the list which cannot be 16895 * satisfied even after multiple retries.
In the future the driver 16896 * may implement some type of maximum runout count before failing 16897 * an I/O. 16898 */ 16899 16900 static int 16901 sdrunout(caddr_t arg) 16902 { 16903 struct sd_lun *un = (struct sd_lun *)arg; 16904 16905 ASSERT(un != NULL); 16906 ASSERT(!mutex_owned(SD_MUTEX(un))); 16907 16908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16909 16910 mutex_enter(SD_MUTEX(un)); 16911 sd_start_cmds(un, NULL); 16912 mutex_exit(SD_MUTEX(un)); 16913 /* 16914 * This callback routine always returns 1 (i.e. do not reschedule) 16915 * because we always specify sdrunout as the callback handler for 16916 * scsi_init_pkt inside the call to sd_start_cmds. 16917 */ 16918 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16919 return (1); 16920 } 16921 16922 16923 /* 16924 * Function: sdintr 16925 * 16926 * Description: Completion callback routine for scsi_pkt(9S) structs 16927 * sent to the HBA driver via scsi_transport(9F). 16928 * 16929 * Context: Interrupt context 16930 */ 16931 16932 static void 16933 sdintr(struct scsi_pkt *pktp) 16934 { 16935 struct buf *bp; 16936 struct sd_xbuf *xp; 16937 struct sd_lun *un; 16938 size_t actual_len; 16939 sd_ssc_t *sscp; 16940 16941 ASSERT(pktp != NULL); 16942 bp = (struct buf *)pktp->pkt_private; 16943 ASSERT(bp != NULL); 16944 xp = SD_GET_XBUF(bp); 16945 ASSERT(xp != NULL); 16946 ASSERT(xp->xb_pktp != NULL); 16947 un = SD_GET_UN(bp); 16948 ASSERT(un != NULL); 16949 ASSERT(!mutex_owned(SD_MUTEX(un))); 16950 16951 #ifdef SD_FAULT_INJECTION 16952 16953 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16954 /* SD FaultInjection */ 16955 sd_faultinjection(pktp); 16956 16957 #endif /* SD_FAULT_INJECTION */ 16958 16959 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16960 " xp:0x%p, un:0x%p\n", bp, xp, un); 16961 16962 mutex_enter(SD_MUTEX(un)); 16963 16964 ASSERT(un->un_fm_private != NULL); 16965 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16966 ASSERT(sscp != NULL); 16967 16968 /* Reduce the count of the #commands currently in transport */ 16969 un->un_ncmds_in_transport--; 16970 ASSERT(un->un_ncmds_in_transport >= 0); 16971 16972 /* Increment counter to indicate that the callback routine is active */ 16973 un->un_in_callback++; 16974 16975 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16976 16977 #ifdef SDDEBUG 16978 if (bp == un->un_retry_bp) { 16979 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16980 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16981 un, un->un_retry_bp, un->un_ncmds_in_transport); 16982 } 16983 #endif 16984 16985 /* 16986 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16987 * state if needed. 16988 */ 16989 if (pktp->pkt_reason == CMD_DEV_GONE) { 16990 /* Prevent multiple console messages for the same failure. */ 16991 if (un->un_last_pkt_reason != CMD_DEV_GONE) { 16992 un->un_last_pkt_reason = CMD_DEV_GONE; 16993 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16994 "Command failed to complete...Device is gone\n"); 16995 } 16996 if (un->un_mediastate != DKIO_DEV_GONE) { 16997 un->un_mediastate = DKIO_DEV_GONE; 16998 cv_broadcast(&un->un_state_cv); 16999 } 17000 /* 17001 * If the command happens to be the REQUEST SENSE command, 17002 * free up the rqs buf and fail the original command.
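 *
 * sd_mark_rqs_idle() hands back the bp of the original command, so
 * the sd_return_failed_command() call below always fails the
 * originating request rather than the driver's internal RQS bp.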
17003 */ 17004 if (bp == un->un_rqs_bp) { 17005 bp = sd_mark_rqs_idle(un, xp); 17006 } 17007 sd_return_failed_command(un, bp, EIO); 17008 goto exit; 17009 } 17010 17011 if (pktp->pkt_state & STATE_XARQ_DONE) { 17012 SD_TRACE(SD_LOG_COMMON, un, 17013 "sdintr: extra sense data received. pkt=%p\n", pktp); 17014 } 17015 17016 /* 17017 * First see if the pkt has auto-request sense data with it.... 17018 * Look at the packet state first so we don't take a performance 17019 * hit looking at the arq enabled flag unless absolutely necessary. 17020 */ 17021 if ((pktp->pkt_state & STATE_ARQ_DONE) && 17022 (un->un_f_arq_enabled == TRUE)) { 17023 /* 17024 * The HBA did an auto request sense for this command so check 17025 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 17026 * driver command that should not be retried. 17027 */ 17028 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 17029 /* 17030 * Save the relevant sense info into the xp for the 17031 * original cmd. 17032 */ 17033 struct scsi_arq_status *asp; 17034 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17035 xp->xb_sense_status = 17036 *((uchar_t *)(&(asp->sts_rqpkt_status))); 17037 xp->xb_sense_state = asp->sts_rqpkt_state; 17038 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17039 if (pktp->pkt_state & STATE_XARQ_DONE) { 17040 actual_len = MAX_SENSE_LENGTH - 17041 xp->xb_sense_resid; 17042 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17043 MAX_SENSE_LENGTH); 17044 } else { 17045 if (xp->xb_sense_resid > SENSE_LENGTH) { 17046 actual_len = MAX_SENSE_LENGTH - 17047 xp->xb_sense_resid; 17048 } else { 17049 actual_len = SENSE_LENGTH - 17050 xp->xb_sense_resid; 17051 } 17052 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17053 if ((((struct uscsi_cmd *) 17054 (xp->xb_pktinfo))->uscsi_rqlen) > 17055 actual_len) { 17056 xp->xb_sense_resid = 17057 (((struct uscsi_cmd *) 17058 (xp->xb_pktinfo))-> 17059 uscsi_rqlen) - actual_len; 17060 } else { 17061 xp->xb_sense_resid = 0; 17062 } 17063 } 17064 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17065 SENSE_LENGTH); 17066 } 17067 17068 /* fail the command */ 17069 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17070 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 17071 sd_return_failed_command(un, bp, EIO); 17072 goto exit; 17073 } 17074 17075 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 17076 /* 17077 * We want to either retry or fail this command, so free 17078 * the DMA resources here. If we retry the command then 17079 * the DMA resources will be reallocated in sd_start_cmds(). 17080 * Note that when PKT_DMA_PARTIAL is used, this reallocation 17081 * causes the *entire* transfer to start over again from the 17082 * beginning of the request, even for PARTIAL chunks that 17083 * have already transferred successfully. 
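 *
 * A condensed view of the trade-off: the scsi_dmafree() call below
 * (recorded via SD_XB_DMA_FREED) releases the binding so that a retry
 * can rebind DMA resources, at the cost of restarting a
 * PKT_DMA_PARTIAL transfer from the beginning of the request.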
17084 */ 17085 if ((un->un_f_is_fibre == TRUE) && 17086 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 17087 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 17088 scsi_dmafree(pktp); 17089 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 17090 } 17091 #endif 17092 17093 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17094 "sdintr: arq done, sd_handle_auto_request_sense\n"); 17095 17096 sd_handle_auto_request_sense(un, bp, xp, pktp); 17097 goto exit; 17098 } 17099 17100 /* Next see if this is the REQUEST SENSE pkt for the instance */ 17101 if (pktp->pkt_flags & FLAG_SENSING) { 17102 /* This pktp is from the unit's REQUEST_SENSE command */ 17103 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17104 "sdintr: sd_handle_request_sense\n"); 17105 sd_handle_request_sense(un, bp, xp, pktp); 17106 goto exit; 17107 } 17108 17109 /* 17110 * Check to see if the command successfully completed as requested; 17111 * this is the most common case (and also the hot performance path). 17112 * 17113 * Requirements for successful completion are: 17114 * pkt_reason is CMD_CMPLT and packet status is status good. 17115 * In addition: 17116 * - A residual of zero indicates successful completion no matter what 17117 * the command is. 17118 * - If the residual is not zero and the command is not a read or 17119 * write, then it's still defined as successful completion. In other 17120 * words, if the command is a read or write the residual must be 17121 * zero for successful completion. 17122 * - If the residual is not zero and the command is a read or 17123 * write, and it's a USCSICMD, then it's still defined as 17124 * successful completion. 17125 */ 17126 if ((pktp->pkt_reason == CMD_CMPLT) && 17127 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 17128 17129 /* 17130 * Since this command is returned with a good status, we 17131 * can reset the count for Sonoma failover. 17132 */ 17133 un->un_sonoma_failure_count = 0; 17134 17135 /* 17136 * Return all USCSI commands on good status 17137 */ 17138 if (pktp->pkt_resid == 0) { 17139 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17140 "sdintr: returning command for resid == 0\n"); 17141 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 17142 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 17143 SD_UPDATE_B_RESID(bp, pktp); 17144 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17145 "sdintr: returning command for resid != 0\n"); 17146 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17147 SD_UPDATE_B_RESID(bp, pktp); 17148 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17149 "sdintr: returning uscsi command\n"); 17150 } else { 17151 goto not_successful; 17152 } 17153 sd_return_command(un, bp); 17154 17155 /* 17156 * Decrement counter to indicate that the callback routine 17157 * is done. 17158 */ 17159 un->un_in_callback--; 17160 ASSERT(un->un_in_callback >= 0); 17161 mutex_exit(SD_MUTEX(un)); 17162 17163 return; 17164 } 17165 17166 not_successful: 17167 17168 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 17169 /* 17170 * The following is based upon knowledge of the underlying transport 17171 * and its use of DMA resources. This code should be removed when 17172 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 17173 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 17174 * and sd_start_cmds(). 17175 * 17176 * Free any DMA resources associated with this command if there 17177 * is a chance it could be retried or enqueued for later retry. 
17178 * If we keep the DMA binding then mpxio cannot reissue the 17179 * command on another path whenever a path failure occurs. 17180 * 17181 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 17182 * causes the *entire* transfer to start over again from the 17183 * beginning of the request, even for PARTIAL chunks that 17184 * have already transferred successfully. 17185 * 17186 * This is only done for non-uscsi commands (and also skipped for the 17187 * driver's internal RQS command). Also just do this for Fibre Channel 17188 * devices as these are the only ones that support mpxio. 17189 */ 17190 if ((un->un_f_is_fibre == TRUE) && 17191 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 17192 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 17193 scsi_dmafree(pktp); 17194 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 17195 } 17196 #endif 17197 17198 /* 17199 * The command did not successfully complete as requested so check 17200 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 17201 * driver command that should not be retried so just return. If 17202 * FLAG_DIAGNOSE is not set the error will be processed below. 17203 */ 17204 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 17205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17206 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 17207 /* 17208 * Issue a request sense if a check condition caused the error 17209 * (we handle the auto request sense case above), otherwise 17210 * just fail the command. 17211 */ 17212 if ((pktp->pkt_reason == CMD_CMPLT) && 17213 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 17214 sd_send_request_sense_command(un, bp, pktp); 17215 } else { 17216 sd_return_failed_command(un, bp, EIO); 17217 } 17218 goto exit; 17219 } 17220 17221 /* 17222 * The command did not successfully complete as requested so process 17223 * the error, retry, and/or attempt recovery. 17224 */ 17225 switch (pktp->pkt_reason) { 17226 case CMD_CMPLT: 17227 switch (SD_GET_PKT_STATUS(pktp)) { 17228 case STATUS_GOOD: 17229 /* 17230 * The command completed successfully with a non-zero 17231 * residual 17232 */ 17233 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17234 "sdintr: STATUS_GOOD \n"); 17235 sd_pkt_status_good(un, bp, xp, pktp); 17236 break; 17237 17238 case STATUS_CHECK: 17239 case STATUS_TERMINATED: 17240 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17241 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 17242 sd_pkt_status_check_condition(un, bp, xp, pktp); 17243 break; 17244 17245 case STATUS_BUSY: 17246 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17247 "sdintr: STATUS_BUSY\n"); 17248 sd_pkt_status_busy(un, bp, xp, pktp); 17249 break; 17250 17251 case STATUS_RESERVATION_CONFLICT: 17252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17253 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 17254 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17255 break; 17256 17257 case STATUS_QFULL: 17258 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17259 "sdintr: STATUS_QFULL\n"); 17260 sd_pkt_status_qfull(un, bp, xp, pktp); 17261 break; 17262 17263 case STATUS_MET: 17264 case STATUS_INTERMEDIATE: 17265 case STATUS_SCSI2: 17266 case STATUS_INTERMEDIATE_MET: 17267 case STATUS_ACA_ACTIVE: 17268 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17269 "Unexpected SCSI status received: 0x%x\n", 17270 SD_GET_PKT_STATUS(pktp)); 17271 /* 17272 * Mark the ssc_flags when detected invalid status 17273 * code for non-USCSI command. 
17274 */ 17275 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17276 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17277 0, "stat-code"); 17278 } 17279 sd_return_failed_command(un, bp, EIO); 17280 break; 17281 17282 default: 17283 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17284 "Invalid SCSI status received: 0x%x\n", 17285 SD_GET_PKT_STATUS(pktp)); 17286 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17287 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17288 0, "stat-code"); 17289 } 17290 sd_return_failed_command(un, bp, EIO); 17291 break; 17292 17293 } 17294 break; 17295 17296 case CMD_INCOMPLETE: 17297 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17298 "sdintr: CMD_INCOMPLETE\n"); 17299 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 17300 break; 17301 case CMD_TRAN_ERR: 17302 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17303 "sdintr: CMD_TRAN_ERR\n"); 17304 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 17305 break; 17306 case CMD_RESET: 17307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17308 "sdintr: CMD_RESET \n"); 17309 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 17310 break; 17311 case CMD_ABORTED: 17312 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17313 "sdintr: CMD_ABORTED \n"); 17314 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 17315 break; 17316 case CMD_TIMEOUT: 17317 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17318 "sdintr: CMD_TIMEOUT\n"); 17319 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 17320 break; 17321 case CMD_UNX_BUS_FREE: 17322 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17323 "sdintr: CMD_UNX_BUS_FREE \n"); 17324 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 17325 break; 17326 case CMD_TAG_REJECT: 17327 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17328 "sdintr: CMD_TAG_REJECT\n"); 17329 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 17330 break; 17331 default: 17332 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17333 "sdintr: default\n"); 17334 /* 17335 * Mark the ssc_flags for detecting an invalid pkt_reason. 17336 */ 17337 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17338 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 17339 0, "pkt-reason"); 17340 } 17341 sd_pkt_reason_default(un, bp, xp, pktp); 17342 break; 17343 } 17344 17345 exit: 17346 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 17347 17348 /* Decrement counter to indicate that the callback routine is done. */ 17349 un->un_in_callback--; 17350 ASSERT(un->un_in_callback >= 0); 17351 17352 /* 17353 * At this point, the pkt has been dispatched, i.e., it is either 17354 * being re-tried or has been returned to its caller and should 17355 * not be referenced. 17356 */ 17357 17358 mutex_exit(SD_MUTEX(un)); 17359 } 17360 17361 17362 /* 17363 * Function: sd_print_incomplete_msg 17364 * 17365 * Description: Prints the error message for a CMD_INCOMPLETE error. 17366 * 17367 * Arguments: un - ptr to associated softstate for the device. 17368 * bp - ptr to the buf(9S) for the command. 17369 * arg - message string ptr 17370 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 17371 * or SD_NO_RETRY_ISSUED.
17372 * 17373 * Context: May be called under interrupt context 17374 */ 17375 17376 static void 17377 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17378 { 17379 struct scsi_pkt *pktp; 17380 char *msgp; 17381 char *cmdp = arg; 17382 17383 ASSERT(un != NULL); 17384 ASSERT(mutex_owned(SD_MUTEX(un))); 17385 ASSERT(bp != NULL); 17386 ASSERT(arg != NULL); 17387 pktp = SD_GET_PKTP(bp); 17388 ASSERT(pktp != NULL); 17389 17390 switch (code) { 17391 case SD_DELAYED_RETRY_ISSUED: 17392 case SD_IMMEDIATE_RETRY_ISSUED: 17393 msgp = "retrying"; 17394 break; 17395 case SD_NO_RETRY_ISSUED: 17396 default: 17397 msgp = "giving up"; 17398 break; 17399 } 17400 17401 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17402 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17403 "incomplete %s- %s\n", cmdp, msgp); 17404 } 17405 } 17406 17407 17408 17409 /* 17410 * Function: sd_pkt_status_good 17411 * 17412 * Description: Processing for a STATUS_GOOD code in pkt_status. 17413 * 17414 * Context: May be called under interrupt context 17415 */ 17416 17417 static void 17418 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17419 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17420 { 17421 char *cmdp; 17422 17423 ASSERT(un != NULL); 17424 ASSERT(mutex_owned(SD_MUTEX(un))); 17425 ASSERT(bp != NULL); 17426 ASSERT(xp != NULL); 17427 ASSERT(pktp != NULL); 17428 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17429 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17430 ASSERT(pktp->pkt_resid != 0); 17431 17432 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17433 17434 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17435 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17436 case SCMD_READ: 17437 cmdp = "read"; 17438 break; 17439 case SCMD_WRITE: 17440 cmdp = "write"; 17441 break; 17442 default: 17443 SD_UPDATE_B_RESID(bp, pktp); 17444 sd_return_command(un, bp); 17445 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17446 return; 17447 } 17448 17449 /* 17450 * See if we can retry the read/write, preferably immediately. 17451 * If retries are exhausted, then sd_retry_command() will update 17452 * the b_resid count. 17453 */ 17454 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17455 cmdp, EIO, (clock_t)0, NULL); 17456 17457 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17458 } 17459 17460 17461 17462 17463 17464 /* 17465 * Function: sd_handle_request_sense 17466 * 17467 * Description: Processing for non-auto Request Sense command. 17468 * 17469 * Arguments: un - ptr to associated softstate 17470 * sense_bp - ptr to buf(9S) for the RQS command 17471 * sense_xp - ptr to the sd_xbuf for the RQS command 17472 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17473 * 17474 * Context: May be called under interrupt context 17475 */ 17476 17477 static void 17478 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17479 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17480 { 17481 struct buf *cmd_bp; /* buf for the original command */ 17482 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17483 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17484 size_t actual_len; /* actual sense data length */ 17485 17486 ASSERT(un != NULL); 17487 ASSERT(mutex_owned(SD_MUTEX(un))); 17488 ASSERT(sense_bp != NULL); 17489 ASSERT(sense_xp != NULL); 17490 ASSERT(sense_pktp != NULL); 17491 17492 /* 17493 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17494 * RQS command and not the original command.
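 *
 * The bp/xp/pktp triple for the original command is recovered just
 * below from sense_xp->xb_sense_bp, which sd_mark_rqs_busy() saved
 * when the REQUEST SENSE was issued.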
17495 */ 17496 ASSERT(sense_pktp == un->un_rqs_pktp); 17497 ASSERT(sense_bp == un->un_rqs_bp); 17498 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17499 (FLAG_SENSING | FLAG_HEAD)); 17500 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17501 FLAG_SENSING) == FLAG_SENSING); 17502 17503 /* These are the bp, xp, and pktp for the original command */ 17504 cmd_bp = sense_xp->xb_sense_bp; 17505 cmd_xp = SD_GET_XBUF(cmd_bp); 17506 cmd_pktp = SD_GET_PKTP(cmd_bp); 17507 17508 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17509 /* 17510 * The REQUEST SENSE command failed. Release the REQUEST 17511 * SENSE command for re-use, get back the bp for the original 17512 * command, and attempt to re-try the original command if 17513 * FLAG_DIAGNOSE is not set in the original packet. 17514 */ 17515 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17516 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17517 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17518 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17519 NULL, NULL, EIO, (clock_t)0, NULL); 17520 return; 17521 } 17522 } 17523 17524 /* 17525 * Save the relevant sense info into the xp for the original cmd. 17526 * 17527 * Note: if the request sense failed the state info will be zero 17528 * as set in sd_mark_rqs_busy() 17529 */ 17530 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17531 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17532 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17533 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17534 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17535 SENSE_LENGTH)) { 17536 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17537 MAX_SENSE_LENGTH); 17538 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17539 } else { 17540 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17541 SENSE_LENGTH); 17542 if (actual_len < SENSE_LENGTH) { 17543 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17544 } else { 17545 cmd_xp->xb_sense_resid = 0; 17546 } 17547 } 17548 17549 /* 17550 * Free up the RQS command.... 17551 * NOTE: 17552 * Must do this BEFORE calling sd_validate_sense_data! 17553 * sd_validate_sense_data may return the original command in 17554 * which case the pkt will be freed and the flags can no 17555 * longer be touched. 17556 * SD_MUTEX is held through this process until the command 17557 * is dispatched based upon the sense data, so there are 17558 * no race conditions. 17559 */ 17560 (void) sd_mark_rqs_idle(un, sense_xp); 17561 17562 /* 17563 * For a retryable command see if we have valid sense data, if so then 17564 * turn it over to sd_decode_sense() to figure out the right course of 17565 * action. Just fail a non-retryable command. 17566 */ 17567 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17568 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17569 SD_SENSE_DATA_IS_VALID) { 17570 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17571 } 17572 } else { 17573 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17574 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17575 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17576 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17577 sd_return_failed_command(un, cmd_bp, EIO); 17578 } 17579 } 17580 17581 17582 17583 17584 /* 17585 * Function: sd_handle_auto_request_sense 17586 * 17587 * Description: Processing for auto-request sense information. 
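 *		Unlike sd_handle_request_sense(), no separate REQUEST
 *		SENSE command is issued here: the sense bytes arrive
 *		piggybacked in the scsi_arq_status that pkt_scbp points
 *		to (see the body below).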
17588 * 17589 * Arguments: un - ptr to associated softstate 17590 * bp - ptr to buf(9S) for the command 17591 * xp - ptr to the sd_xbuf for the command 17592 * pktp - ptr to the scsi_pkt(9S) for the command 17593 * 17594 * Context: May be called under interrupt context 17595 */ 17596 17597 static void 17598 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17599 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17600 { 17601 struct scsi_arq_status *asp; 17602 size_t actual_len; 17603 17604 ASSERT(un != NULL); 17605 ASSERT(mutex_owned(SD_MUTEX(un))); 17606 ASSERT(bp != NULL); 17607 ASSERT(xp != NULL); 17608 ASSERT(pktp != NULL); 17609 ASSERT(pktp != un->un_rqs_pktp); 17610 ASSERT(bp != un->un_rqs_bp); 17611 17612 /* 17613 * For auto-request sense, we get a scsi_arq_status back from 17614 * the HBA, with the sense data in the sts_sensedata member. 17615 * The pkt_scbp of the packet points to this scsi_arq_status. 17616 */ 17617 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17618 17619 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17620 /* 17621 * The auto REQUEST SENSE failed; see if we can re-try 17622 * the original command. 17623 */ 17624 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17625 "auto request sense failed (reason=%s)\n", 17626 scsi_rname(asp->sts_rqpkt_reason)); 17627 17628 sd_reset_target(un, pktp); 17629 17630 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17631 NULL, NULL, EIO, (clock_t)0, NULL); 17632 return; 17633 } 17634 17635 /* Save the relevant sense info into the xp for the original cmd. */ 17636 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17637 xp->xb_sense_state = asp->sts_rqpkt_state; 17638 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17639 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17640 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17641 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17642 MAX_SENSE_LENGTH); 17643 } else { 17644 if (xp->xb_sense_resid > SENSE_LENGTH) { 17645 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17646 } else { 17647 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17648 } 17649 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17650 if ((((struct uscsi_cmd *) 17651 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17652 xp->xb_sense_resid = (((struct uscsi_cmd *) 17653 (xp->xb_pktinfo))->uscsi_rqlen) - 17654 actual_len; 17655 } else { 17656 xp->xb_sense_resid = 0; 17657 } 17658 } 17659 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17660 } 17661 17662 /* 17663 * See if we have valid sense data, if so then turn it over to 17664 * sd_decode_sense() to figure out the right course of action. 17665 */ 17666 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17667 SD_SENSE_DATA_IS_VALID) { 17668 sd_decode_sense(un, bp, xp, pktp); 17669 } 17670 } 17671 17672 17673 /* 17674 * Function: sd_print_sense_failed_msg 17675 * 17676 * Description: Print log message when RQS has failed. 
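 *		Wired up as the user_funcp callback to sd_retry_command()
 *		(see the sense_failed path in sd_validate_sense_data());
 *		it only logs when no retry was issued and a message was
 *		supplied.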
17677 * 17678 * Arguments: un - ptr to associated softstate 17679 * bp - ptr to buf(9S) for the command 17680 * arg - generic message string ptr 17681 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17682 * or SD_NO_RETRY_ISSUED 17683 * 17684 * Context: May be called from interrupt context 17685 */ 17686 17687 static void 17688 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17689 int code) 17690 { 17691 char *msgp = arg; 17692 17693 ASSERT(un != NULL); 17694 ASSERT(mutex_owned(SD_MUTEX(un))); 17695 ASSERT(bp != NULL); 17696 17697 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17698 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17699 } 17700 } 17701 17702 17703 /* 17704 * Function: sd_validate_sense_data 17705 * 17706 * Description: Check the given sense data for validity. 17707 * If the sense data is not valid, the command will 17708 * be either failed or retried! 17709 * 17710 * Return Code: SD_SENSE_DATA_IS_INVALID 17711 * SD_SENSE_DATA_IS_VALID 17712 * 17713 * Context: May be called from interrupt context 17714 */ 17715 17716 static int 17717 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17718 size_t actual_len) 17719 { 17720 struct scsi_extended_sense *esp; 17721 struct scsi_pkt *pktp; 17722 char *msgp = NULL; 17723 sd_ssc_t *sscp; 17724 17725 ASSERT(un != NULL); 17726 ASSERT(mutex_owned(SD_MUTEX(un))); 17727 ASSERT(bp != NULL); 17728 ASSERT(bp != un->un_rqs_bp); 17729 ASSERT(xp != NULL); 17730 ASSERT(un->un_fm_private != NULL); 17731 17732 pktp = SD_GET_PKTP(bp); 17733 ASSERT(pktp != NULL); 17734 17735 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17736 ASSERT(sscp != NULL); 17737 17738 /* 17739 * Check the status of the RQS command (auto or manual). 17740 */ 17741 switch (xp->xb_sense_status & STATUS_MASK) { 17742 case STATUS_GOOD: 17743 break; 17744 17745 case STATUS_RESERVATION_CONFLICT: 17746 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17747 return (SD_SENSE_DATA_IS_INVALID); 17748 17749 case STATUS_BUSY: 17750 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17751 "Busy Status on REQUEST SENSE\n"); 17752 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17753 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17754 return (SD_SENSE_DATA_IS_INVALID); 17755 17756 case STATUS_QFULL: 17757 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17758 "QFULL Status on REQUEST SENSE\n"); 17759 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17760 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17761 return (SD_SENSE_DATA_IS_INVALID); 17762 17763 case STATUS_CHECK: 17764 case STATUS_TERMINATED: 17765 msgp = "Check Condition on REQUEST SENSE\n"; 17766 goto sense_failed; 17767 17768 default: 17769 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17770 goto sense_failed; 17771 } 17772 17773 /* 17774 * See if we got the minimum required amount of sense data. 17775 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17776 * or less. 
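 *
 * The checks that follow enforce, in order: that some sense bytes
 * actually transferred, that at least SUN_MIN_SENSE_LENGTH bytes
 * arrived, and (further below) that the response code describes a
 * sense format this driver can decode.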
17777 */ 17778 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17779 (actual_len == 0)) { 17780 msgp = "Request Sense couldn't get sense data\n"; 17781 goto sense_failed; 17782 } 17783 17784 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17785 msgp = "Not enough sense information\n"; 17786 /* Mark the ssc_flags for detecting invalid sense data */ 17787 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17788 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17789 "sense-data"); 17790 } 17791 goto sense_failed; 17792 } 17793 17794 /* 17795 * We require the extended sense data 17796 */ 17797 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17798 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17799 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17800 static char tmp[8]; 17801 static char buf[148]; 17802 char *p = (char *)(xp->xb_sense_data); 17803 int i; 17804 17805 mutex_enter(&sd_sense_mutex); 17806 (void) strcpy(buf, "undecodable sense information:"); 17807 for (i = 0; i < actual_len; i++) { 17808 (void) sprintf(tmp, " 0x%x", *(p++) & 0xff); 17809 (void) strcpy(&buf[strlen(buf)], tmp); 17810 } 17811 i = strlen(buf); 17812 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17813 17814 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17815 scsi_log(SD_DEVINFO(un), sd_label, 17816 CE_WARN, buf); 17817 } 17818 mutex_exit(&sd_sense_mutex); 17819 } 17820 17821 /* Mark the ssc_flags for detecting invalid sense data */ 17822 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17823 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17824 "sense-data"); 17825 } 17826 17827 /* Note: Legacy behavior, fail the command with no retry */ 17828 sd_return_failed_command(un, bp, EIO); 17829 return (SD_SENSE_DATA_IS_INVALID); 17830 } 17831 17832 /* 17833 * Check that es_code is valid (es_class concatenated with es_code 17834 * make up the "response code" field). es_class will always be 7, so 17835 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17836 * format. 17837 */ 17838 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17839 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17840 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17841 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17842 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17843 /* Mark the ssc_flags for detecting invalid sense data */ 17844 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17845 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17846 "sense-data"); 17847 } 17848 goto sense_failed; 17849 } 17850 17851 return (SD_SENSE_DATA_IS_VALID); 17852 17853 sense_failed: 17854 /* 17855 * If the request sense failed (for whatever reason), attempt 17856 * to retry the original command. 17857 */ 17858 #if defined(__i386) || defined(__amd64) 17859 /* 17860 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17861 * sddef.h for the SPARC platform, while x86 uses one binary 17862 * for both SCSI/FC. 17863 * The SD_RETRY_DELAY value needs to be adjusted here 17864 * whenever SD_RETRY_DELAY changes in sddef.h. 17865 */ 17866 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17867 sd_print_sense_failed_msg, msgp, EIO, 17868 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL); 17869 #else 17870 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17871 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17872 #endif 17873 17874 return (SD_SENSE_DATA_IS_INVALID); 17875 } 17876 17877 /* 17878 * Function: sd_decode_sense 17879 * 17880 * Description: Take recovery action(s) when SCSI Sense Data is received. 17881 * 17882 * Context: Interrupt context.
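 *
 *		Note: The dispatch below is keyed purely off the sense
 *		key, e.g. (illustrative):
 *
 *		    uint8_t key = scsi_sense_key(xp->xb_sense_data);
 *
 *		with ASC/ASCQ interpretation left to the individual
 *		sd_sense_key_*() handlers.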
17883 */ 17884 17885 static void 17886 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17887 struct scsi_pkt *pktp) 17888 { 17889 uint8_t sense_key; 17890 17891 ASSERT(un != NULL); 17892 ASSERT(mutex_owned(SD_MUTEX(un))); 17893 ASSERT(bp != NULL); 17894 ASSERT(bp != un->un_rqs_bp); 17895 ASSERT(xp != NULL); 17896 ASSERT(pktp != NULL); 17897 17898 sense_key = scsi_sense_key(xp->xb_sense_data); 17899 17900 switch (sense_key) { 17901 case KEY_NO_SENSE: 17902 sd_sense_key_no_sense(un, bp, xp, pktp); 17903 break; 17904 case KEY_RECOVERABLE_ERROR: 17905 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17906 bp, xp, pktp); 17907 break; 17908 case KEY_NOT_READY: 17909 sd_sense_key_not_ready(un, xp->xb_sense_data, 17910 bp, xp, pktp); 17911 break; 17912 case KEY_MEDIUM_ERROR: 17913 case KEY_HARDWARE_ERROR: 17914 sd_sense_key_medium_or_hardware_error(un, 17915 xp->xb_sense_data, bp, xp, pktp); 17916 break; 17917 case KEY_ILLEGAL_REQUEST: 17918 sd_sense_key_illegal_request(un, bp, xp, pktp); 17919 break; 17920 case KEY_UNIT_ATTENTION: 17921 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17922 bp, xp, pktp); 17923 break; 17924 case KEY_WRITE_PROTECT: 17925 case KEY_VOLUME_OVERFLOW: 17926 case KEY_MISCOMPARE: 17927 sd_sense_key_fail_command(un, bp, xp, pktp); 17928 break; 17929 case KEY_BLANK_CHECK: 17930 sd_sense_key_blank_check(un, bp, xp, pktp); 17931 break; 17932 case KEY_ABORTED_COMMAND: 17933 sd_sense_key_aborted_command(un, bp, xp, pktp); 17934 break; 17935 case KEY_VENDOR_UNIQUE: 17936 case KEY_COPY_ABORTED: 17937 case KEY_EQUAL: 17938 case KEY_RESERVED: 17939 default: 17940 sd_sense_key_default(un, xp->xb_sense_data, 17941 bp, xp, pktp); 17942 break; 17943 } 17944 } 17945 17946 17947 /* 17948 * Function: sd_dump_memory 17949 * 17950 * Description: Debug logging routine to print the contents of a user provided 17951 * buffer. The output of the buffer is broken up into 256 byte 17952 * segments due to a size constraint of the scsi_log 17953 * implementation. 17954 * 17955 * Arguments: un - ptr to softstate 17956 * comp - component mask 17957 * title - "title" string to precede data when printed 17958 * data - ptr to data block to be printed 17959 * len - size of data block to be printed 17960 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17961 * 17962 * Context: May be called from interrupt context 17963 */ 17964 17965 #define SD_DUMP_MEMORY_BUF_SIZE 256 17966 17967 static char *sd_dump_format_string[] = { 17968 " 0x%02x", 17969 " %c" 17970 }; 17971 17972 static void 17973 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17974 int len, int fmt) 17975 { 17976 int i, j; 17977 int avail_count; 17978 int start_offset; 17979 int end_offset; 17980 size_t entry_len; 17981 char *bufp; 17982 char *local_buf; 17983 char *format_string; 17984 17985 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17986 17987 /* 17988 * In the debug version of the driver, this function is called from a 17989 * number of places which are NOPs in the release driver. 17990 * The debug driver therefore has additional methods of filtering 17991 * debug output. 17992 */ 17993 #ifdef SDDEBUG 17994 /* 17995 * In the debug version of the driver we can reduce the amount of debug 17996 * messages by setting sd_error_level to something other than 17997 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17998 * sd_component_mask.
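 *
 * For example (an illustrative tunable setting, not driver code), a
 * debug kernel could be narrowed to memory dumps for the I/O core by
 * setting:
 *
 *	sd_component_mask = SD_LOG_IO_CORE;
 *	sd_level_mask = SD_LOGMASK_DUMP_MEM;
 *	sd_error_level = SCSI_ERR_ALL;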
17999 */ 18000 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 18001 (sd_error_level != SCSI_ERR_ALL)) { 18002 return; 18003 } 18004 if (((sd_component_mask & comp) == 0) || 18005 (sd_error_level != SCSI_ERR_ALL)) { 18006 return; 18007 } 18008 #else 18009 if (sd_error_level != SCSI_ERR_ALL) { 18010 return; 18011 } 18012 #endif 18013 18014 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 18015 bufp = local_buf; 18016 /* 18017 * Available length is the length of local_buf[], minus the 18018 * length of the title string, minus one for the ":", minus 18019 * one for the newline, minus one for the NULL terminator. 18020 * This gives the #bytes available for holding the printed 18021 * values from the given data buffer. 18022 */ 18023 if (fmt == SD_LOG_HEX) { 18024 format_string = sd_dump_format_string[0]; 18025 } else /* SD_LOG_CHAR */ { 18026 format_string = sd_dump_format_string[1]; 18027 } 18028 /* 18029 * Available count is the number of elements from the given 18030 * data buffer that we can fit into the available length. 18031 * This is based upon the size of the format string used. 18032 * Make one entry and find its size. 18033 */ 18034 (void) sprintf(bufp, format_string, data[0]); 18035 entry_len = strlen(bufp); 18036 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 18037 18038 j = 0; 18039 while (j < len) { 18040 bufp = local_buf; 18041 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 18042 start_offset = j; 18043 18044 end_offset = start_offset + avail_count; 18045 18046 (void) sprintf(bufp, "%s:", title); 18047 bufp += strlen(bufp); 18048 for (i = start_offset; ((i < end_offset) && (j < len)); 18049 i++, j++) { 18050 (void) sprintf(bufp, format_string, data[i]); 18051 bufp += entry_len; 18052 } 18053 (void) sprintf(bufp, "\n"); 18054 18055 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 18056 } 18057 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 18058 } 18059 18060 /* 18061 * Function: sd_print_sense_msg 18062 * 18063 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      arg - ptr to associated sd_sense_info struct
 *	      code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		     or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
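	 *
	 * A resulting line looks like, for example (byte values
	 * illustrative):
	 *
	 *	Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x80 0x00 0x00 0x08 0x00
	 *
	 * i.e. the title, a colon, and one " 0x%02x" entry per byte, as
	 * formatted by sd_dump_memory().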
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/Os
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
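 *
 *		For example, a SAT translation layer can return this key
 *		with ASC/ASCQ 0x00/0x1D (ATA PASSTHROUGH INFORMATION
 *		AVAILABLE) after a successfully completed ATA PASS-THROUGH
 *		command; as handled below, that case is completed normally
 *		rather than treated as an error.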
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
	 */
	if (asc == 0x00 && ascq == 0x1D) {
		sd_return_command(un, bp);
		return;
	}

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}




/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted. For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
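		 *
		 * For example (illustrative): a CD-ROM still reading its TOC
		 * after a power cycle typically reports ASCQ 0x01 and simply
		 * needs more retries, whereas a disk that will not spin up
		 * reports ASCQ 0x00 and is eventually reset rather than
		 * retried indefinitely; see the ASCQ cases below.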
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries. If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC). For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value. The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode. Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:  /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:  /* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we
			 * are handling a KEY_HARDWARE_ERROR. In particular,
			 * taking the default action of resetting the target
			 * may not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays: if an LSI array
			 * controller returns an ASC of 0x84 (LUN Dead), we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
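	 *
	 * Note (descriptive): the reset escalation above runs only once,
	 * when xb_retry_count equals un_reset_retry_count, and it attempts
	 * RESET_LUN before RESET_TARGET, falling back only when the LUN
	 * reset is disabled or fails (scsi_reset() returns 0).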
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION for close to a minute
	 * under certain conditions.
	 */
	int	retry_check_flag = SD_RETRIES_UA;
	boolean_t	kstat_updated = B_FALSE;
	struct	sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t	ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == TASKQID_INVALID) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense. We can
				 * try again on the next "unit attention".
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif	/* _LP64 */
		/* FALLTHRU */

	case 0x28:  /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset). Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back.
		 * So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == TASKQID_INVALID) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If we failed to dispatch sd_media_change_task(), the kstat
		 * was already updated above. If the dispatch succeeded, the
		 * kstat will be updated later should an error be encountered.
		 * Either way, set the kstat_updated flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes (2Ah/09). "Mode parameters changed" and
	 * "reported luns data has changed" are approximations.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == TASKQID_INVALID) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *		was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Blank check is not fatal for removable devices, therefore
	 * it does not require a console message.
	 */
	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
	    SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, drv_usectohz(100000), NULL);
}



/*
 * Function: sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *		attempts a retry).
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t sense_key = scsi_sense_key(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * Undecoded sense key. Attempt retries and hope that will fix
	 * the problem. Otherwise, we're dead.
	 */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
	}

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      arg - not used.
 *	      flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		     or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
	}

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition. Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}


/*
 * Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      arg - passed to sd_print_retry_msg()
 *	      code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		     or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	dev_info_t	*dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		/* Command was failed. Someone turned off this target? */
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress message if we are detaching and
			 * device has been disconnected
			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
			 * private interface and not part of the DDI
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "disk not responding to selection\n");
			}
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		/* Command was successfully queued for retry */
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}


/*
 * Function: sd_pkt_reason_cmd_incomplete
 *
 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Do not do a reset if selection did not complete */
	/* Note: Should this not just check the bit? */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/*
	 * If the target was not successfully selected, then set
	 * SD_RETRIES_FAILFAST to indicate that we lost communication
	 * with the target, and further retries and/or commands are
	 * likely to take a long time.
	 */
	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
		flag |= SD_RETRIES_FAILFAST;
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, flag,
	    sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_tran_err
 *
 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Do not reset if we got a parity error, or if
	 * selection did not complete.
	 */
	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Note: Should this not just check the bit for pkt_state? */
	if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
	    (pktp->pkt_state != STATE_GOT_BUS)) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_reset
 *
 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_RESET chances are that this pkt got
	 * reset because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 * Function: sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_ABORTED chances are that this pkt got
	 * aborted because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);


	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * A command timeout indicates that we could not establish
	 * communication with the target, so set SD_RETRIES_FAILFAST
	 * as further retries/commands are likely to take a long time.
	 */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
	    sd_print_retry_msg : NULL;

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *		do not have more explicit recovery actions.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ, retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value used here needs to be adjusted
		 * whenever SD_RETRY_DELAY changes in sddef.h.
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 * Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
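			 * The overall escalation order is thus RESET_LUN,
			 * then RESET_TARGET, then RESET_ALL below, moving on
			 * only when the previous level is unavailable or
			 * scsi_reset() returns 0 (failure).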
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/*
			 * If the RESET_LUN and/or RESET_TARGET failed,
			 * try RESET_ALL
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
			 * At this point we give up & fail the command.
			 */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
	 * we have already checked the retry counts above.
	 */
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
	    EIO, un->un_busy_timeout, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: exit\n");
}


/*
 * Function: sd_pkt_status_reservation_conflict
 *
 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
 *		command status.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then the
	 * reservation conflict could be due to various reasons, such as
	 * incorrect keys, the initiator not being registered, or the device
	 * not being reserved. So, we return EACCES to the caller.
	 */
	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
		int cmd = SD_GET_PKT_OPCODE(pktp);
		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
			sd_return_failed_command(un, bp, EACCES);
			return;
		}
	}

	un->un_resvd_status |= SD_RESERVATION_CONFLICT;

	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
		if (sd_failfast_enable != 0) {
			/* By definition, we must panic here.... */
			sd_panic_for_res_conflict(un);
			/*NOTREACHED*/
		}
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Disk Reserved\n");
		sd_return_failed_command(un, bp, EACCES);
		return;
	}

	/*
	 * 1147670: retry only if sd_retry_on_reservation_conflict
	 * property is set (default is 1). Retries will not succeed
	 * on a disk reserved by another initiator. HA systems
	 * may reset this via sd.conf to avoid these retries.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
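	 *
	 * For example (illustrative, not a tested recipe): an HA
	 * configuration may set sd_retry_on_reservation_conflict to 0 so
	 * that I/O to a disk reserved by a peer node fails immediately
	 * rather than being retried.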
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	/*
	 * Retry the command if we can.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}



/*
 * Function: sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target. This can
 *		occur if the HBA does not handle the queue full condition.
 *		(Basically this means third-party HBAs, as Sun HBAs will
 *		handle the queue full condition.) Note that if there are
 *		some commands already in the transport, then the queue full
 *		has occurred because the queue for this nexus is actually
 *		full. If there are no commands in the transport, then the
 *		queue full results from some other initiator or lun
 *		consuming all the resources at the target.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/*
	 * Just lower the QFULL throttle and retry the command. Note that
	 * we do not limit the number of retries here.
	 */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}


/*
 * Function: sd_reset_target
 *
 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
 *		RESET_TARGET, or RESET_ALL.
 *
 * Context: May be called under interrupt context.
 */

static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

	/*
	 * No need to reset if the transport layer has already done so.
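	 * (That is, any of STAT_BUS_RESET, STAT_DEV_RESET or STAT_ABORTED
	 * already recorded in pkt_statistics, as tested below.)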
	 */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}

/*
 * Function: sd_target_change_task
 *
 * Description: Handle dynamic target change
 *
 * Context: Executes in a taskq() thread context
 */
static void
sd_target_change_task(void *arg)
{
	struct sd_lun		*un = arg;
	uint64_t		capacity;
	diskaddr_t		label_cap;
	uint_t			lbasize;
	sd_ssc_t		*ssc;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_blockcount_is_valid == FALSE) ||
	    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
		return;
	}

	ssc = sd_ssc_init(un);

	if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
	    &lbasize, SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_target_change_task: fail to read capacity\n");
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto task_exit;
	}

	mutex_enter(SD_MUTEX(un));
	if (capacity <= un->un_blockcount) {
		mutex_exit(SD_MUTEX(un));
		goto task_exit;
	}

	sd_update_block_info(un, lbasize, capacity);
	mutex_exit(SD_MUTEX(un));

	/*
	 * If lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys event.
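	 *
	 * For example (illustrative): after an administrator grows a LUN on
	 * the array, READ CAPACITY reports the new, larger block count while
	 * the existing EFI label still describes the old size; the
	 * ESC_DEV_DLE sysevent logged below lets interested userland
	 * consumers react to the expansion.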
	 */
	if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
	    (void*)SD_PATH_DIRECT) == 0) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_blockcount_is_valid &&
		    un->un_blockcount > label_cap) {
			mutex_exit(SD_MUTEX(un));
			sd_log_lun_expansion_event(un, KM_SLEEP);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
	}

task_exit:
	sd_ssc_fini(ssc);
}


/*
 * Function: sd_log_dev_status_event
 *
 * Description: Log EC_dev_status sysevent
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
{
	int err;
	char			*path;
	nvlist_t		*attr_list;

	/* Allocate and build sysevent attribute list */
	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
	if (err != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}

	path = kmem_alloc(MAXPATHLEN, km_flag);
	if (path == NULL) {
		nvlist_free(attr_list);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}
	/*
	 * Add path attribute to identify the lun.
	 * We are using minor node 'a' as the sysevent attribute.
	 */
	(void) snprintf(path, MAXPATHLEN, "/devices");
	(void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
	(void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
	    ":a");

	err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
	if (err != 0) {
		nvlist_free(attr_list);
		kmem_free(path, MAXPATHLEN);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to add attribute\n");
		return;
	}

	/* Log dynamic lun expansion sysevent */
	err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
	    esc, attr_list, NULL, km_flag);
	if (err != DDI_SUCCESS) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to log sysevent\n");
	}

	nvlist_free(attr_list);
	kmem_free(path, MAXPATHLEN);
}


/*
 * Function: sd_log_lun_expansion_event
 *
 * Description: Log lun expansion sys event
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
}


/*
 * Function: sd_log_eject_request_event
 *
 * Description: Log eject request sysevent
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_eject_request_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
}


/*
 * Function: sd_media_change_task
 *
 * Description: Recovery action for CDROM to become available.
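 *
 *		For example (behavior per the retry loop below): a genuine
 *		failure such as no media in the drive is given up on after
 *		the reduced retry budget (SD_UNIT_ATTENTION_RETRY/10
 *		attempts at 0.5 second intervals), while a drive that keeps
 *		returning EAGAIN because it is still becoming ready is
 *		allowed the full SD_UNIT_ATTENTION_RETRY budget.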
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct scsi_pkt	*pktp = arg;
	struct sd_lun	*un;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	int		err = 0;
	int		retry_count = 0;
	int		retry_limit = SD_UNIT_ATTENTION_RETRY/10;
	struct	sd_sense_info	si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. The first few attempts to read capacity and other things
	 * related to handling unit attention fail (with an ASC of 0x4 and
	 * an ASCQ of 0x1). In that case we want to retry generously, while
	 * limiting the retries for genuine failures such as no media in
	 * the drive.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}



/*
 * Function: sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.
19941 * 19942 * Return Code: 0 for success 19943 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19944 * sd_send_scsi_READ_CAPACITY() 19945 * 19946 * Context: Executes in a taskq() thread context 19947 */ 19948 19949 static int 19950 sd_handle_mchange(struct sd_lun *un) 19951 { 19952 uint64_t capacity; 19953 uint32_t lbasize; 19954 int rval; 19955 sd_ssc_t *ssc; 19956 19957 ASSERT(!mutex_owned(SD_MUTEX(un))); 19958 ASSERT(un->un_f_monitor_media_state); 19959 19960 ssc = sd_ssc_init(un); 19961 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19962 SD_PATH_DIRECT_PRIORITY); 19963 19964 if (rval != 0) 19965 goto failed; 19966 19967 mutex_enter(SD_MUTEX(un)); 19968 sd_update_block_info(un, lbasize, capacity); 19969 19970 if (un->un_errstats != NULL) { 19971 struct sd_errstats *stp = 19972 (struct sd_errstats *)un->un_errstats->ks_data; 19973 stp->sd_capacity.value.ui64 = (uint64_t) 19974 ((uint64_t)un->un_blockcount * 19975 (uint64_t)un->un_tgt_blocksize); 19976 } 19977 19978 /* 19979 * Check whether the media in the device is writable 19980 */ 19981 if (ISCD(un)) { 19982 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19983 } 19984 19985 /* 19986 * Note: Maybe let the strategy/partitioning chain worry about getting 19987 * valid geometry. 19988 */ 19989 mutex_exit(SD_MUTEX(un)); 19990 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19991 19992 19993 if (cmlb_validate(un->un_cmlbhandle, 0, 19994 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19995 sd_ssc_fini(ssc); 19996 return (EIO); 19997 } else { 19998 if (un->un_f_pkstats_enabled) { 19999 sd_set_pstats(un); 20000 SD_TRACE(SD_LOG_IO_PARTITION, un, 20001 "sd_handle_mchange: un:0x%p pstats created and " 20002 "set\n", un); 20003 } 20004 } 20005 20006 /* 20007 * Try to lock the door 20008 */ 20009 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 20010 SD_PATH_DIRECT_PRIORITY); 20011 failed: 20012 if (rval != 0) 20013 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20014 sd_ssc_fini(ssc); 20015 return (rval); 20016 } 20017 20018 20019 /* 20020 * Function: sd_send_scsi_DOORLOCK 20021 * 20022 * Description: Issue the scsi DOOR LOCK command 20023 * 20024 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20025 * structure for this target. 20026 * flag - SD_REMOVAL_ALLOW 20027 * SD_REMOVAL_PREVENT 20028 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20029 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20030 * to use the USCSI "direct" chain and bypass the normal 20031 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20032 * command is issued as part of an error recovery action. 20033 * 20034 * Return Code: 0 - Success 20035 * errno return code from sd_ssc_send() 20036 * 20037 * Context: Can sleep. 20038 */ 20039 20040 static int 20041 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 20042 { 20043 struct scsi_extended_sense sense_buf; 20044 union scsi_cdb cdb; 20045 struct uscsi_cmd ucmd_buf; 20046 int status; 20047 struct sd_lun *un; 20048 20049 ASSERT(ssc != NULL); 20050 un = ssc->ssc_un; 20051 ASSERT(un != NULL); 20052 ASSERT(!mutex_owned(SD_MUTEX(un))); 20053 20054 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 20055 20056 /* already determined doorlock is not supported, fake success */ 20057 if (un->un_f_doorlock_supported == FALSE) { 20058 return (0); 20059 } 20060 20061 /* 20062 * If we are ejecting and see an SD_REMOVAL_PREVENT, 20063 * ignore the command so we can complete the eject 20064 * operation.
20065 */ 20066 if (flag == SD_REMOVAL_PREVENT) { 20067 mutex_enter(SD_MUTEX(un)); 20068 if (un->un_f_ejecting == TRUE) { 20069 mutex_exit(SD_MUTEX(un)); 20070 return (EAGAIN); 20071 } 20072 mutex_exit(SD_MUTEX(un)); 20073 } 20074 20075 bzero(&cdb, sizeof (cdb)); 20076 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20077 20078 cdb.scc_cmd = SCMD_DOORLOCK; 20079 cdb.cdb_opaque[4] = (uchar_t)flag; 20080 20081 ucmd_buf.uscsi_cdb = (char *)&cdb; 20082 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20083 ucmd_buf.uscsi_bufaddr = NULL; 20084 ucmd_buf.uscsi_buflen = 0; 20085 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20086 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20087 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20088 ucmd_buf.uscsi_timeout = 15; 20089 20090 SD_TRACE(SD_LOG_IO, un, 20091 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 20092 20093 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20094 UIO_SYSSPACE, path_flag); 20095 20096 if (status == 0) 20097 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20098 20099 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 20100 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20101 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 20102 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20103 20104 /* fake success and skip subsequent doorlock commands */ 20105 un->un_f_doorlock_supported = FALSE; 20106 return (0); 20107 } 20108 20109 return (status); 20110 } 20111 20112 /* 20113 * Function: sd_send_scsi_READ_CAPACITY 20114 * 20115 * Description: This routine uses the scsi READ CAPACITY command to determine 20116 * the device capacity in number of blocks and the device native 20117 * block size. If this function returns a failure, then the 20118 * values in *capp and *lbap are undefined. If the capacity 20119 * returned is 0xffffffff then the lun is too large for a 20120 * normal READ CAPACITY command and the results of a 20121 * READ CAPACITY 16 will be used instead. 20122 * 20123 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20124 * capp - ptr to unsigned 64-bit variable to receive the 20125 * capacity value from the command. 20126 * lbap - ptr to unsigned 32-bit variable to receive the 20127 * block size value from the command 20128 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20129 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20130 * to use the USCSI "direct" chain and bypass the normal 20131 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20132 * command is issued as part of an error recovery action. 20133 * 20134 * Return Code: 0 - Success 20135 * EIO - IO error 20136 * EACCES - Reservation conflict detected 20137 * EAGAIN - Device is becoming ready 20138 * errno return code from sd_ssc_send() 20139 * 20140 * Context: Can sleep. Blocks until command completes.
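 *
 *		As a worked example (assuming 512-byte logical blocks):
 *		the READ CAPACITY 10 returned-LBA field is only 32 bits
 *		wide, so a disk larger than 2 TiB (more than 2^32 blocks
 *		of 512 bytes) cannot report its last LBA there; it
 *		returns 0xffffffff instead, which forces the
 *		READ CAPACITY 16 path described above.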
20141 */ 20142 20143 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 20144 20145 static int 20146 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20147 int path_flag) 20148 { 20149 struct scsi_extended_sense sense_buf; 20150 struct uscsi_cmd ucmd_buf; 20151 union scsi_cdb cdb; 20152 uint32_t *capacity_buf; 20153 uint64_t capacity; 20154 uint32_t lbasize; 20155 uint32_t pbsize; 20156 int status; 20157 struct sd_lun *un; 20158 20159 ASSERT(ssc != NULL); 20160 20161 un = ssc->ssc_un; 20162 ASSERT(un != NULL); 20163 ASSERT(!mutex_owned(SD_MUTEX(un))); 20164 ASSERT(capp != NULL); 20165 ASSERT(lbap != NULL); 20166 20167 SD_TRACE(SD_LOG_IO, un, 20168 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20169 20170 /* 20171 * First send a READ_CAPACITY command to the target. 20172 * (This command is mandatory under SCSI-2.) 20173 * 20174 * Set up the CDB for the READ_CAPACITY command. The Partial 20175 * Medium Indicator bit is cleared. The address field must be 20176 * zero if the PMI bit is zero. 20177 */ 20178 bzero(&cdb, sizeof (cdb)); 20179 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20180 20181 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 20182 20183 cdb.scc_cmd = SCMD_READ_CAPACITY; 20184 20185 ucmd_buf.uscsi_cdb = (char *)&cdb; 20186 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20187 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 20188 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 20189 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20190 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20191 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20192 ucmd_buf.uscsi_timeout = 60; 20193 20194 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20195 UIO_SYSSPACE, path_flag); 20196 20197 switch (status) { 20198 case 0: 20199 /* Return failure if we did not get valid capacity data. */ 20200 if (ucmd_buf.uscsi_resid != 0) { 20201 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20202 "sd_send_scsi_READ_CAPACITY received invalid " 20203 "capacity data"); 20204 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20205 return (EIO); 20206 } 20207 /* 20208 * Read capacity and block size from the READ CAPACITY 10 data. 20209 * This data may be adjusted later due to device specific 20210 * issues. 20211 * 20212 * According to the SCSI spec, the READ CAPACITY 10 20213 * command returns the following: 20214 * 20215 * bytes 0-3: Maximum logical block address available. 20216 * (MSB in byte:0 & LSB in byte:3) 20217 * 20218 * bytes 4-7: Block length in bytes 20219 * (MSB in byte:4 & LSB in byte:7) 20220 * 20221 */ 20222 capacity = BE_32(capacity_buf[0]); 20223 lbasize = BE_32(capacity_buf[1]); 20224 20225 /* 20226 * Done with capacity_buf 20227 */ 20228 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20229 20230 /* 20231 * if the reported capacity is set to all 0xf's, then 20232 * this disk is too large and requires SBC-2 commands. 20233 * Reissue the request using READ CAPACITY 16. 20234 */ 20235 if (capacity == 0xffffffff) { 20236 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20237 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 20238 &lbasize, &pbsize, path_flag); 20239 if (status != 0) { 20240 return (status); 20241 } else { 20242 goto rc16_done; 20243 } 20244 } 20245 break; /* Success! 
*/ 20246 case EIO: 20247 switch (ucmd_buf.uscsi_status) { 20248 case STATUS_RESERVATION_CONFLICT: 20249 status = EACCES; 20250 break; 20251 case STATUS_CHECK: 20252 /* 20253 * Check condition; look for ASC/ASCQ of 0x04/0x01 20254 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20255 */ 20256 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20257 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20258 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20259 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20260 return (EAGAIN); 20261 } 20262 break; 20263 default: 20264 break; 20265 } 20266 /* FALLTHRU */ 20267 default: 20268 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20269 return (status); 20270 } 20271 20272 /* 20273 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20274 * (2352 and 0 are common) so for these devices always force the value 20275 * to 2048 as required by the ATAPI specs. 20276 */ 20277 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20278 lbasize = 2048; 20279 } 20280 20281 /* 20282 * Get the maximum LBA value from the READ CAPACITY data. 20283 * Here we assume that the Partial Medium Indicator (PMI) bit 20284 * was cleared when issuing the command. This means that the LBA 20285 * returned from the device is the LBA of the last logical block 20286 * on the logical unit. The actual logical block count will be 20287 * this value plus one. 20288 */ 20289 capacity += 1; 20290 20291 /* 20292 * Currently, for removable media, the capacity is saved in terms 20293 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20294 */ 20295 if (un->un_f_has_removable_media) 20296 capacity *= (lbasize / un->un_sys_blocksize); 20297 20298 rc16_done: 20299 20300 /* 20301 * Copy the values from the READ CAPACITY command into the space 20302 * provided by the caller. 20303 */ 20304 *capp = capacity; 20305 *lbap = lbasize; 20306 20307 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20308 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20309 20310 /* 20311 * Both the lbasize and capacity from the device must be nonzero, 20312 * otherwise we assume that the values are not valid and return 20313 * failure to the caller. (4203735) 20314 */ 20315 if ((capacity == 0) || (lbasize == 0)) { 20316 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20317 "sd_send_scsi_READ_CAPACITY received invalid value " 20318 "capacity %llu lbasize %d", capacity, lbasize); 20319 return (EIO); 20320 } 20321 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20322 return (0); 20323 } 20324 20325 /* 20326 * Function: sd_send_scsi_READ_CAPACITY_16 20327 * 20328 * Description: This routine uses the scsi READ CAPACITY 16 command to 20329 * determine the device capacity in number of blocks and the 20330 * device native block size. If this function returns a failure, 20331 * then the values in *capp and *lbap are undefined. 20332 * This routine should be called by sd_send_scsi_READ_CAPACITY 20333 * which will apply any device specific adjustments to capacity 20334 * and lbasize. One exception is it is also called by 20335 * sd_get_media_info_ext. In that function, there is no need to 20336 * adjust the capacity and lbasize. 20337 * 20338 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20339 * capp - ptr to unsigned 64-bit variable to receive the 20340 * capacity value from the command. 
20341 * lbap - ptr to unsigned 32-bit variable to receive the 20342 * block size value from the command 20343 * psp - ptr to unsigned 32-bit variable to receive the 20344 * physical block size value from the command 20345 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20346 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20347 * to use the USCSI "direct" chain and bypass the normal 20348 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 20349 * this command is issued as part of an error recovery 20350 * action. 20351 * 20352 * Return Code: 0 - Success 20353 * EIO - IO error 20354 * EACCES - Reservation conflict detected 20355 * EAGAIN - Device is becoming ready 20356 * errno return code from sd_ssc_send() 20357 * 20358 * Context: Can sleep. Blocks until command completes. 20359 */ 20360 20361 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20362 20363 static int 20364 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20365 uint32_t *psp, int path_flag) 20366 { 20367 struct scsi_extended_sense sense_buf; 20368 struct uscsi_cmd ucmd_buf; 20369 union scsi_cdb cdb; 20370 uint64_t *capacity16_buf; 20371 uint64_t capacity; 20372 uint32_t lbasize; 20373 uint32_t pbsize; 20374 uint32_t lbpb_exp; 20375 int status; 20376 struct sd_lun *un; 20377 20378 ASSERT(ssc != NULL); 20379 20380 un = ssc->ssc_un; 20381 ASSERT(un != NULL); 20382 ASSERT(!mutex_owned(SD_MUTEX(un))); 20383 ASSERT(capp != NULL); 20384 ASSERT(lbap != NULL); 20385 20386 SD_TRACE(SD_LOG_IO, un, 20387 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 20388 20389 /* 20390 * First send a READ_CAPACITY_16 command to the target. 20391 * 20392 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20393 * Medium Indicator bit is cleared. The address field must be 20394 * zero if the PMI bit is zero. 20395 */ 20396 bzero(&cdb, sizeof (cdb)); 20397 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20398 20399 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20400 20401 ucmd_buf.uscsi_cdb = (char *)&cdb; 20402 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20403 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20404 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20405 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20406 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20407 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20408 ucmd_buf.uscsi_timeout = 60; 20409 20410 /* 20411 * Read Capacity (16) is a Service Action In command. One 20412 * command byte (0x9E) is overloaded for multiple operations, 20413 * with the second CDB byte specifying the desired operation. 20414 */ 20415 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20416 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20417 20418 /* 20419 * Fill in allocation length field 20420 */ 20421 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20422 20423 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20424 UIO_SYSSPACE, path_flag); 20425 20426 switch (status) { 20427 case 0: 20428 /* Return failure if we did not get valid capacity data. */ 20429 if (ucmd_buf.uscsi_resid > 20) { 20430 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20431 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20432 "capacity data"); 20433 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20434 return (EIO); 20435 } 20436 20437 /* 20438 * Read capacity and block size from the READ CAPACITY 16 data. 20439 * This data may be adjusted later due to device specific 20440 * issues.
20441 * 20442 * According to the SCSI spec, the READ CAPACITY 16 20443 * command returns the following: 20444 * 20445 * bytes 0-7: Maximum logical block address available. 20446 * (MSB in byte:0 & LSB in byte:7) 20447 * 20448 * bytes 8-11: Block length in bytes 20449 * (MSB in byte:8 & LSB in byte:11) 20450 * 20451 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20452 * 20453 * byte 14: 20454 * bit 7: Thin-Provisioning Enabled 20455 * bit 6: Thin-Provisioning Read Zeros 20456 */ 20457 capacity = BE_64(capacity16_buf[0]); 20458 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20459 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20460 20461 un->un_thin_flags = 0; 20462 if (((uint8_t *)capacity16_buf)[14] & (1 << 7)) 20463 un->un_thin_flags |= SD_THIN_PROV_ENABLED; 20464 if (((uint8_t *)capacity16_buf)[14] & (1 << 6)) 20465 un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS; 20466 20467 pbsize = lbasize << lbpb_exp; 20468 20469 /* 20470 * Done with capacity16_buf 20471 */ 20472 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20473 20474 /* 20475 * if the reported capacity is set to all 0xf's, then 20476 * this disk is too large. This could only happen with 20477 * a device that supports LBAs larger than 64 bits which 20478 * are not defined by any current T10 standards. 20479 */ 20480 if (capacity == 0xffffffffffffffff) { 20481 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20482 "disk is too large"); 20483 return (EIO); 20484 } 20485 break; /* Success! */ 20486 case EIO: 20487 switch (ucmd_buf.uscsi_status) { 20488 case STATUS_RESERVATION_CONFLICT: 20489 status = EACCES; 20490 break; 20491 case STATUS_CHECK: 20492 /* 20493 * Check condition; look for ASC/ASCQ of 0x04/0x01 20494 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20495 */ 20496 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20497 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20498 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20499 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20500 return (EAGAIN); 20501 } 20502 break; 20503 default: 20504 break; 20505 } 20506 /* FALLTHRU */ 20507 default: 20508 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20509 return (status); 20510 } 20511 20512 /* 20513 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20514 * (2352 and 0 are common) so for these devices always force the value 20515 * to 2048 as required by the ATAPI specs. 20516 */ 20517 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20518 lbasize = 2048; 20519 } 20520 20521 /* 20522 * Get the maximum LBA value from the READ CAPACITY 16 data. 20523 * Here we assume that the Partial Medium Indicator (PMI) bit 20524 * was cleared when issuing the command. This means that the LBA 20525 * returned from the device is the LBA of the last logical block 20526 * on the logical unit. The actual logical block count will be 20527 * this value plus one. 20528 */ 20529 capacity += 1; 20530 20531 /* 20532 * Currently, for removable media, the capacity is saved in terms 20533 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
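	 * For example (assuming the usual 512-byte un_sys_blocksize): a
	 * CD reporting 200000 blocks of 2048 bytes is recorded as
	 * 200000 * (2048 / 512) = 800000 system blocks, so the 10-byte
	 * and 16-byte paths express capacity in the same units.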
20534 */ 20535 if (un->un_f_has_removable_media) 20536 capacity *= (lbasize / un->un_sys_blocksize); 20537 20538 *capp = capacity; 20539 *lbap = lbasize; 20540 *psp = pbsize; 20541 20542 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20543 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20544 capacity, lbasize, pbsize); 20545 20546 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) { 20547 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20548 "sd_send_scsi_READ_CAPACITY_16 received invalid value " 20549 "capacity %llu lbasize %d pbsize %d", capacity, lbasize); 20550 return (EIO); 20551 } 20552 20553 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20554 return (0); 20555 } 20556 20557 20558 /* 20559 * Function: sd_send_scsi_START_STOP_UNIT 20560 * 20561 * Description: Issue a scsi START STOP UNIT command to the target. 20562 * 20563 * Arguments: ssc - ssc contatins pointer to driver soft state (unit) 20564 * structure for this target. 20565 * pc_flag - SD_POWER_CONDITION 20566 * SD_START_STOP 20567 * flag - SD_TARGET_START 20568 * SD_TARGET_STOP 20569 * SD_TARGET_EJECT 20570 * SD_TARGET_CLOSE 20571 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20572 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20573 * to use the USCSI "direct" chain and bypass the normal 20574 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20575 * command is issued as part of an error recovery action. 20576 * 20577 * Return Code: 0 - Success 20578 * EIO - IO error 20579 * EACCES - Reservation conflict detected 20580 * ENXIO - Not Ready, medium not present 20581 * errno return code from sd_ssc_send() 20582 * 20583 * Context: Can sleep. 20584 */ 20585 20586 static int 20587 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20588 int path_flag) 20589 { 20590 struct scsi_extended_sense sense_buf; 20591 union scsi_cdb cdb; 20592 struct uscsi_cmd ucmd_buf; 20593 int status; 20594 struct sd_lun *un; 20595 20596 ASSERT(ssc != NULL); 20597 un = ssc->ssc_un; 20598 ASSERT(un != NULL); 20599 ASSERT(!mutex_owned(SD_MUTEX(un))); 20600 20601 SD_TRACE(SD_LOG_IO, un, 20602 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20603 20604 if (un->un_f_check_start_stop && 20605 (pc_flag == SD_START_STOP) && 20606 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20607 (un->un_f_start_stop_supported != TRUE)) { 20608 return (0); 20609 } 20610 20611 /* 20612 * If we are performing an eject operation and 20613 * we receive any command other than SD_TARGET_EJECT 20614 * we should immediately return. 20615 */ 20616 if (flag != SD_TARGET_EJECT) { 20617 mutex_enter(SD_MUTEX(un)); 20618 if (un->un_f_ejecting == TRUE) { 20619 mutex_exit(SD_MUTEX(un)); 20620 return (EAGAIN); 20621 } 20622 mutex_exit(SD_MUTEX(un)); 20623 } 20624 20625 bzero(&cdb, sizeof (cdb)); 20626 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20627 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20628 20629 cdb.scc_cmd = SCMD_START_STOP; 20630 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ? 
20631 (uchar_t)(flag << 4) : (uchar_t)flag; 20632 20633 ucmd_buf.uscsi_cdb = (char *)&cdb; 20634 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20635 ucmd_buf.uscsi_bufaddr = NULL; 20636 ucmd_buf.uscsi_buflen = 0; 20637 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20638 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20639 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20640 ucmd_buf.uscsi_timeout = 200; 20641 20642 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20643 UIO_SYSSPACE, path_flag); 20644 20645 switch (status) { 20646 case 0: 20647 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20648 break; /* Success! */ 20649 case EIO: 20650 switch (ucmd_buf.uscsi_status) { 20651 case STATUS_RESERVATION_CONFLICT: 20652 status = EACCES; 20653 break; 20654 case STATUS_CHECK: 20655 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20656 switch (scsi_sense_key( 20657 (uint8_t *)&sense_buf)) { 20658 case KEY_ILLEGAL_REQUEST: 20659 status = ENOTSUP; 20660 break; 20661 case KEY_NOT_READY: 20662 if (scsi_sense_asc( 20663 (uint8_t *)&sense_buf) 20664 == 0x3A) { 20665 status = ENXIO; 20666 } 20667 break; 20668 default: 20669 break; 20670 } 20671 } 20672 break; 20673 default: 20674 break; 20675 } 20676 break; 20677 default: 20678 break; 20679 } 20680 20681 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20682 20683 return (status); 20684 } 20685 20686 20687 /* 20688 * Function: sd_start_stop_unit_callback 20689 * 20690 * Description: timeout(9F) callback to begin recovery process for a 20691 * device that has spun down. 20692 * 20693 * Arguments: arg - pointer to associated softstate struct. 20694 * 20695 * Context: Executes in a timeout(9F) thread context 20696 */ 20697 20698 static void 20699 sd_start_stop_unit_callback(void *arg) 20700 { 20701 struct sd_lun *un = arg; 20702 ASSERT(un != NULL); 20703 ASSERT(!mutex_owned(SD_MUTEX(un))); 20704 20705 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20706 20707 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20708 } 20709 20710 20711 /* 20712 * Function: sd_start_stop_unit_task 20713 * 20714 * Description: Recovery procedure when a drive is spun down. 20715 * 20716 * Arguments: arg - pointer to associated softstate struct. 20717 * 20718 * Context: Executes in a taskq() thread context 20719 */ 20720 20721 static void 20722 sd_start_stop_unit_task(void *arg) 20723 { 20724 struct sd_lun *un = arg; 20725 sd_ssc_t *ssc; 20726 int power_level; 20727 int rval; 20728 20729 ASSERT(un != NULL); 20730 ASSERT(!mutex_owned(SD_MUTEX(un))); 20731 20732 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20733 20734 /* 20735 * Some unformatted drives report a not-ready error; there is no need 20736 * to restart if a format has been initiated. 20737 */ 20738 mutex_enter(SD_MUTEX(un)); 20739 if (un->un_f_format_in_progress == TRUE) { 20740 mutex_exit(SD_MUTEX(un)); 20741 return; 20742 } 20743 mutex_exit(SD_MUTEX(un)); 20744 20745 ssc = sd_ssc_init(un); 20746 /* 20747 * When a START STOP command is issued from here, it is part of a 20748 * failure recovery operation and must be issued before any other 20749 * commands, including any pending retries. Thus it must be sent 20750 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter whether the spin 20751 * up succeeds or not; we will start I/O after the attempt. 20752 * If power condition is supported and the current power level 20753 * is capable of performing I/O, we should set the power condition 20754 * to that level. Otherwise, set the power condition to ACTIVE.
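	 *
	 * For orientation: sd_pl2pc[] maps the sd power level to the
	 * SCSI START STOP UNIT POWER CONDITION code that lands in the
	 * upper nibble of CDB byte 4 (see the flag << 4 in
	 * sd_send_scsi_START_STOP_UNIT); in SBC-3 those codes are 1h
	 * (ACTIVE), 2h (IDLE) and 3h (STANDBY). The actual table is
	 * defined elsewhere in this file, so treat this as a summary
	 * rather than a definition.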
20755 */ 20756 if (un->un_f_power_condition_supported) { 20757 mutex_enter(SD_MUTEX(un)); 20758 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level)); 20759 power_level = sd_pwr_pc.ran_perf[un->un_power_level] 20760 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE; 20761 mutex_exit(SD_MUTEX(un)); 20762 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 20763 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY); 20764 } else { 20765 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 20766 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY); 20767 } 20768 20769 if (rval != 0) 20770 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20771 sd_ssc_fini(ssc); 20772 /* 20773 * The above call blocks until the START_STOP_UNIT command completes. 20774 * Now that it has completed, we must re-try the original IO that 20775 * received the NOT READY condition in the first place. There are 20776 * three possible conditions here: 20777 * 20778 * (1) The original IO is on un_retry_bp. 20779 * (2) The original IO is on the regular wait queue, and un_retry_bp 20780 * is NULL. 20781 * (3) The original IO is on the regular wait queue, and un_retry_bp 20782 * points to some other, unrelated bp. 20783 * 20784 * For each case, we must call sd_start_cmds() with un_retry_bp 20785 * as the argument. If un_retry_bp is NULL, this will initiate 20786 * processing of the regular wait queue. If un_retry_bp is not NULL, 20787 * then this will process the bp on un_retry_bp. That may or may not 20788 * be the original IO, but that does not matter: the important thing 20789 * is to keep the IO processing going at this point. 20790 * 20791 * Note: This is a very specific error recovery sequence associated 20792 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20793 * serialize the I/O with completion of the spin-up. 20794 */ 20795 mutex_enter(SD_MUTEX(un)); 20796 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20797 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20798 un, un->un_retry_bp); 20799 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20800 sd_start_cmds(un, un->un_retry_bp); 20801 mutex_exit(SD_MUTEX(un)); 20802 20803 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20804 } 20805 20806 20807 /* 20808 * Function: sd_send_scsi_INQUIRY 20809 * 20810 * Description: Issue the scsi INQUIRY command. 20811 * 20812 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20813 * structure for this target. 20814 * bufaddr 20815 * buflen 20816 * evpd 20817 * page_code 20818 * page_length 20819 * 20820 * Return Code: 0 - Success 20821 * errno return code from sd_ssc_send() 20822 * 20823 * Context: Can sleep. Does not return until command is completed. 
20824 */ 20825 20826 static int 20827 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20828 uchar_t evpd, uchar_t page_code, size_t *residp) 20829 { 20830 union scsi_cdb cdb; 20831 struct uscsi_cmd ucmd_buf; 20832 int status; 20833 struct sd_lun *un; 20834 20835 ASSERT(ssc != NULL); 20836 un = ssc->ssc_un; 20837 ASSERT(un != NULL); 20838 ASSERT(!mutex_owned(SD_MUTEX(un))); 20839 ASSERT(bufaddr != NULL); 20840 20841 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20842 20843 bzero(&cdb, sizeof (cdb)); 20844 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20845 bzero(bufaddr, buflen); 20846 20847 cdb.scc_cmd = SCMD_INQUIRY; 20848 cdb.cdb_opaque[1] = evpd; 20849 cdb.cdb_opaque[2] = page_code; 20850 FORMG0COUNT(&cdb, buflen); 20851 20852 ucmd_buf.uscsi_cdb = (char *)&cdb; 20853 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20854 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20855 ucmd_buf.uscsi_buflen = buflen; 20856 ucmd_buf.uscsi_rqbuf = NULL; 20857 ucmd_buf.uscsi_rqlen = 0; 20858 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20859 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20860 20861 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20862 UIO_SYSSPACE, SD_PATH_DIRECT); 20863 20864 /* 20865 * Only handle status == 0, the upper-level caller 20866 * will put different assessment based on the context. 20867 */ 20868 if (status == 0) 20869 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20870 20871 if ((status == 0) && (residp != NULL)) { 20872 *residp = ucmd_buf.uscsi_resid; 20873 } 20874 20875 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20876 20877 return (status); 20878 } 20879 20880 20881 /* 20882 * Function: sd_send_scsi_TEST_UNIT_READY 20883 * 20884 * Description: Issue the scsi TEST UNIT READY command. 20885 * This routine can be told to set the flag USCSI_DIAGNOSE to 20886 * prevent retrying failed commands. Use this when the intent 20887 * is either to check for device readiness, to clear a Unit 20888 * Attention, or to clear any outstanding sense data. 20889 * However under specific conditions the expected behavior 20890 * is for retries to bring a device ready, so use the flag 20891 * with caution. 20892 * 20893 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20894 * structure for this target. 20895 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20896 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20897 * 0: dont check for media present, do retries on cmd. 20898 * 20899 * Return Code: 0 - Success 20900 * EIO - IO error 20901 * EACCES - Reservation conflict detected 20902 * ENXIO - Not Ready, medium not present 20903 * errno return code from sd_ssc_send() 20904 * 20905 * Context: Can sleep. Does not return until command is completed. 20906 */ 20907 20908 static int 20909 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20910 { 20911 struct scsi_extended_sense sense_buf; 20912 union scsi_cdb cdb; 20913 struct uscsi_cmd ucmd_buf; 20914 int status; 20915 struct sd_lun *un; 20916 20917 ASSERT(ssc != NULL); 20918 un = ssc->ssc_un; 20919 ASSERT(un != NULL); 20920 ASSERT(!mutex_owned(SD_MUTEX(un))); 20921 20922 SD_TRACE(SD_LOG_IO, un, 20923 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20924 20925 /* 20926 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20927 * timeouts when they receive a TUR and the queue is not empty. 
Check 20928 * the configuration flag set during attach (indicating the drive has 20929 * this firmware bug) and un_ncmds_in_transport before issuing the 20930 * TUR. If there are 20931 * pending commands, return success; this is a bit arbitrary but is ok 20932 * for non-removables (i.e. the eliteI disks) and non-clustering 20933 * configurations. 20934 */ 20935 if (un->un_f_cfg_tur_check == TRUE) { 20936 mutex_enter(SD_MUTEX(un)); 20937 if (un->un_ncmds_in_transport != 0) { 20938 mutex_exit(SD_MUTEX(un)); 20939 return (0); 20940 } 20941 mutex_exit(SD_MUTEX(un)); 20942 } 20943 20944 bzero(&cdb, sizeof (cdb)); 20945 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20946 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20947 20948 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20949 20950 ucmd_buf.uscsi_cdb = (char *)&cdb; 20951 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20952 ucmd_buf.uscsi_bufaddr = NULL; 20953 ucmd_buf.uscsi_buflen = 0; 20954 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20955 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20956 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20957 20958 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20959 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20960 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20961 } 20962 ucmd_buf.uscsi_timeout = 60; 20963 20964 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20965 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20966 SD_PATH_STANDARD)); 20967 20968 switch (status) { 20969 case 0: 20970 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20971 break; /* Success! */ 20972 case EIO: 20973 switch (ucmd_buf.uscsi_status) { 20974 case STATUS_RESERVATION_CONFLICT: 20975 status = EACCES; 20976 break; 20977 case STATUS_CHECK: 20978 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20979 break; 20980 } 20981 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20982 (scsi_sense_key((uint8_t *)&sense_buf) == 20983 KEY_NOT_READY) && 20984 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20985 status = ENXIO; 20986 } 20987 break; 20988 default: 20989 break; 20990 } 20991 break; 20992 default: 20993 break; 20994 } 20995 20996 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20997 20998 return (status); 20999 } 21000 21001 /* 21002 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 21003 * 21004 * Description: Issue the scsi PERSISTENT RESERVE IN command. 21005 * 21006 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21007 * structure for this target. 21008 * 21009 * Return Code: 0 - Success 21010 * EACCES 21011 * ENOTSUP 21012 * errno return code from sd_ssc_send() 21013 * 21014 * Context: Can sleep. Does not return until command is completed.
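 *
 *		Note on the remaining arguments: usr_cmd (SD_READ_KEYS or
 *		SD_READ_RESV) is placed in CDB byte 1 as the PERSISTENT
 *		RESERVE IN service action; in SPC terms READ KEYS is 00h
 *		and READ RESERVATION is 01h, assuming the usual mapping
 *		of these constants. data_len goes into the allocation
 *		length field via FORMG1COUNT, and a NULL data_bufp makes
 *		the routine allocate a MHIOC_RESV_KEY_SIZE scratch buffer.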
21015 */ 21016 21017 static int 21018 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 21019 uint16_t data_len, uchar_t *data_bufp) 21020 { 21021 struct scsi_extended_sense sense_buf; 21022 union scsi_cdb cdb; 21023 struct uscsi_cmd ucmd_buf; 21024 int status; 21025 int no_caller_buf = FALSE; 21026 struct sd_lun *un; 21027 21028 ASSERT(ssc != NULL); 21029 un = ssc->ssc_un; 21030 ASSERT(un != NULL); 21031 ASSERT(!mutex_owned(SD_MUTEX(un))); 21032 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 21033 21034 SD_TRACE(SD_LOG_IO, un, 21035 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 21036 21037 bzero(&cdb, sizeof (cdb)); 21038 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21039 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21040 if (data_bufp == NULL) { 21041 /* Allocate a default buf if the caller did not give one */ 21042 ASSERT(data_len == 0); 21043 data_len = MHIOC_RESV_KEY_SIZE; 21044 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 21045 no_caller_buf = TRUE; 21046 } 21047 21048 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 21049 cdb.cdb_opaque[1] = usr_cmd; 21050 FORMG1COUNT(&cdb, data_len); 21051 21052 ucmd_buf.uscsi_cdb = (char *)&cdb; 21053 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21054 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 21055 ucmd_buf.uscsi_buflen = data_len; 21056 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21057 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21058 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21059 ucmd_buf.uscsi_timeout = 60; 21060 21061 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21062 UIO_SYSSPACE, SD_PATH_STANDARD); 21063 21064 switch (status) { 21065 case 0: 21066 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21067 21068 break; /* Success! */ 21069 case EIO: 21070 switch (ucmd_buf.uscsi_status) { 21071 case STATUS_RESERVATION_CONFLICT: 21072 status = EACCES; 21073 break; 21074 case STATUS_CHECK: 21075 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21076 (scsi_sense_key((uint8_t *)&sense_buf) == 21077 KEY_ILLEGAL_REQUEST)) { 21078 status = ENOTSUP; 21079 } 21080 break; 21081 default: 21082 break; 21083 } 21084 break; 21085 default: 21086 break; 21087 } 21088 21089 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 21090 21091 if (no_caller_buf == TRUE) { 21092 kmem_free(data_bufp, data_len); 21093 } 21094 21095 return (status); 21096 } 21097 21098 21099 /* 21100 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 21101 * 21102 * Description: This routine is the driver entry point for handling CD-ROM 21103 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 21104 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 21105 * device. 21106 * 21107 * Arguments: ssc - ssc contains un - pointer to soft state struct 21108 * for the target. 21109 * usr_cmd SCSI-3 reservation facility command (one of 21110 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 21111 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR) 21112 * usr_bufp - user provided pointer register, reserve descriptor or 21113 * preempt and abort structure (mhioc_register_t, 21114 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 21115 * 21116 * Return Code: 0 - Success 21117 * EACCES 21118 * ENOTSUP 21119 * errno return code from sd_ssc_send() 21120 * 21121 * Context: Can sleep. Does not return until command is completed. 
21122 */ 21123 21124 static int 21125 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 21126 uchar_t *usr_bufp) 21127 { 21128 struct scsi_extended_sense sense_buf; 21129 union scsi_cdb cdb; 21130 struct uscsi_cmd ucmd_buf; 21131 int status; 21132 uchar_t data_len = sizeof (sd_prout_t); 21133 sd_prout_t *prp; 21134 struct sd_lun *un; 21135 21136 ASSERT(ssc != NULL); 21137 un = ssc->ssc_un; 21138 ASSERT(un != NULL); 21139 ASSERT(!mutex_owned(SD_MUTEX(un))); 21140 ASSERT(data_len == 24); /* required by scsi spec */ 21141 21142 SD_TRACE(SD_LOG_IO, un, 21143 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 21144 21145 if (usr_bufp == NULL) { 21146 return (EINVAL); 21147 } 21148 21149 bzero(&cdb, sizeof (cdb)); 21150 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21151 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21152 prp = kmem_zalloc(data_len, KM_SLEEP); 21153 21154 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 21155 cdb.cdb_opaque[1] = usr_cmd; 21156 FORMG1COUNT(&cdb, data_len); 21157 21158 ucmd_buf.uscsi_cdb = (char *)&cdb; 21159 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21160 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 21161 ucmd_buf.uscsi_buflen = data_len; 21162 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21163 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21164 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21165 ucmd_buf.uscsi_timeout = 60; 21166 21167 switch (usr_cmd) { 21168 case SD_SCSI3_REGISTER: { 21169 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 21170 21171 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21172 bcopy(ptr->newkey.key, prp->service_key, 21173 MHIOC_RESV_KEY_SIZE); 21174 prp->aptpl = ptr->aptpl; 21175 break; 21176 } 21177 case SD_SCSI3_CLEAR: { 21178 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21179 21180 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21181 break; 21182 } 21183 case SD_SCSI3_RESERVE: 21184 case SD_SCSI3_RELEASE: { 21185 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21186 21187 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21188 prp->scope_address = BE_32(ptr->scope_specific_addr); 21189 cdb.cdb_opaque[2] = ptr->type; 21190 break; 21191 } 21192 case SD_SCSI3_PREEMPTANDABORT: { 21193 mhioc_preemptandabort_t *ptr = 21194 (mhioc_preemptandabort_t *)usr_bufp; 21195 21196 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21197 bcopy(ptr->victim_key.key, prp->service_key, 21198 MHIOC_RESV_KEY_SIZE); 21199 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 21200 cdb.cdb_opaque[2] = ptr->resvdesc.type; 21201 ucmd_buf.uscsi_flags |= USCSI_HEAD; 21202 break; 21203 } 21204 case SD_SCSI3_REGISTERANDIGNOREKEY: 21205 { 21206 mhioc_registerandignorekey_t *ptr; 21207 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 21208 bcopy(ptr->newkey.key, 21209 prp->service_key, MHIOC_RESV_KEY_SIZE); 21210 prp->aptpl = ptr->aptpl; 21211 break; 21212 } 21213 default: 21214 ASSERT(FALSE); 21215 break; 21216 } 21217 21218 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21219 UIO_SYSSPACE, SD_PATH_STANDARD); 21220 21221 switch (status) { 21222 case 0: 21223 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21224 break; /* Success! 
*/ 21225 case EIO: 21226 switch (ucmd_buf.uscsi_status) { 21227 case STATUS_RESERVATION_CONFLICT: 21228 status = EACCES; 21229 break; 21230 case STATUS_CHECK: 21231 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21232 (scsi_sense_key((uint8_t *)&sense_buf) == 21233 KEY_ILLEGAL_REQUEST)) { 21234 status = ENOTSUP; 21235 } 21236 break; 21237 default: 21238 break; 21239 } 21240 break; 21241 default: 21242 break; 21243 } 21244 21245 kmem_free(prp, data_len); 21246 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21247 return (status); 21248 } 21249 21250 21251 /* 21252 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21253 * 21254 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21255 * 21256 * Arguments: un - pointer to the target's soft state struct 21257 * dkc - pointer to the callback structure 21258 * 21259 * Return Code: 0 - success 21260 * errno-type error code 21261 * 21262 * Context: kernel thread context only. 21263 * 21264 * _______________________________________________________________ 21265 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21266 * |FLUSH_VOLATILE| | operation | 21267 * |______________|______________|_________________________________| 21268 * | 0 | NULL | Synchronous flush on both | 21269 * | | | volatile and non-volatile cache | 21270 * |______________|______________|_________________________________| 21271 * | 1 | NULL | Synchronous flush on volatile | 21272 * | | | cache; disk drivers may suppress| 21273 * | | | flush if disk table indicates | 21274 * | | | non-volatile cache | 21275 * |______________|______________|_________________________________| 21276 * | 0 | !NULL | Asynchronous flush on both | 21277 * | | | volatile and non-volatile cache;| 21278 * |______________|______________|_________________________________| 21279 * | 1 | !NULL | Asynchronous flush on volatile | 21280 * | | | cache; disk drivers may suppress| 21281 * | | | flush if disk table indicates | 21282 * | | | non-volatile cache | 21283 * |______________|______________|_________________________________| 21284 * 21285 */ 21286 21287 static int 21288 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21289 { 21290 struct sd_uscsi_info *uip; 21291 struct uscsi_cmd *uscmd; 21292 union scsi_cdb *cdb; 21293 struct buf *bp; 21294 int rval = 0; 21295 int is_async; 21296 21297 SD_TRACE(SD_LOG_IO, un, 21298 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21299 21300 ASSERT(un != NULL); 21301 ASSERT(!mutex_owned(SD_MUTEX(un))); 21302 21303 if (dkc == NULL || dkc->dkc_callback == NULL) { 21304 is_async = FALSE; 21305 } else { 21306 is_async = TRUE; 21307 } 21308 21309 mutex_enter(SD_MUTEX(un)); 21310 /* check whether cache flush should be suppressed */ 21311 if (un->un_f_suppress_cache_flush == TRUE) { 21312 mutex_exit(SD_MUTEX(un)); 21313 /* 21314 * suppress the cache flush if the device is told to do 21315 * so by sd.conf or disk table 21316 */ 21317 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21318 skip the cache flush since suppress_cache_flush is %d!\n", 21319 un->un_f_suppress_cache_flush); 21320 21321 if (is_async == TRUE) { 21322 /* invoke callback for asynchronous flush */ 21323 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21324 } 21325 return (rval); 21326 } 21327 mutex_exit(SD_MUTEX(un)); 21328 21329 /* 21330 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21331 * set properly 21332 */ 21333 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21334 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21335 21336 
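	/*
	 * Per SBC-2, SYNC_NV is bit 2 of byte 1 of the SYNCHRONIZE
	 * CACHE(10) CDB; SD_SYNC_NV_BIT is assumed to encode exactly
	 * that bit. Devices that do not implement it fail the command
	 * with ILLEGAL REQUEST, which the biodone handler below catches
	 * and uses to clear un_f_sync_nv_supported.
	 */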
mutex_enter(SD_MUTEX(un)); 21337 if (dkc != NULL && un->un_f_sync_nv_supported && 21338 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21339 /* 21340 * if the device supports SYNC_NV bit, turn on 21341 * the SYNC_NV bit to only flush volatile cache 21342 */ 21343 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21344 } 21345 mutex_exit(SD_MUTEX(un)); 21346 21347 /* 21348 * First get some memory for the uscsi_cmd struct and cdb 21349 * and initialize for SYNCHRONIZE_CACHE cmd. 21350 */ 21351 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21352 uscmd->uscsi_cdblen = CDB_GROUP1; 21353 uscmd->uscsi_cdb = (caddr_t)cdb; 21354 uscmd->uscsi_bufaddr = NULL; 21355 uscmd->uscsi_buflen = 0; 21356 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21357 uscmd->uscsi_rqlen = SENSE_LENGTH; 21358 uscmd->uscsi_rqresid = SENSE_LENGTH; 21359 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21360 uscmd->uscsi_timeout = sd_io_time; 21361 21362 /* 21363 * Allocate an sd_uscsi_info struct and fill it with the info 21364 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21365 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21366 * since we allocate the buf here in this function, we do not 21367 * need to preserve the prior contents of b_private. 21368 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21369 */ 21370 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21371 uip->ui_flags = SD_PATH_DIRECT; 21372 uip->ui_cmdp = uscmd; 21373 21374 bp = getrbuf(KM_SLEEP); 21375 bp->b_private = uip; 21376 21377 /* 21378 * Setup buffer to carry uscsi request. 21379 */ 21380 bp->b_flags = B_BUSY; 21381 bp->b_bcount = 0; 21382 bp->b_blkno = 0; 21383 21384 if (is_async == TRUE) { 21385 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21386 uip->ui_dkc = *dkc; 21387 } 21388 21389 bp->b_edev = SD_GET_DEV(un); 21390 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21391 21392 /* 21393 * Unset un_f_sync_cache_required flag 21394 */ 21395 mutex_enter(SD_MUTEX(un)); 21396 un->un_f_sync_cache_required = FALSE; 21397 mutex_exit(SD_MUTEX(un)); 21398 21399 (void) sd_uscsi_strategy(bp); 21400 21401 /* 21402 * If this is a synchronous request, wait for completion. 21403 * If async, just return and let the b_iodone callback 21404 * clean up. 21405 * NOTE: On return, un_ncmds_in_driver will be decremented, 21406 * but it was also incremented in sd_uscsi_strategy(), so 21407 * we should be ok. 21408 */ 21409 if (is_async == FALSE) { 21410 (void) biowait(bp); 21411 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21412 } 21413 21414 return (rval); 21415 } 21416 21417 21418 static int 21419 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21420 { 21421 struct sd_uscsi_info *uip; 21422 struct uscsi_cmd *uscmd; 21423 uint8_t *sense_buf; 21424 struct sd_lun *un; 21425 int status; 21426 union scsi_cdb *cdb; 21427 21428 uip = (struct sd_uscsi_info *)(bp->b_private); 21429 ASSERT(uip != NULL); 21430 21431 uscmd = uip->ui_cmdp; 21432 ASSERT(uscmd != NULL); 21433 21434 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21435 ASSERT(sense_buf != NULL); 21436 21437 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21438 ASSERT(un != NULL); 21439 21440 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21441 21442 status = geterror(bp); 21443 switch (status) { 21444 case 0: 21445 break; /* Success!
*/ 21446 case EIO: 21447 switch (uscmd->uscsi_status) { 21448 case STATUS_RESERVATION_CONFLICT: 21449 /* Ignore reservation conflict */ 21450 status = 0; 21451 goto done; 21452 21453 case STATUS_CHECK: 21454 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21455 (scsi_sense_key(sense_buf) == 21456 KEY_ILLEGAL_REQUEST)) { 21457 /* Ignore Illegal Request error */ 21458 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 21459 mutex_enter(SD_MUTEX(un)); 21460 un->un_f_sync_nv_supported = FALSE; 21461 mutex_exit(SD_MUTEX(un)); 21462 status = 0; 21463 SD_TRACE(SD_LOG_IO, un, 21464 "un_f_sync_nv_supported \ 21465 is set to false.\n"); 21466 goto done; 21467 } 21468 21469 mutex_enter(SD_MUTEX(un)); 21470 un->un_f_sync_cache_supported = FALSE; 21471 mutex_exit(SD_MUTEX(un)); 21472 SD_TRACE(SD_LOG_IO, un, 21473 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21474 un_f_sync_cache_supported set to false \ 21475 with asc = %x, ascq = %x\n", 21476 scsi_sense_asc(sense_buf), 21477 scsi_sense_ascq(sense_buf)); 21478 status = ENOTSUP; 21479 goto done; 21480 } 21481 break; 21482 default: 21483 break; 21484 } 21485 /* FALLTHRU */ 21486 default: 21487 /* 21488 * Turn on the un_f_sync_cache_required flag 21489 * since the SYNC CACHE command failed 21490 */ 21491 mutex_enter(SD_MUTEX(un)); 21492 un->un_f_sync_cache_required = TRUE; 21493 mutex_exit(SD_MUTEX(un)); 21494 21495 /* 21496 * Don't log an error message if this device 21497 * has removable media. 21498 */ 21499 if (!un->un_f_has_removable_media) { 21500 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21501 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21502 } 21503 break; 21504 } 21505 21506 done: 21507 if (uip->ui_dkc.dkc_callback != NULL) { 21508 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21509 } 21510 21511 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21512 freerbuf(bp); 21513 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21514 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21515 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21516 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21517 21518 return (status); 21519 } 21520 21521 /* 21522 * Issues a single SCSI UNMAP command with a prepared UNMAP parameter list. 21523 * Returns zero on success, or the non-zero command error code on failure. 
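 * The UNMAP parameter list assumed here (per SBC-3) is an 8-byte header
 * followed by 16-byte block descriptors:
 *
 *     bytes 0-1: UNMAP DATA LENGTH (bytes that follow the field, n - 2)
 *     bytes 2-3: UNMAP BLOCK DESCRIPTOR DATA LENGTH (n - 8)
 *     bytes 4-7: reserved
 *     each descriptor: an 8-byte UNMAP LOGICAL BLOCK ADDRESS, a 4-byte
 *     NUMBER OF LOGICAL BLOCKS, and 4 reserved bytes
 *
 * which is why uph_data_len and uph_descr_data_len are set to
 * param_size - 2 and param_size - 8 below.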
21524 */ 21525 static int 21526 sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph, 21527 uint64_t num_descr, uint64_t bytes) 21528 { 21529 struct sd_lun *un = ssc->ssc_un; 21530 struct scsi_extended_sense sense_buf; 21531 union scsi_cdb cdb; 21532 struct uscsi_cmd ucmd_buf; 21533 int status; 21534 const uint64_t param_size = sizeof (unmap_param_hdr_t) + 21535 num_descr * sizeof (unmap_blk_descr_t); 21536 21537 ASSERT3U(param_size - 2, <=, UINT16_MAX); 21538 uph->uph_data_len = BE_16(param_size - 2); 21539 uph->uph_descr_data_len = BE_16(param_size - 8); 21540 21541 bzero(&cdb, sizeof (cdb)); 21542 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21543 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21544 21545 cdb.scc_cmd = SCMD_UNMAP; 21546 FORMG1COUNT(&cdb, param_size); 21547 21548 ucmd_buf.uscsi_cdb = (char *)&cdb; 21549 ucmd_buf.uscsi_cdblen = (uchar_t)CDB_GROUP1; 21550 ucmd_buf.uscsi_bufaddr = (caddr_t)uph; 21551 ucmd_buf.uscsi_buflen = param_size; 21552 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21553 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21554 ucmd_buf.uscsi_flags = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT; 21555 ucmd_buf.uscsi_timeout = un->un_cmd_timeout; 21556 21557 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, 21558 SD_PATH_STANDARD); 21559 21560 switch (status) { 21561 case 0: 21562 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21563 21564 if (un->un_unmapstats) { 21565 atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64); 21566 atomic_add_64(&un->un_unmapstats->us_extents.value.ui64, 21567 num_descr); 21568 atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64, 21569 bytes); 21570 } 21571 break; /* Success! */ 21572 case EIO: 21573 if (un->un_unmapstats) 21574 atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64); 21575 switch (ucmd_buf.uscsi_status) { 21576 case STATUS_RESERVATION_CONFLICT: 21577 status = EACCES; 21578 break; 21579 default: 21580 break; 21581 } 21582 break; 21583 default: 21584 if (un->un_unmapstats) 21585 atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64); 21586 break; 21587 } 21588 21589 return (status); 21590 } 21591 21592 /* 21593 * Returns a pointer to the i'th block descriptor inside an UNMAP param list. 21594 */ 21595 static inline unmap_blk_descr_t * 21596 UNMAP_blk_descr_i(void *buf, size_t i) 21597 { 21598 return ((unmap_blk_descr_t *)((uintptr_t)buf + 21599 sizeof (unmap_param_hdr_t) + (i * sizeof (unmap_blk_descr_t)))); 21600 } 21601 21602 /* 21603 * Takes the list of extents from sd_send_scsi_UNMAP, chops it up, prepares 21604 * UNMAP block descriptors and issues individual SCSI UNMAP commands. While 21605 * doing so we consult the block limits to determine at most how many 21606 * extents and LBAs we can UNMAP in one command. 21607 * If a command fails for whatever, reason, extent list processing is aborted 21608 * and the failed command's status is returned. Otherwise returns 0 on 21609 * success. 
21610 */ 21611 static int 21612 sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc, const dkioc_free_list_t *dfl) 21613 { 21614 struct sd_lun *un = ssc->ssc_un; 21615 unmap_param_hdr_t *uph; 21616 sd_blk_limits_t *lim = &un->un_blk_lim; 21617 int rval = 0; 21618 int partition; 21619 /* partition offset & length in system blocks */ 21620 diskaddr_t part_off_sysblks = 0, part_len_sysblks = 0; 21621 uint64_t part_off, part_len; 21622 uint64_t descr_cnt_lim, byte_cnt_lim; 21623 uint64_t descr_issued = 0, bytes_issued = 0; 21624 21625 uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP); 21626 21627 partition = SDPART(dev); 21628 rval = cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks, 21629 &part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT); 21630 if (rval != 0) 21631 goto out; 21632 part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks); 21633 part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks); 21634 21635 ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0); 21636 ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0); 21637 /* Spec says 0xffffffff are special values, so compute maximums. */ 21638 byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ? 21639 (uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize : 21640 UINT64_MAX; 21641 descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR); 21642 21643 if (dfl->dfl_offset >= part_len) { 21644 rval = SET_ERROR(EINVAL); 21645 goto out; 21646 } 21647 21648 for (size_t i = 0; i < dfl->dfl_num_exts; i++) { 21649 const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i]; 21650 uint64_t ext_start = ext->dfle_start; 21651 uint64_t ext_length = ext->dfle_length; 21652 21653 while (ext_length > 0) { 21654 unmap_blk_descr_t *ubd; 21655 /* Respect device limit on LBA count per command */ 21656 uint64_t len = MIN(MIN(ext_length, byte_cnt_lim - 21657 bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX)); 21658 21659 /* check partition limits */ 21660 if (ext_start >= part_len || 21661 ext_start + len < ext_start || 21662 dfl->dfl_offset + ext_start + len < 21663 dfl->dfl_offset || 21664 dfl->dfl_offset + ext_start + len > part_len) { 21665 rval = SET_ERROR(EINVAL); 21666 goto out; 21667 } 21668 21669 ASSERT3U(descr_issued, <, descr_cnt_lim); 21670 ASSERT3U(bytes_issued, <, byte_cnt_lim); 21671 ubd = UNMAP_blk_descr_i(uph, descr_issued); 21672 21673 /* adjust in-partition addresses to be device-global */ 21674 ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un, 21675 dfl->dfl_offset + ext_start + part_off)); 21676 ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len)); 21677 21678 descr_issued++; 21679 bytes_issued += len; 21680 21681 /* Issue command when device limits reached */ 21682 if (descr_issued == descr_cnt_lim || 21683 bytes_issued == byte_cnt_lim) { 21684 rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, 21685 descr_issued, bytes_issued); 21686 if (rval != 0) 21687 goto out; 21688 descr_issued = 0; 21689 bytes_issued = 0; 21690 } 21691 21692 ext_start += len; 21693 ext_length -= len; 21694 } 21695 } 21696 21697 if (descr_issued > 0) { 21698 /* issue last command */ 21699 rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued, 21700 bytes_issued); 21701 } 21702 21703 out: 21704 kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ); 21705 return (rval); 21706 } 21707 21708 /* 21709 * Issues one or several UNMAP commands based on a list of extents to be 21710 * unmapped. 
 * The internal multi-command processing is hidden, as the exact
 * number of commands and extents per command is limited by both SCSI
 * command syntax and device limits (as expressed in the SCSI Block Limits
 * VPD page and un_blk_lim in struct sd_lun).
 * Returns zero on success, or the error code of the first failed SCSI UNMAP
 * command.
 */
static int
sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag)
{
	struct sd_lun *un = ssc->ssc_un;
	int rval = 0;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(dfl != NULL);

	/* Per spec, any of these conditions signals lack of UNMAP support. */
	if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) ||
	    un->un_blk_lim.lim_max_unmap_descr_cnt == 0 ||
	    un->un_blk_lim.lim_max_unmap_lba_cnt == 0) {
		return (SET_ERROR(ENOTSUP));
	}

	/* For userspace calls we must copy in. */
	if (!(flag & FKIOCTL)) {
		int err = dfl_copyin(dfl, &dfl, flag, KM_SLEEP);
		if (err != 0)
			return (err);
	} else if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
		ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
		return (SET_ERROR(EINVAL));
	}

	rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl);

	if (!(flag & FKIOCTL)) {
		dfl_free(dfl);
		dfl = NULL;
	}

	return (rval);
}

/*
 * Function:	sd_send_scsi_GET_CONFIGURATION
 *
 * Description:	Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info;
 *		the caller must ensure that buflen == SD_PROFILE_HEADER_LEN.
 * Arguments:	ssc
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		path_flag
 *
 * Return Code:	0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context:	Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
    uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
    int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;  /* Success!
*/ 21821 case EIO: 21822 switch (ucmdbuf->uscsi_status) { 21823 case STATUS_RESERVATION_CONFLICT: 21824 status = EACCES; 21825 break; 21826 default: 21827 break; 21828 } 21829 break; 21830 default: 21831 break; 21832 } 21833 21834 if (status == 0) { 21835 SD_DUMP_MEMORY(un, SD_LOG_IO, 21836 "sd_send_scsi_GET_CONFIGURATION: data", 21837 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21838 } 21839 21840 SD_TRACE(SD_LOG_IO, un, 21841 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21842 21843 return (status); 21844 } 21845 21846 /* 21847 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21848 * 21849 * Description: Issues the get configuration command to the device to 21850 * retrieve a specific feature. Called from 21851 * sd_check_for_writable_cd & sd_set_mmc_caps. 21852 * Arguments: ssc 21853 * ucmdbuf 21854 * rqbuf 21855 * rqbuflen 21856 * bufaddr 21857 * buflen 21858 * feature 21859 * 21860 * Return Code: 0 - Success 21861 * errno return code from sd_ssc_send() 21862 * 21863 * Context: Can sleep. Does not return until command is completed. 21864 * 21865 */ 21866 static int 21867 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21868 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21869 char feature, int path_flag) 21870 { 21871 char cdb[CDB_GROUP1]; 21872 int status; 21873 struct sd_lun *un; 21874 21875 ASSERT(ssc != NULL); 21876 un = ssc->ssc_un; 21877 ASSERT(un != NULL); 21878 ASSERT(!mutex_owned(SD_MUTEX(un))); 21879 ASSERT(bufaddr != NULL); 21880 ASSERT(ucmdbuf != NULL); 21881 ASSERT(rqbuf != NULL); 21882 21883 SD_TRACE(SD_LOG_IO, un, 21884 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21885 21886 bzero(cdb, sizeof (cdb)); 21887 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21888 bzero(rqbuf, rqbuflen); 21889 bzero(bufaddr, buflen); 21890 21891 /* 21892 * Set up cdb field for the get configuration command. 21893 */ 21894 cdb[0] = SCMD_GET_CONFIGURATION; 21895 cdb[1] = 0x02; /* Requested Type */ 21896 cdb[3] = feature; 21897 cdb[8] = buflen; 21898 ucmdbuf->uscsi_cdb = cdb; 21899 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21900 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21901 ucmdbuf->uscsi_buflen = buflen; 21902 ucmdbuf->uscsi_timeout = sd_io_time; 21903 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21904 ucmdbuf->uscsi_rqlen = rqbuflen; 21905 ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ; 21906 21907 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21908 UIO_SYSSPACE, path_flag); 21909 21910 switch (status) { 21911 case 0: 21912 21913 break; /* Success! */ 21914 case EIO: 21915 switch (ucmdbuf->uscsi_status) { 21916 case STATUS_RESERVATION_CONFLICT: 21917 status = EACCES; 21918 break; 21919 default: 21920 break; 21921 } 21922 break; 21923 default: 21924 break; 21925 } 21926 21927 if (status == 0) { 21928 SD_DUMP_MEMORY(un, SD_LOG_IO, 21929 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21930 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21931 } 21932 21933 SD_TRACE(SD_LOG_IO, un, 21934 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21935 21936 return (status); 21937 } 21938 21939 21940 /* 21941 * Function: sd_send_scsi_MODE_SENSE 21942 * 21943 * Description: Utility function for issuing a scsi MODE SENSE command. 21944 * Note: This routine uses a consistent implementation for Group0, 21945 * Group1, and Group2 commands across all platforms. 
ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *		commands.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *		      CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code:	0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context:	Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
    size_t buflen, uchar_t page_code, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses the 0x3f page code and checks the mode
		 * page header to determine if the target device is
		 * write-protected. But some USB devices return 0 bytes for
		 * the 0x3f page code. For this case, make sure that at least
		 * the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "mode page header is not returned");
		}
		break;	/* Success!
 */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function:	sd_send_scsi_MODE_SELECT
 *
 * Description:	Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *		commands.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *		      CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		save_page - boolean to determine if SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code:	0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context:	Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
    size_t buflen, uchar_t save_page, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success!
 */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function:	sd_send_scsi_RDWR
 *
 * Description:	Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer to receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code:	0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context:	Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ?
USCSI_READ : USCSI_WRITE; 22222 22223 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 22224 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 22225 bufaddr, buflen, start_block, block_count); 22226 22227 bzero(&cdb, sizeof (cdb)); 22228 bzero(&ucmd_buf, sizeof (ucmd_buf)); 22229 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 22230 22231 /* Compute CDB size to use */ 22232 if (start_block > 0xffffffff) 22233 cdbsize = CDB_GROUP4; 22234 else if ((start_block & 0xFFE00000) || 22235 (un->un_f_cfg_is_atapi == TRUE)) 22236 cdbsize = CDB_GROUP1; 22237 else 22238 cdbsize = CDB_GROUP0; 22239 22240 switch (cdbsize) { 22241 case CDB_GROUP0: /* 6-byte CDBs */ 22242 cdb.scc_cmd = cmd; 22243 FORMG0ADDR(&cdb, start_block); 22244 FORMG0COUNT(&cdb, block_count); 22245 break; 22246 case CDB_GROUP1: /* 10-byte CDBs */ 22247 cdb.scc_cmd = cmd | SCMD_GROUP1; 22248 FORMG1ADDR(&cdb, start_block); 22249 FORMG1COUNT(&cdb, block_count); 22250 break; 22251 case CDB_GROUP4: /* 16-byte CDBs */ 22252 cdb.scc_cmd = cmd | SCMD_GROUP4; 22253 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 22254 FORMG4COUNT(&cdb, block_count); 22255 break; 22256 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 22257 default: 22258 /* All others reserved */ 22259 return (EINVAL); 22260 } 22261 22262 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 22263 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 22264 22265 ucmd_buf.uscsi_cdb = (char *)&cdb; 22266 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 22267 ucmd_buf.uscsi_bufaddr = bufaddr; 22268 ucmd_buf.uscsi_buflen = buflen; 22269 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 22270 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 22271 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 22272 ucmd_buf.uscsi_timeout = 60; 22273 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22274 UIO_SYSSPACE, path_flag); 22275 22276 switch (status) { 22277 case 0: 22278 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22279 break; /* Success! */ 22280 case EIO: 22281 switch (ucmd_buf.uscsi_status) { 22282 case STATUS_RESERVATION_CONFLICT: 22283 status = EACCES; 22284 break; 22285 default: 22286 break; 22287 } 22288 break; 22289 default: 22290 break; 22291 } 22292 22293 if (status == 0) { 22294 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 22295 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 22296 } 22297 22298 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 22299 22300 return (status); 22301 } 22302 22303 22304 /* 22305 * Function: sd_send_scsi_LOG_SENSE 22306 * 22307 * Description: Issue a scsi LOG_SENSE command with the given parameters. 22308 * 22309 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22310 * structure for this target. 22311 * 22312 * Return Code: 0 - Success 22313 * errno return code from sd_ssc_send() 22314 * 22315 * Context: Can sleep. Does not return until command is completed. 
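 *
 * For illustration only (not part of the driver), the CDB built below for
 * a current-cumulative read of the temperature page would carry:
 *
 *	cdb[0]    = SCMD_LOG_SENSE_G1
 *	cdb[2]    = (0x01 << 6) | TEMPERATURE_PAGE
 *	cdb[5..6] = param_ptr (big-endian parameter pointer)
 *	cdb[7..8] = buflen (big-endian allocation length, via FORMG1COUNT)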
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
    uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter
					 * is implemented as page 0xE. To
					 * properly handle this case, if an
					 * attempt for log page 0xE is made
					 * and fails, we will retry using
					 * page 0x31.
					 *
					 * Network storage BU committed to
					 * maintaining page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code
					 * 0x31 until all disks transition to
					 * the standard page.
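					 *
					 * (The retry below only rewrites
					 * byte 2 of the CDB already built
					 * above and resends the same uscsi
					 * command; no other field changes.)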
22391 */ 22392 mutex_enter(SD_MUTEX(un)); 22393 un->un_start_stop_cycle_page = 22394 START_STOP_CYCLE_VU_PAGE; 22395 cdb.cdb_opaque[2] = 22396 (char)(page_control << 6) | 22397 un->un_start_stop_cycle_page; 22398 mutex_exit(SD_MUTEX(un)); 22399 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22400 status = sd_ssc_send( 22401 ssc, &ucmd_buf, FKIOCTL, 22402 UIO_SYSSPACE, path_flag); 22403 22404 break; 22405 case TEMPERATURE_PAGE: 22406 status = ENOTTY; 22407 break; 22408 default: 22409 break; 22410 } 22411 } 22412 break; 22413 default: 22414 break; 22415 } 22416 break; 22417 default: 22418 break; 22419 } 22420 22421 if (status == 0) { 22422 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22423 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 22424 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 22425 } 22426 22427 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 22428 22429 return (status); 22430 } 22431 22432 22433 /* 22434 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION 22435 * 22436 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command. 22437 * 22438 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22439 * structure for this target. 22440 * bufaddr 22441 * buflen 22442 * class_req 22443 * 22444 * Return Code: 0 - Success 22445 * errno return code from sd_ssc_send() 22446 * 22447 * Context: Can sleep. Does not return until command is completed. 22448 */ 22449 22450 static int 22451 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr, 22452 size_t buflen, uchar_t class_req) 22453 { 22454 union scsi_cdb cdb; 22455 struct uscsi_cmd ucmd_buf; 22456 int status; 22457 struct sd_lun *un; 22458 22459 ASSERT(ssc != NULL); 22460 un = ssc->ssc_un; 22461 ASSERT(un != NULL); 22462 ASSERT(!mutex_owned(SD_MUTEX(un))); 22463 ASSERT(bufaddr != NULL); 22464 22465 SD_TRACE(SD_LOG_IO, un, 22466 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un); 22467 22468 bzero(&cdb, sizeof (cdb)); 22469 bzero(&ucmd_buf, sizeof (ucmd_buf)); 22470 bzero(bufaddr, buflen); 22471 22472 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION; 22473 cdb.cdb_opaque[1] = 1; /* polled */ 22474 cdb.cdb_opaque[4] = class_req; 22475 FORMG1COUNT(&cdb, buflen); 22476 22477 ucmd_buf.uscsi_cdb = (char *)&cdb; 22478 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 22479 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 22480 ucmd_buf.uscsi_buflen = buflen; 22481 ucmd_buf.uscsi_rqbuf = NULL; 22482 ucmd_buf.uscsi_rqlen = 0; 22483 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 22484 ucmd_buf.uscsi_timeout = 60; 22485 22486 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22487 UIO_SYSSPACE, SD_PATH_DIRECT); 22488 22489 /* 22490 * Only handle status == 0, the upper-level caller 22491 * will put different assessment based on the context. 22492 */ 22493 if (status == 0) { 22494 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22495 22496 if (ucmd_buf.uscsi_resid != 0) { 22497 status = EIO; 22498 } 22499 } 22500 22501 SD_TRACE(SD_LOG_IO, un, 22502 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n"); 22503 22504 return (status); 22505 } 22506 22507 22508 static boolean_t 22509 sd_gesn_media_data_valid(uchar_t *data) 22510 { 22511 uint16_t len; 22512 22513 len = (data[1] << 8) | data[0]; 22514 return ((len >= 6) && 22515 ((data[2] & SD_GESN_HEADER_NEA) == 0) && 22516 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) && 22517 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0)); 22518 } 22519 22520 22521 /* 22522 * Function: sdioctl 22523 * 22524 * Description: Driver's ioctl(9e) entry point function. 
 *
 * Arguments:	dev - device number
 *		cmd - ioctl operation to be performed
 *		arg - user argument, contains data to be set or reference
 *		      parameter for get
 *		flag - bit flag, indicating open settings, 32/64 bit type
 *		cred_p - user credential pointer
 *		rval_p - calling process return value (OPT)
 *
 * Return Code:	EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 * Context:	Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	boolean_t	is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go thru sdstrategy, where we check on suspend
	 * status.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter increment, a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
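	 *
	 * Every exit path from sdioctl() must undo this increment
	 * exactly once: the early-error paths below decrement inline
	 * before their goto, and all remaining paths decrement after
	 * the main command switch completes.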
22589 */ 22590 un->un_ncmds_in_driver++; 22591 22592 if (!is_valid && 22593 (flag & (FNDELAY | FNONBLOCK))) { 22594 switch (cmd) { 22595 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 22596 case DKIOCGVTOC: 22597 case DKIOCGEXTVTOC: 22598 case DKIOCGAPART: 22599 case DKIOCPARTINFO: 22600 case DKIOCEXTPARTINFO: 22601 case DKIOCSGEOM: 22602 case DKIOCSAPART: 22603 case DKIOCGETEFI: 22604 case DKIOCPARTITION: 22605 case DKIOCSVTOC: 22606 case DKIOCSEXTVTOC: 22607 case DKIOCSETEFI: 22608 case DKIOCGMBOOT: 22609 case DKIOCSMBOOT: 22610 case DKIOCG_PHYGEOM: 22611 case DKIOCG_VIRTGEOM: 22612 #if defined(__i386) || defined(__amd64) 22613 case DKIOCSETEXTPART: 22614 #endif 22615 /* let cmlb handle it */ 22616 goto skip_ready_valid; 22617 22618 case CDROMPAUSE: 22619 case CDROMRESUME: 22620 case CDROMPLAYMSF: 22621 case CDROMPLAYTRKIND: 22622 case CDROMREADTOCHDR: 22623 case CDROMREADTOCENTRY: 22624 case CDROMSTOP: 22625 case CDROMSTART: 22626 case CDROMVOLCTRL: 22627 case CDROMSUBCHNL: 22628 case CDROMREADMODE2: 22629 case CDROMREADMODE1: 22630 case CDROMREADOFFSET: 22631 case CDROMSBLKMODE: 22632 case CDROMGBLKMODE: 22633 case CDROMGDRVSPEED: 22634 case CDROMSDRVSPEED: 22635 case CDROMCDDA: 22636 case CDROMCDXA: 22637 case CDROMSUBCODE: 22638 if (!ISCD(un)) { 22639 un->un_ncmds_in_driver--; 22640 ASSERT(un->un_ncmds_in_driver >= 0); 22641 mutex_exit(SD_MUTEX(un)); 22642 err = ENOTTY; 22643 goto done_without_assess; 22644 } 22645 break; 22646 case FDEJECT: 22647 case DKIOCEJECT: 22648 case CDROMEJECT: 22649 if (!un->un_f_eject_media_supported) { 22650 un->un_ncmds_in_driver--; 22651 ASSERT(un->un_ncmds_in_driver >= 0); 22652 mutex_exit(SD_MUTEX(un)); 22653 err = ENOTTY; 22654 goto done_without_assess; 22655 } 22656 break; 22657 case DKIOCFLUSHWRITECACHE: 22658 mutex_exit(SD_MUTEX(un)); 22659 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22660 if (err != 0) { 22661 mutex_enter(SD_MUTEX(un)); 22662 un->un_ncmds_in_driver--; 22663 ASSERT(un->un_ncmds_in_driver >= 0); 22664 mutex_exit(SD_MUTEX(un)); 22665 err = EIO; 22666 goto done_quick_assess; 22667 } 22668 mutex_enter(SD_MUTEX(un)); 22669 /* FALLTHROUGH */ 22670 case DKIOCREMOVABLE: 22671 case DKIOCHOTPLUGGABLE: 22672 case DKIOCINFO: 22673 case DKIOCGMEDIAINFO: 22674 case DKIOCGMEDIAINFOEXT: 22675 case DKIOCSOLIDSTATE: 22676 case MHIOCENFAILFAST: 22677 case MHIOCSTATUS: 22678 case MHIOCTKOWN: 22679 case MHIOCRELEASE: 22680 case MHIOCGRP_INKEYS: 22681 case MHIOCGRP_INRESV: 22682 case MHIOCGRP_REGISTER: 22683 case MHIOCGRP_CLEAR: 22684 case MHIOCGRP_RESERVE: 22685 case MHIOCGRP_PREEMPTANDABORT: 22686 case MHIOCGRP_REGISTERANDIGNOREKEY: 22687 case CDROMCLOSETRAY: 22688 case USCSICMD: 22689 case USCSIMAXXFER: 22690 goto skip_ready_valid; 22691 default: 22692 break; 22693 } 22694 22695 mutex_exit(SD_MUTEX(un)); 22696 err = sd_ready_and_valid(ssc, SDPART(dev)); 22697 mutex_enter(SD_MUTEX(un)); 22698 22699 if (err != SD_READY_VALID) { 22700 switch (cmd) { 22701 case DKIOCSTATE: 22702 case CDROMGDRVSPEED: 22703 case CDROMSDRVSPEED: 22704 case FDEJECT: /* for eject command */ 22705 case DKIOCEJECT: 22706 case CDROMEJECT: 22707 case DKIOCREMOVABLE: 22708 case DKIOCHOTPLUGGABLE: 22709 break; 22710 default: 22711 if (un->un_f_has_removable_media) { 22712 err = ENXIO; 22713 } else { 22714 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22715 if (err == SD_RESERVED_BY_OTHERS) { 22716 err = EACCES; 22717 } else { 22718 err = EIO; 22719 } 22720 } 22721 un->un_ncmds_in_driver--; 22722 ASSERT(un->un_ncmds_in_driver >= 0); 22723 mutex_exit(SD_MUTEX(un)); 22724 22725 goto 
done_without_assess; 22726 } 22727 } 22728 } 22729 22730 skip_ready_valid: 22731 mutex_exit(SD_MUTEX(un)); 22732 22733 switch (cmd) { 22734 case DKIOCINFO: 22735 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22736 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22737 break; 22738 22739 case DKIOCGMEDIAINFO: 22740 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22741 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22742 break; 22743 22744 case DKIOCGMEDIAINFOEXT: 22745 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22746 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22747 break; 22748 22749 case DKIOCGGEOM: 22750 case DKIOCGVTOC: 22751 case DKIOCGEXTVTOC: 22752 case DKIOCGAPART: 22753 case DKIOCPARTINFO: 22754 case DKIOCEXTPARTINFO: 22755 case DKIOCSGEOM: 22756 case DKIOCSAPART: 22757 case DKIOCGETEFI: 22758 case DKIOCPARTITION: 22759 case DKIOCSVTOC: 22760 case DKIOCSEXTVTOC: 22761 case DKIOCSETEFI: 22762 case DKIOCGMBOOT: 22763 case DKIOCSMBOOT: 22764 case DKIOCG_PHYGEOM: 22765 case DKIOCG_VIRTGEOM: 22766 #if defined(__i386) || defined(__amd64) 22767 case DKIOCSETEXTPART: 22768 #endif 22769 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22770 22771 /* TUR should spin up */ 22772 22773 if (un->un_f_has_removable_media) 22774 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22775 SD_CHECK_FOR_MEDIA); 22776 22777 else 22778 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22779 22780 if (err != 0) 22781 goto done_with_assess; 22782 22783 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22784 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22785 22786 if ((err == 0) && 22787 ((cmd == DKIOCSETEFI) || 22788 ((un->un_f_pkstats_enabled) && 22789 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22790 cmd == DKIOCSEXTVTOC)))) { 22791 22792 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22793 (void *)SD_PATH_DIRECT); 22794 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22795 sd_set_pstats(un); 22796 SD_TRACE(SD_LOG_IO_PARTITION, un, 22797 "sd_ioctl: un:0x%p pstats created and " 22798 "set\n", un); 22799 } 22800 } 22801 22802 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22803 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22804 22805 mutex_enter(SD_MUTEX(un)); 22806 if (un->un_f_devid_supported && 22807 (un->un_f_opt_fab_devid == TRUE)) { 22808 if (un->un_devid == NULL) { 22809 sd_register_devid(ssc, SD_DEVINFO(un), 22810 SD_TARGET_IS_UNRESERVED); 22811 } else { 22812 /* 22813 * The device id for this disk 22814 * has been fabricated. The 22815 * device id must be preserved 22816 * by writing it back out to 22817 * disk. 
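					 * If that write fails, the
					 * fabricated devid is dropped
					 * below so that a later attach
					 * can fabricate and persist a
					 * fresh one.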
22818 */ 22819 if (sd_write_deviceid(ssc) != 0) { 22820 ddi_devid_free(un->un_devid); 22821 un->un_devid = NULL; 22822 } 22823 } 22824 } 22825 mutex_exit(SD_MUTEX(un)); 22826 } 22827 22828 break; 22829 22830 case DKIOCLOCK: 22831 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22832 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22833 SD_PATH_STANDARD); 22834 goto done_with_assess; 22835 22836 case DKIOCUNLOCK: 22837 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22838 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22839 SD_PATH_STANDARD); 22840 goto done_with_assess; 22841 22842 case DKIOCSTATE: { 22843 enum dkio_state state; 22844 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22845 22846 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22847 err = EFAULT; 22848 } else { 22849 err = sd_check_media(dev, state); 22850 if (err == 0) { 22851 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22852 sizeof (int), flag) != 0) 22853 err = EFAULT; 22854 } 22855 } 22856 break; 22857 } 22858 22859 case DKIOCREMOVABLE: 22860 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22861 i = un->un_f_has_removable_media ? 1 : 0; 22862 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22863 err = EFAULT; 22864 } else { 22865 err = 0; 22866 } 22867 break; 22868 22869 case DKIOCSOLIDSTATE: 22870 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n"); 22871 i = un->un_f_is_solid_state ? 1 : 0; 22872 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22873 err = EFAULT; 22874 } else { 22875 err = 0; 22876 } 22877 break; 22878 22879 case DKIOCHOTPLUGGABLE: 22880 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22881 i = un->un_f_is_hotpluggable ? 1 : 0; 22882 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22883 err = EFAULT; 22884 } else { 22885 err = 0; 22886 } 22887 break; 22888 22889 case DKIOCREADONLY: 22890 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n"); 22891 i = 0; 22892 if ((ISCD(un) && !un->un_f_mmc_writable_media) || 22893 (sr_check_wp(dev) != 0)) { 22894 i = 1; 22895 } 22896 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22897 err = EFAULT; 22898 } else { 22899 err = 0; 22900 } 22901 break; 22902 22903 case DKIOCGTEMPERATURE: 22904 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22905 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22906 break; 22907 22908 case MHIOCENFAILFAST: 22909 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22910 if ((err = drv_priv(cred_p)) == 0) { 22911 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22912 } 22913 break; 22914 22915 case MHIOCTKOWN: 22916 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22917 if ((err = drv_priv(cred_p)) == 0) { 22918 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22919 } 22920 break; 22921 22922 case MHIOCRELEASE: 22923 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22924 if ((err = drv_priv(cred_p)) == 0) { 22925 err = sd_mhdioc_release(dev); 22926 } 22927 break; 22928 22929 case MHIOCSTATUS: 22930 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22931 if ((err = drv_priv(cred_p)) == 0) { 22932 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22933 case 0: 22934 err = 0; 22935 break; 22936 case EACCES: 22937 *rval_p = 1; 22938 err = 0; 22939 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22940 break; 22941 default: 22942 err = EIO; 22943 goto done_with_assess; 22944 } 22945 } 22946 break; 22947 22948 case MHIOCQRESERVE: 22949 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22950 if ((err = drv_priv(cred_p)) == 0) { 22951 err = sd_reserve_release(dev, SD_RESERVE); 22952 } 22953 break; 22954 22955 
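	/*
	 * For illustration only (not driver code): the MHIOCGRP_* ioctls
	 * handled below share one userspace pattern, e.g. registering a
	 * SCSI-3 reservation key (assuming the mhioc_register_t layout
	 * from <sys/mhd.h>):
	 *
	 *	mhioc_register_t reg;
	 *	bzero(&reg, sizeof (reg));
	 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
	 *	if (ioctl(fd, MHIOCGRP_REGISTER, &reg) != 0)
	 *		err(1, "MHIOCGRP_REGISTER");
	 *
	 * All of these require privilege (drv_priv()) and return ENOTSUP
	 * when the device is constrained to SCSI-2 reservations.
	 */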
case MHIOCREREGISTERDEVID: 22956 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22957 if (drv_priv(cred_p) == EPERM) { 22958 err = EPERM; 22959 } else if (!un->un_f_devid_supported) { 22960 err = ENOTTY; 22961 } else { 22962 err = sd_mhdioc_register_devid(dev); 22963 } 22964 break; 22965 22966 case MHIOCGRP_INKEYS: 22967 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22968 if (((err = drv_priv(cred_p)) != EPERM) && 22969 arg != (intptr_t)NULL) { 22970 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22971 err = ENOTSUP; 22972 } else { 22973 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22974 flag); 22975 } 22976 } 22977 break; 22978 22979 case MHIOCGRP_INRESV: 22980 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22981 if (((err = drv_priv(cred_p)) != EPERM) && 22982 arg != (intptr_t)NULL) { 22983 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22984 err = ENOTSUP; 22985 } else { 22986 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22987 } 22988 } 22989 break; 22990 22991 case MHIOCGRP_REGISTER: 22992 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22993 if ((err = drv_priv(cred_p)) != EPERM) { 22994 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22995 err = ENOTSUP; 22996 } else if (arg != (intptr_t)NULL) { 22997 mhioc_register_t reg; 22998 if (ddi_copyin((void *)arg, ®, 22999 sizeof (mhioc_register_t), flag) != 0) { 23000 err = EFAULT; 23001 } else { 23002 err = 23003 sd_send_scsi_PERSISTENT_RESERVE_OUT( 23004 ssc, SD_SCSI3_REGISTER, 23005 (uchar_t *)®); 23006 if (err != 0) 23007 goto done_with_assess; 23008 } 23009 } 23010 } 23011 break; 23012 23013 case MHIOCGRP_CLEAR: 23014 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n"); 23015 if ((err = drv_priv(cred_p)) != EPERM) { 23016 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 23017 err = ENOTSUP; 23018 } else if (arg != (intptr_t)NULL) { 23019 mhioc_register_t reg; 23020 if (ddi_copyin((void *)arg, ®, 23021 sizeof (mhioc_register_t), flag) != 0) { 23022 err = EFAULT; 23023 } else { 23024 err = 23025 sd_send_scsi_PERSISTENT_RESERVE_OUT( 23026 ssc, SD_SCSI3_CLEAR, 23027 (uchar_t *)®); 23028 if (err != 0) 23029 goto done_with_assess; 23030 } 23031 } 23032 } 23033 break; 23034 23035 case MHIOCGRP_RESERVE: 23036 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 23037 if ((err = drv_priv(cred_p)) != EPERM) { 23038 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 23039 err = ENOTSUP; 23040 } else if (arg != (intptr_t)NULL) { 23041 mhioc_resv_desc_t resv_desc; 23042 if (ddi_copyin((void *)arg, &resv_desc, 23043 sizeof (mhioc_resv_desc_t), flag) != 0) { 23044 err = EFAULT; 23045 } else { 23046 err = 23047 sd_send_scsi_PERSISTENT_RESERVE_OUT( 23048 ssc, SD_SCSI3_RESERVE, 23049 (uchar_t *)&resv_desc); 23050 if (err != 0) 23051 goto done_with_assess; 23052 } 23053 } 23054 } 23055 break; 23056 23057 case MHIOCGRP_PREEMPTANDABORT: 23058 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 23059 if ((err = drv_priv(cred_p)) != EPERM) { 23060 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 23061 err = ENOTSUP; 23062 } else if (arg != (intptr_t)NULL) { 23063 mhioc_preemptandabort_t preempt_abort; 23064 if (ddi_copyin((void *)arg, &preempt_abort, 23065 sizeof (mhioc_preemptandabort_t), 23066 flag) != 0) { 23067 err = EFAULT; 23068 } else { 23069 err = 23070 sd_send_scsi_PERSISTENT_RESERVE_OUT( 23071 ssc, SD_SCSI3_PREEMPTANDABORT, 23072 (uchar_t *)&preempt_abort); 23073 if (err != 0) 23074 goto done_with_assess; 23075 } 23076 } 23077 } 23078 break; 23079 23080 case MHIOCGRP_REGISTERANDIGNOREKEY: 23081 
SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 23082 if ((err = drv_priv(cred_p)) != EPERM) { 23083 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 23084 err = ENOTSUP; 23085 } else if (arg != (intptr_t)NULL) { 23086 mhioc_registerandignorekey_t r_and_i; 23087 if (ddi_copyin((void *)arg, (void *)&r_and_i, 23088 sizeof (mhioc_registerandignorekey_t), 23089 flag) != 0) { 23090 err = EFAULT; 23091 } else { 23092 err = 23093 sd_send_scsi_PERSISTENT_RESERVE_OUT( 23094 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 23095 (uchar_t *)&r_and_i); 23096 if (err != 0) 23097 goto done_with_assess; 23098 } 23099 } 23100 } 23101 break; 23102 23103 case USCSICMD: 23104 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 23105 cr = ddi_get_cred(); 23106 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 23107 err = EPERM; 23108 } else { 23109 enum uio_seg uioseg; 23110 23111 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 23112 UIO_USERSPACE; 23113 if (un->un_f_format_in_progress == TRUE) { 23114 err = EAGAIN; 23115 break; 23116 } 23117 23118 err = sd_ssc_send(ssc, 23119 (struct uscsi_cmd *)arg, 23120 flag, uioseg, SD_PATH_STANDARD); 23121 if (err != 0) 23122 goto done_with_assess; 23123 else 23124 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 23125 } 23126 break; 23127 23128 case USCSIMAXXFER: 23129 SD_TRACE(SD_LOG_IOCTL, un, "USCSIMAXXFER\n"); 23130 cr = ddi_get_cred(); 23131 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 23132 err = EPERM; 23133 } else { 23134 const uscsi_xfer_t xfer = un->un_max_xfer_size; 23135 23136 if (ddi_copyout(&xfer, (void *)arg, sizeof (xfer), 23137 flag) != 0) { 23138 err = EFAULT; 23139 } else { 23140 err = 0; 23141 } 23142 } 23143 break; 23144 23145 case CDROMPAUSE: 23146 case CDROMRESUME: 23147 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 23148 if (!ISCD(un)) { 23149 err = ENOTTY; 23150 } else { 23151 err = sr_pause_resume(dev, cmd); 23152 } 23153 break; 23154 23155 case CDROMPLAYMSF: 23156 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 23157 if (!ISCD(un)) { 23158 err = ENOTTY; 23159 } else { 23160 err = sr_play_msf(dev, (caddr_t)arg, flag); 23161 } 23162 break; 23163 23164 case CDROMPLAYTRKIND: 23165 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 23166 #if defined(__i386) || defined(__amd64) 23167 /* 23168 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 23169 */ 23170 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 23171 #else 23172 if (!ISCD(un)) { 23173 #endif 23174 err = ENOTTY; 23175 } else { 23176 err = sr_play_trkind(dev, (caddr_t)arg, flag); 23177 } 23178 break; 23179 23180 case CDROMREADTOCHDR: 23181 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 23182 if (!ISCD(un)) { 23183 err = ENOTTY; 23184 } else { 23185 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 23186 } 23187 break; 23188 23189 case CDROMREADTOCENTRY: 23190 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 23191 if (!ISCD(un)) { 23192 err = ENOTTY; 23193 } else { 23194 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 23195 } 23196 break; 23197 23198 case CDROMSTOP: 23199 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 23200 if (!ISCD(un)) { 23201 err = ENOTTY; 23202 } else { 23203 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 23204 SD_TARGET_STOP, SD_PATH_STANDARD); 23205 goto done_with_assess; 23206 } 23207 break; 23208 23209 case CDROMSTART: 23210 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 23211 if (!ISCD(un)) { 23212 err = ENOTTY; 23213 } else { 23214 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 23215 SD_TARGET_START, SD_PATH_STANDARD); 23216 goto 
done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_CLOSE, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing block size in case of atapi
		 * drives, thus return ENOTTY if drive type is atapi
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * MMC Devices do not support changing the
			 * logical block size
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
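			 *
			 * (A hypothetical caller passes a pointer to an
			 * int, e.g.
			 *	int bsz;
			 *	ioctl(fd, CDROMGBLKMODE, &bsz);
			 * and would typically read back 2048 here.)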
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
		    "SDIOC detected
cmd:0x%X:\n", cmd); 23466 /* call error generator */ 23467 sd_faultinjection_ioctl(cmd, arg, un); 23468 err = 0; 23469 break; 23470 23471 #endif /* SD_FAULT_INJECTION */ 23472 23473 case DKIOCFLUSHWRITECACHE: 23474 { 23475 struct dk_callback *dkc = (struct dk_callback *)arg; 23476 23477 mutex_enter(SD_MUTEX(un)); 23478 if (!un->un_f_sync_cache_supported || 23479 !un->un_f_write_cache_enabled) { 23480 err = un->un_f_sync_cache_supported ? 23481 0 : ENOTSUP; 23482 mutex_exit(SD_MUTEX(un)); 23483 if ((flag & FKIOCTL) && dkc != NULL && 23484 dkc->dkc_callback != NULL) { 23485 (*dkc->dkc_callback)(dkc->dkc_cookie, 23486 err); 23487 /* 23488 * Did callback and reported error. 23489 * Since we did a callback, ioctl 23490 * should return 0. 23491 */ 23492 err = 0; 23493 } 23494 break; 23495 } 23496 mutex_exit(SD_MUTEX(un)); 23497 23498 if ((flag & FKIOCTL) && dkc != NULL && 23499 dkc->dkc_callback != NULL) { 23500 /* async SYNC CACHE request */ 23501 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 23502 } else { 23503 /* synchronous SYNC CACHE request */ 23504 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 23505 } 23506 } 23507 break; 23508 23509 case DKIOCFREE: 23510 { 23511 dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg; 23512 23513 /* bad ioctls shouldn't panic */ 23514 if (dfl == NULL) { 23515 /* check kernel callers strictly in debug */ 23516 ASSERT0(flag & FKIOCTL); 23517 err = SET_ERROR(EINVAL); 23518 break; 23519 } 23520 /* synchronous UNMAP request */ 23521 err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag); 23522 } 23523 break; 23524 23525 case DKIOCGETWCE: { 23526 23527 int wce; 23528 23529 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 23530 break; 23531 } 23532 23533 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 23534 err = EFAULT; 23535 } 23536 break; 23537 } 23538 23539 case DKIOCSETWCE: { 23540 23541 int wce, sync_supported; 23542 int cur_wce = 0; 23543 23544 if (!un->un_f_cache_mode_changeable) { 23545 err = EINVAL; 23546 break; 23547 } 23548 23549 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 23550 err = EFAULT; 23551 break; 23552 } 23553 23554 /* 23555 * Synchronize multiple threads trying to enable 23556 * or disable the cache via the un_f_wcc_cv 23557 * condition variable. 23558 */ 23559 mutex_enter(SD_MUTEX(un)); 23560 23561 /* 23562 * Don't allow the cache to be enabled if the 23563 * config file has it disabled. 23564 */ 23565 if (un->un_f_opt_disable_cache && wce) { 23566 mutex_exit(SD_MUTEX(un)); 23567 err = EINVAL; 23568 break; 23569 } 23570 23571 /* 23572 * Wait for write cache change in progress 23573 * bit to be clear before proceeding. 23574 */ 23575 while (un->un_f_wcc_inprog) 23576 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 23577 23578 un->un_f_wcc_inprog = 1; 23579 23580 mutex_exit(SD_MUTEX(un)); 23581 23582 /* 23583 * Get the current write cache state 23584 */ 23585 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) { 23586 mutex_enter(SD_MUTEX(un)); 23587 un->un_f_wcc_inprog = 0; 23588 cv_broadcast(&un->un_wcc_cv); 23589 mutex_exit(SD_MUTEX(un)); 23590 break; 23591 } 23592 23593 mutex_enter(SD_MUTEX(un)); 23594 un->un_f_write_cache_enabled = (cur_wce != 0); 23595 23596 if (un->un_f_write_cache_enabled && wce == 0) { 23597 /* 23598 * Disable the write cache. Don't clear 23599 * un_f_write_cache_enabled until after 23600 * the mode select and flush are complete. 
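			 * On this disable path the flag is therefore
			 * cleared only after both the MODE SELECT and
			 * the SYNCHRONIZE CACHE below have succeeded,
			 * so other threads never observe a stale "off".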
23601 */ 23602 sync_supported = un->un_f_sync_cache_supported; 23603 23604 /* 23605 * If cache flush is suppressed, we assume that the 23606 * controller firmware will take care of managing the 23607 * write cache for us: no need to explicitly 23608 * disable it. 23609 */ 23610 if (!un->un_f_suppress_cache_flush) { 23611 mutex_exit(SD_MUTEX(un)); 23612 if ((err = sd_cache_control(ssc, 23613 SD_CACHE_NOCHANGE, 23614 SD_CACHE_DISABLE)) == 0 && 23615 sync_supported) { 23616 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 23617 NULL); 23618 } 23619 } else { 23620 mutex_exit(SD_MUTEX(un)); 23621 } 23622 23623 mutex_enter(SD_MUTEX(un)); 23624 if (err == 0) { 23625 un->un_f_write_cache_enabled = 0; 23626 } 23627 23628 } else if (!un->un_f_write_cache_enabled && wce != 0) { 23629 /* 23630 * Set un_f_write_cache_enabled first, so there is 23631 * no window where the cache is enabled, but the 23632 * bit says it isn't. 23633 */ 23634 un->un_f_write_cache_enabled = 1; 23635 23636 /* 23637 * If cache flush is suppressed, we assume that the 23638 * controller firmware will take care of managing the 23639 * write cache for us: no need to explicitly 23640 * enable it. 23641 */ 23642 if (!un->un_f_suppress_cache_flush) { 23643 mutex_exit(SD_MUTEX(un)); 23644 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 23645 SD_CACHE_ENABLE); 23646 } else { 23647 mutex_exit(SD_MUTEX(un)); 23648 } 23649 23650 mutex_enter(SD_MUTEX(un)); 23651 23652 if (err) { 23653 un->un_f_write_cache_enabled = 0; 23654 } 23655 } 23656 23657 un->un_f_wcc_inprog = 0; 23658 cv_broadcast(&un->un_wcc_cv); 23659 mutex_exit(SD_MUTEX(un)); 23660 break; 23661 } 23662 23663 default: 23664 err = ENOTTY; 23665 break; 23666 } 23667 mutex_enter(SD_MUTEX(un)); 23668 un->un_ncmds_in_driver--; 23669 ASSERT(un->un_ncmds_in_driver >= 0); 23670 mutex_exit(SD_MUTEX(un)); 23671 23672 23673 done_without_assess: 23674 sd_ssc_fini(ssc); 23675 23676 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23677 return (err); 23678 23679 done_with_assess: 23680 mutex_enter(SD_MUTEX(un)); 23681 un->un_ncmds_in_driver--; 23682 ASSERT(un->un_ncmds_in_driver >= 0); 23683 mutex_exit(SD_MUTEX(un)); 23684 23685 done_quick_assess: 23686 if (err != 0) 23687 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23688 /* Uninitialize sd_ssc_t pointer */ 23689 sd_ssc_fini(ssc); 23690 23691 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23692 return (err); 23693 } 23694 23695 23696 /* 23697 * Function: sd_dkio_ctrl_info 23698 * 23699 * Description: This routine is the driver entry point for handling controller 23700 * information ioctl requests (DKIOCINFO). 23701 * 23702 * Arguments: dev - the device number 23703 * arg - pointer to user provided dk_cinfo structure 23704 * specifying the controller type and attributes. 23705 * flag - this argument is a pass through to ddi_copyxxx() 23706 * directly from the mode argument of ioctl(). 
23707 * 23708 * Return Code: 0 23709 * EFAULT 23710 * ENXIO 23711 */ 23712 23713 static int 23714 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 23715 { 23716 struct sd_lun *un = NULL; 23717 struct dk_cinfo *info; 23718 dev_info_t *pdip; 23719 int lun, tgt; 23720 23721 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23722 return (ENXIO); 23723 } 23724 23725 info = (struct dk_cinfo *) 23726 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 23727 23728 switch (un->un_ctype) { 23729 case CTYPE_CDROM: 23730 info->dki_ctype = DKC_CDROM; 23731 break; 23732 default: 23733 info->dki_ctype = DKC_SCSI_CCS; 23734 break; 23735 } 23736 pdip = ddi_get_parent(SD_DEVINFO(un)); 23737 info->dki_cnum = ddi_get_instance(pdip); 23738 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 23739 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 23740 } else { 23741 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 23742 DK_DEVLEN - 1); 23743 } 23744 23745 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23746 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 23747 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23748 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 23749 23750 /* Unit Information */ 23751 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 23752 info->dki_slave = ((tgt << 3) | lun); 23753 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 23754 DK_DEVLEN - 1); 23755 info->dki_flags = DKI_FMTVOL; 23756 info->dki_partition = SDPART(dev); 23757 23758 /* Max Transfer size of this device in blocks */ 23759 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 23760 info->dki_addr = 0; 23761 info->dki_space = 0; 23762 info->dki_prio = 0; 23763 info->dki_vec = 0; 23764 23765 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 23766 kmem_free(info, sizeof (struct dk_cinfo)); 23767 return (EFAULT); 23768 } else { 23769 kmem_free(info, sizeof (struct dk_cinfo)); 23770 return (0); 23771 } 23772 } 23773 23774 /* 23775 * Function: sd_get_media_info_com 23776 * 23777 * Description: This routine returns the information required to populate 23778 * the fields for the dk_minfo/dk_minfo_ext structures. 
23779 * 23780 * Arguments: dev - the device number 23781 * dki_media_type - media_type 23782 * dki_lbsize - logical block size 23783 * dki_capacity - capacity in blocks 23784 * dki_pbsize - physical block size (if requested) 23785 * 23786 * Return Code: 0 23787 * EACCESS 23788 * EFAULT 23789 * ENXIO 23790 * EIO 23791 */ 23792 static int 23793 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize, 23794 diskaddr_t *dki_capacity, uint_t *dki_pbsize) 23795 { 23796 struct sd_lun *un = NULL; 23797 struct uscsi_cmd com; 23798 struct scsi_inquiry *sinq; 23799 u_longlong_t media_capacity; 23800 uint64_t capacity; 23801 uint_t lbasize; 23802 uint_t pbsize; 23803 uchar_t *out_data; 23804 uchar_t *rqbuf; 23805 int rval = 0; 23806 int rtn; 23807 sd_ssc_t *ssc; 23808 23809 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 23810 (un->un_state == SD_STATE_OFFLINE)) { 23811 return (ENXIO); 23812 } 23813 23814 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n"); 23815 23816 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23817 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23818 ssc = sd_ssc_init(un); 23819 23820 /* Issue a TUR to determine if the drive is ready with media present */ 23821 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23822 if (rval == ENXIO) { 23823 goto done; 23824 } else if (rval != 0) { 23825 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23826 } 23827 23828 /* Now get configuration data */ 23829 if (ISCD(un)) { 23830 *dki_media_type = DK_CDROM; 23831 23832 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23833 if (un->un_f_mmc_cap == TRUE) { 23834 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23835 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23836 SD_PATH_STANDARD); 23837 23838 if (rtn) { 23839 /* 23840 * We ignore all failures for CD and need to 23841 * put the assessment before processing code 23842 * to avoid missing assessment for FMA. 
23843 */ 23844 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23845 /* 23846 * Failed for other than an illegal request 23847 * or command not supported 23848 */ 23849 if ((com.uscsi_status == STATUS_CHECK) && 23850 (com.uscsi_rqstatus == STATUS_GOOD)) { 23851 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23852 (rqbuf[12] != 0x20)) { 23853 rval = EIO; 23854 goto no_assessment; 23855 } 23856 } 23857 } else { 23858 /* 23859 * The GET CONFIGURATION command succeeded 23860 * so set the media type according to the 23861 * returned data 23862 */ 23863 *dki_media_type = out_data[6]; 23864 *dki_media_type <<= 8; 23865 *dki_media_type |= out_data[7]; 23866 } 23867 } 23868 } else { 23869 /* 23870 * The profile list is not available, so we attempt to identify 23871 * the media type based on the inquiry data 23872 */ 23873 sinq = un->un_sd->sd_inq; 23874 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23875 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23876 /* This is a direct access device or optical disk */ 23877 *dki_media_type = DK_FIXED_DISK; 23878 23879 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23880 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23881 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23882 *dki_media_type = DK_ZIP; 23883 } else if ( 23884 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23885 *dki_media_type = DK_JAZ; 23886 } 23887 } 23888 } else { 23889 /* 23890 * Not a CD, direct access or optical disk so return 23891 * unknown media 23892 */ 23893 *dki_media_type = DK_UNKNOWN; 23894 } 23895 } 23896 23897 /* 23898 * Now read the capacity so we can provide the lbasize, 23899 * pbsize and capacity. 23900 */ 23901 if (dki_pbsize && un->un_f_descr_format_supported) { 23902 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 23903 &pbsize, SD_PATH_DIRECT); 23904 23905 /* 23906 * Override the physical blocksize if the instance already 23907 * has a larger value. 23908 */ 23909 pbsize = MAX(pbsize, un->un_phy_blocksize); 23910 } 23911 23912 if (dki_pbsize == NULL || rval != 0 || 23913 !un->un_f_descr_format_supported) { 23914 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23915 SD_PATH_DIRECT); 23916 23917 switch (rval) { 23918 case 0: 23919 if (un->un_f_enable_rmw && 23920 un->un_phy_blocksize != 0) { 23921 pbsize = un->un_phy_blocksize; 23922 } else { 23923 pbsize = lbasize; 23924 } 23925 media_capacity = capacity; 23926 23927 /* 23928 * sd_send_scsi_READ_CAPACITY() reports capacity in 23929 * un->un_sys_blocksize chunks. So we need to convert 23930 * it into cap.lbsize chunks. 23931 */ 23932 if (un->un_f_has_removable_media) { 23933 media_capacity *= un->un_sys_blocksize; 23934 media_capacity /= lbasize; 23935 } 23936 break; 23937 case EACCES: 23938 rval = EACCES; 23939 goto done; 23940 default: 23941 rval = EIO; 23942 goto done; 23943 } 23944 } else { 23945 if (un->un_f_enable_rmw && 23946 !ISP2(pbsize % DEV_BSIZE)) { 23947 pbsize = SSD_SECSIZE; 23948 } else if (!ISP2(lbasize % DEV_BSIZE) || 23949 !ISP2(pbsize % DEV_BSIZE)) { 23950 pbsize = lbasize = DEV_BSIZE; 23951 } 23952 media_capacity = capacity; 23953 } 23954 23955 /* 23956 * If lun is expanded dynamically, update the un structure. 
23957 */ 23958 mutex_enter(SD_MUTEX(un)); 23959 if ((un->un_f_blockcount_is_valid == TRUE) && 23960 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23961 (capacity > un->un_blockcount)) { 23962 un->un_f_expnevent = B_FALSE; 23963 sd_update_block_info(un, lbasize, capacity); 23964 } 23965 mutex_exit(SD_MUTEX(un)); 23966 23967 *dki_lbsize = lbasize; 23968 *dki_capacity = media_capacity; 23969 if (dki_pbsize) 23970 *dki_pbsize = pbsize; 23971 23972 done: 23973 if (rval != 0) { 23974 if (rval == EIO) 23975 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23976 else 23977 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23978 } 23979 no_assessment: 23980 sd_ssc_fini(ssc); 23981 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23982 kmem_free(rqbuf, SENSE_LENGTH); 23983 return (rval); 23984 } 23985 23986 /* 23987 * Function: sd_get_media_info 23988 * 23989 * Description: This routine is the driver entry point for handling ioctl 23990 * requests for the media type or command set profile used by the 23991 * drive to operate on the media (DKIOCGMEDIAINFO). 23992 * 23993 * Arguments: dev - the device number 23994 * arg - pointer to user provided dk_minfo structure 23995 * specifying the media type, logical block size and 23996 * drive capacity. 23997 * flag - this argument is a pass through to ddi_copyxxx() 23998 * directly from the mode argument of ioctl(). 23999 * 24000 * Return Code: returns the value from sd_get_media_info_com 24001 */ 24002 static int 24003 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 24004 { 24005 struct dk_minfo mi; 24006 int rval; 24007 24008 rval = sd_get_media_info_com(dev, &mi.dki_media_type, 24009 &mi.dki_lbsize, &mi.dki_capacity, NULL); 24010 24011 if (rval) 24012 return (rval); 24013 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag)) 24014 rval = EFAULT; 24015 return (rval); 24016 } 24017 24018 /* 24019 * Function: sd_get_media_info_ext 24020 * 24021 * Description: This routine is the driver entry point for handling ioctl 24022 * requests for the media type or command set profile used by the 24023 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The 24024 * difference between this ioctl and DKIOCGMEDIAINFO is that the 24025 * return value of this ioctl contains both the logical block size 24026 * and the physical block size. 24027 * 24028 * 24029 * Arguments: dev - the device number 24030 * arg - pointer to user provided dk_minfo_ext structure 24031 * specifying the media type, logical block size, 24032 * physical block size and disk capacity. 24033 * flag - this argument is a pass through to ddi_copyxxx() 24034 * directly from the mode argument of ioctl(). 24035 * 24036 * Return Code: returns the value from sd_get_media_info_com 24037 */ 24038 static int 24039 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag) 24040 { 24041 struct dk_minfo_ext mie; 24042 int rval = 0; 24043 24044 rval = sd_get_media_info_com(dev, &mie.dki_media_type, 24045 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize); 24046 24047 if (rval) 24048 return (rval); 24049 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag)) 24050 rval = EFAULT; 24051 return (rval); 24052 24053 } 24054 24055 /* 24056 * Function: sd_watch_request_submit 24057 * 24058 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit 24059 * depending on which is supported by the device.
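* Both paths register sd_media_watch_cb with the unit's dev_t as the callback cookie; the MMC GESN-based watch is used only when both un_f_mmc_cap and un_f_mmc_gesn_polling are set.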
24060 */ 24061 static opaque_t 24062 sd_watch_request_submit(struct sd_lun *un) 24063 { 24064 dev_t dev; 24065 24066 /* All submissions are unified to use same device number */ 24067 dev = sd_make_device(SD_DEVINFO(un)); 24068 24069 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 24070 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un), 24071 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 24072 (caddr_t)dev)); 24073 } else { 24074 return (scsi_watch_request_submit(SD_SCSI_DEVP(un), 24075 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 24076 (caddr_t)dev)); 24077 } 24078 } 24079 24080 24081 /* 24082 * Function: sd_check_media 24083 * 24084 * Description: This utility routine implements the functionality for the 24085 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 24086 * drive state changes from that specified by the user 24087 * (inserted or ejected). For example, if the user specifies 24088 * DKIO_EJECTED and the current media state is inserted, this 24089 * routine will immediately return DKIO_INSERTED. However, if the 24090 * current media state is not inserted the user thread will be 24091 * blocked until the drive state changes. If DKIO_NONE is specified 24092 * the user thread will block until a drive state change occurs. 24093 * 24094 * Arguments: dev - the device number 24095 * state - user pointer to a dkio_state, updated with the current 24096 * drive state at return. 24097 * 24098 * Return Code: ENXIO 24099 * EIO 24100 * EAGAIN 24101 * EINTR 24102 */ 24103 24104 static int 24105 sd_check_media(dev_t dev, enum dkio_state state) 24106 { 24107 struct sd_lun *un = NULL; 24108 enum dkio_state prev_state; 24109 opaque_t token = NULL; 24110 int rval = 0; 24111 sd_ssc_t *ssc; 24112 24113 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24114 return (ENXIO); 24115 } 24116 24117 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 24118 24119 ssc = sd_ssc_init(un); 24120 24121 mutex_enter(SD_MUTEX(un)); 24122 24123 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 24124 "state=%x, mediastate=%x\n", state, un->un_mediastate); 24125 24126 prev_state = un->un_mediastate; 24127 24128 /* is there anything to do? */ 24129 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 24130 /* 24131 * submit the request to the scsi_watch service; 24132 * sd_media_watch_cb() does the real work 24133 */ 24134 mutex_exit(SD_MUTEX(un)); 24135 24136 /* 24137 * This change handles the case where a scsi watch request is 24138 * added to a device that is powered down. To accomplish this 24139 * we power up the device before adding the scsi watch request, 24140 * since the scsi watch sends a TUR directly to the device 24141 * which the device cannot handle if it is powered down. 24142 */ 24143 if (sd_pm_entry(un) != DDI_SUCCESS) { 24144 mutex_enter(SD_MUTEX(un)); 24145 goto done; 24146 } 24147 24148 token = sd_watch_request_submit(un); 24149 24150 sd_pm_exit(un); 24151 24152 mutex_enter(SD_MUTEX(un)); 24153 if (token == NULL) { 24154 rval = EAGAIN; 24155 goto done; 24156 } 24157 24158 /* 24159 * This is a special case IOCTL that doesn't return 24160 * until the media state changes. Routine sdpower 24161 * knows about and handles this so don't count it 24162 * as an active cmd in the driver, which would 24163 * keep the device busy to the pm framework. 24164 * If the count isn't decremented the device can't 24165 * be powered down.
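* (The count is incremented again below, either when the wait is interrupted by a signal or once a state change has been observed, so the decrement on the sdioctl() exit path stays balanced.)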
24166 */ 24167 un->un_ncmds_in_driver--; 24168 ASSERT(un->un_ncmds_in_driver >= 0); 24169 24170 /* 24171 * if a prior request had been made, this will be the same 24172 * token, as scsi_watch was designed that way. 24173 */ 24174 un->un_swr_token = token; 24175 un->un_specified_mediastate = state; 24176 24177 /* 24178 * now wait for media change 24179 * we will not be signalled unless mediastate == state but it is 24180 * still better to test for this condition, since there is a 24181 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 24182 */ 24183 SD_TRACE(SD_LOG_COMMON, un, 24184 "sd_check_media: waiting for media state change\n"); 24185 while (un->un_mediastate == state) { 24186 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 24187 SD_TRACE(SD_LOG_COMMON, un, 24188 "sd_check_media: waiting for media state " 24189 "was interrupted\n"); 24190 un->un_ncmds_in_driver++; 24191 rval = EINTR; 24192 goto done; 24193 } 24194 SD_TRACE(SD_LOG_COMMON, un, 24195 "sd_check_media: received signal, state=%x\n", 24196 un->un_mediastate); 24197 } 24198 /* 24199 * Inc the counter to indicate the device once again 24200 * has an active outstanding cmd. 24201 */ 24202 un->un_ncmds_in_driver++; 24203 } 24204 24205 /* invalidate geometry */ 24206 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 24207 sr_ejected(un); 24208 } 24209 24210 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 24211 uint64_t capacity; 24212 uint_t lbasize; 24213 24214 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 24215 mutex_exit(SD_MUTEX(un)); 24216 /* 24217 * Since the following routines use SD_PATH_DIRECT, we must 24218 * call PM directly before the upcoming disk accesses. This 24219 * may cause the disk to be power/spin up. 24220 */ 24221 24222 if (sd_pm_entry(un) == DDI_SUCCESS) { 24223 rval = sd_send_scsi_READ_CAPACITY(ssc, 24224 &capacity, &lbasize, SD_PATH_DIRECT); 24225 if (rval != 0) { 24226 sd_pm_exit(un); 24227 if (rval == EIO) 24228 sd_ssc_assessment(ssc, 24229 SD_FMT_STATUS_CHECK); 24230 else 24231 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24232 mutex_enter(SD_MUTEX(un)); 24233 goto done; 24234 } 24235 } else { 24236 rval = EIO; 24237 mutex_enter(SD_MUTEX(un)); 24238 goto done; 24239 } 24240 mutex_enter(SD_MUTEX(un)); 24241 24242 sd_update_block_info(un, lbasize, capacity); 24243 24244 /* 24245 * Check if the media in the device is writable or not 24246 */ 24247 if (ISCD(un)) { 24248 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 24249 } 24250 24251 mutex_exit(SD_MUTEX(un)); 24252 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 24253 if ((cmlb_validate(un->un_cmlbhandle, 0, 24254 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 24255 sd_set_pstats(un); 24256 SD_TRACE(SD_LOG_IO_PARTITION, un, 24257 "sd_check_media: un:0x%p pstats created and " 24258 "set\n", un); 24259 } 24260 24261 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 24262 SD_PATH_DIRECT); 24263 24264 sd_pm_exit(un); 24265 24266 if (rval != 0) { 24267 if (rval == EIO) 24268 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24269 else 24270 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24271 } 24272 24273 mutex_enter(SD_MUTEX(un)); 24274 } 24275 done: 24276 sd_ssc_fini(ssc); 24277 un->un_f_watcht_stopped = FALSE; 24278 if (token != NULL && un->un_swr_token != NULL) { 24279 /* 24280 * Use of this local token and the mutex ensures that we avoid 24281 * some race conditions associated with terminating the 24282 * scsi watch. 
24283 */ 24284 token = un->un_swr_token; 24285 mutex_exit(SD_MUTEX(un)); 24286 (void) scsi_watch_request_terminate(token, 24287 SCSI_WATCH_TERMINATE_WAIT); 24288 if (scsi_watch_get_ref_count(token) == 0) { 24289 mutex_enter(SD_MUTEX(un)); 24290 un->un_swr_token = (opaque_t)NULL; 24291 } else { 24292 mutex_enter(SD_MUTEX(un)); 24293 } 24294 } 24295 24296 /* 24297 * Update the capacity kstat value, if no media was previously 24298 * present (capacity kstat is 0) and media has now been inserted 24299 * (un_f_blockcount_is_valid == TRUE). 24300 */ 24301 if (un->un_errstats) { 24302 struct sd_errstats *stp = NULL; 24303 24304 stp = (struct sd_errstats *)un->un_errstats->ks_data; 24305 if ((stp->sd_capacity.value.ui64 == 0) && 24306 (un->un_f_blockcount_is_valid == TRUE)) { 24307 stp->sd_capacity.value.ui64 = 24308 (uint64_t)((uint64_t)un->un_blockcount * 24309 un->un_sys_blocksize); 24310 } 24311 } 24312 mutex_exit(SD_MUTEX(un)); 24313 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 24314 return (rval); 24315 } 24316 24317 24318 /* 24319 * Function: sd_delayed_cv_broadcast 24320 * 24321 * Description: Delayed cv_broadcast to allow the target to recover from media 24322 * insertion. 24323 * 24324 * Arguments: arg - driver soft state (unit) structure 24325 */ 24326 24327 static void 24328 sd_delayed_cv_broadcast(void *arg) 24329 { 24330 struct sd_lun *un = arg; 24331 24332 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 24333 24334 mutex_enter(SD_MUTEX(un)); 24335 un->un_dcvb_timeid = NULL; 24336 cv_broadcast(&un->un_state_cv); 24337 mutex_exit(SD_MUTEX(un)); 24338 } 24339 24340 24341 /* 24342 * Function: sd_media_watch_cb 24343 * 24344 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 24345 * routine processes the TUR sense data and updates the driver 24346 * state if a transition has occurred. The user thread 24347 * (sd_check_media) is then signalled.
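* In outline: CMD_DEV_GONE maps to DKIO_DEV_GONE; GESN media-event data or a unit attention with ASC 0x28 maps to DKIO_INSERTED; NOT READY with ASC 0x3a maps to DKIO_EJECTED; and a GOOD status on a completed packet is treated as DKIO_INSERTED.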
24348 * 24349 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24350 * among multiple watches that share this callback function 24351 * resultp - scsi watch facility result packet containing scsi 24352 * packet, status byte and sense data 24353 * 24354 * Return Code: 0 for success, -1 for failure 24355 */ 24356 24357 static int 24358 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24359 { 24360 struct sd_lun *un; 24361 struct scsi_status *statusp = resultp->statusp; 24362 uint8_t *sensep = (uint8_t *)resultp->sensep; 24363 enum dkio_state state = DKIO_NONE; 24364 dev_t dev = (dev_t)arg; 24365 uchar_t actual_sense_length; 24366 uint8_t skey, asc, ascq; 24367 24368 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24369 return (-1); 24370 } 24371 actual_sense_length = resultp->actual_sense_length; 24372 24373 mutex_enter(SD_MUTEX(un)); 24374 SD_TRACE(SD_LOG_COMMON, un, 24375 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 24376 *((char *)statusp), (void *)sensep, actual_sense_length); 24377 24378 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 24379 un->un_mediastate = DKIO_DEV_GONE; 24380 cv_broadcast(&un->un_state_cv); 24381 mutex_exit(SD_MUTEX(un)); 24382 24383 return (0); 24384 } 24385 24386 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 24387 if (sd_gesn_media_data_valid(resultp->mmc_data)) { 24388 if ((resultp->mmc_data[5] & 24389 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) { 24390 state = DKIO_INSERTED; 24391 } else { 24392 state = DKIO_EJECTED; 24393 } 24394 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) == 24395 SD_GESN_MEDIA_EVENT_EJECTREQUEST) { 24396 sd_log_eject_request_event(un, KM_NOSLEEP); 24397 } 24398 } 24399 } else if (sensep != NULL) { 24400 /* 24401 * If there was a check condition then sensep points to valid 24402 * sense data. If status was not a check condition but a 24403 * reservation or busy status then the new state is DKIO_NONE. 24404 */ 24405 skey = scsi_sense_key(sensep); 24406 asc = scsi_sense_asc(sensep); 24407 ascq = scsi_sense_ascq(sensep); 24408 24409 SD_INFO(SD_LOG_COMMON, un, 24410 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 24411 skey, asc, ascq); 24412 /* This routine only uses up to 13 bytes of sense data. */ 24413 if (actual_sense_length >= 13) { 24414 if (skey == KEY_UNIT_ATTENTION) { 24415 if (asc == 0x28) { 24416 state = DKIO_INSERTED; 24417 } 24418 } else if (skey == KEY_NOT_READY) { 24419 /* 24420 * Sense data of 02/06/00 means that the 24421 * drive could not read the media (No 24422 * reference position found). In this case 24423 * to prevent a hang on the DKIOCSTATE IOCTL 24424 * we set the media state to DKIO_INSERTED. 24425 */ 24426 if (asc == 0x06 && ascq == 0x00) 24427 state = DKIO_INSERTED; 24428 24429 /* 24430 * if 02/04/02 means that the host 24431 * should send start command. Explicitly 24432 * leave the media state as is 24433 * (inserted) as the media is inserted 24434 * and host has stopped device for PM 24435 * reasons. Upon next true read/write 24436 * to this media will bring the 24437 * device to the right state good for 24438 * media access. 24439 */ 24440 if (asc == 0x3a) { 24441 state = DKIO_EJECTED; 24442 } else { 24443 /* 24444 * If the drive is busy with an 24445 * operation or long write, keep the 24446 * media in an inserted state. 
24447 */ 24448 24449 if ((asc == 0x04) && 24450 ((ascq == 0x02) || 24451 (ascq == 0x07) || 24452 (ascq == 0x08))) { 24453 state = DKIO_INSERTED; 24454 } 24455 } 24456 } else if (skey == KEY_NO_SENSE) { 24457 if ((asc == 0x00) && (ascq == 0x00)) { 24458 /* 24459 * Sense Data 00/00/00 does not provide 24460 * any information about the state of 24461 * the media. Ignore it. 24462 */ 24463 mutex_exit(SD_MUTEX(un)); 24464 return (0); 24465 } 24466 } 24467 } 24468 } else if ((*((char *)statusp) == STATUS_GOOD) && 24469 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 24470 state = DKIO_INSERTED; 24471 } 24472 24473 SD_TRACE(SD_LOG_COMMON, un, 24474 "sd_media_watch_cb: state=%x, specified=%x\n", 24475 state, un->un_specified_mediastate); 24476 24477 /* 24478 * now signal the waiting thread if this is *not* the specified state; 24479 * delay the signal if the state is DKIO_INSERTED to allow the target 24480 * to recover 24481 */ 24482 if (state != un->un_specified_mediastate) { 24483 un->un_mediastate = state; 24484 if (state == DKIO_INSERTED) { 24485 /* 24486 * delay the signal to give the drive a chance 24487 * to do what it apparently needs to do 24488 */ 24489 SD_TRACE(SD_LOG_COMMON, un, 24490 "sd_media_watch_cb: delayed cv_broadcast\n"); 24491 if (un->un_dcvb_timeid == NULL) { 24492 un->un_dcvb_timeid = 24493 timeout(sd_delayed_cv_broadcast, un, 24494 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24495 } 24496 } else { 24497 SD_TRACE(SD_LOG_COMMON, un, 24498 "sd_media_watch_cb: immediate cv_broadcast\n"); 24499 cv_broadcast(&un->un_state_cv); 24500 } 24501 } 24502 mutex_exit(SD_MUTEX(un)); 24503 return (0); 24504 } 24505 24506 24507 /* 24508 * Function: sd_dkio_get_temp 24509 * 24510 * Description: This routine is the driver entry point for handling ioctl 24511 * requests to get the disk temperature. 24512 * 24513 * Arguments: dev - the device number 24514 * arg - pointer to user provided dk_temperature structure. 24515 * flag - this argument is a pass through to ddi_copyxxx() 24516 * directly from the mode argument of ioctl(). 24517 * 24518 * Return Code: 0 24519 * EFAULT 24520 * ENXIO 24521 * EAGAIN 24522 */ 24523 24524 static int 24525 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24526 { 24527 struct sd_lun *un = NULL; 24528 struct dk_temperature *dktemp = NULL; 24529 uchar_t *temperature_page; 24530 int rval = 0; 24531 int path_flag = SD_PATH_STANDARD; 24532 sd_ssc_t *ssc; 24533 24534 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24535 return (ENXIO); 24536 } 24537 24538 ssc = sd_ssc_init(un); 24539 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24540 24541 /* copyin the disk temp argument to get the user flags */ 24542 if (ddi_copyin((void *)arg, dktemp, 24543 sizeof (struct dk_temperature), flag) != 0) { 24544 rval = EFAULT; 24545 goto done; 24546 } 24547 24548 /* Initialize the temperature to invalid. */ 24549 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24550 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24551 24552 /* 24553 * Note: Investigate removing the "bypass pm" semantic. 24554 * Can we just bypass PM always? 24555 */ 24556 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24557 path_flag = SD_PATH_DIRECT; 24558 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24559 mutex_enter(&un->un_pm_mutex); 24560 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24561 /* 24562 * If DKT_BYPASS_PM is set, and the drive happens to be 24563 * in low power mode, we cannot wake it up; we need to 24564 * return EAGAIN.
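* (A caller that wants the temperature without spinning the disk up can set DKT_BYPASS_PM and simply retry later when it sees EAGAIN.)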
24565 */ 24566 mutex_exit(&un->un_pm_mutex); 24567 rval = EAGAIN; 24568 goto done; 24569 } else { 24570 /* 24571 * Indicate to PM the device is busy. This is required 24572 * to avoid a race - i.e. the ioctl is issuing a 24573 * command and the pm framework brings down the device 24574 * to low power mode (possible power cut-off on some 24575 * platforms). 24576 */ 24577 mutex_exit(&un->un_pm_mutex); 24578 if (sd_pm_entry(un) != DDI_SUCCESS) { 24579 rval = EAGAIN; 24580 goto done; 24581 } 24582 } 24583 } 24584 24585 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24586 24587 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 24588 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 24589 if (rval != 0) 24590 goto done2; 24591 24592 /* 24593 * For the current temperature, verify that the parameter length 24594 * is 0x02 and the parameter code is 0x00 24595 */ 24596 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24597 (temperature_page[5] == 0x00)) { 24598 if (temperature_page[9] == 0xFF) { 24599 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24600 } else { 24601 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24602 } 24603 } 24604 24605 /* 24606 * For the reference temperature, verify that the parameter 24607 * length is 0x02 and the parameter code is 0x01 24608 */ 24609 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24610 (temperature_page[11] == 0x01)) { 24611 if (temperature_page[15] == 0xFF) { 24612 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24613 } else { 24614 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24615 } 24616 } 24617 24618 /* Do the copyout regardless of the temperature command's status. */ 24619 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24620 flag) != 0) { 24621 rval = EFAULT; 24622 goto done1; 24623 } 24624 24625 done2: 24626 if (rval != 0) { 24627 if (rval == EIO) 24628 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24629 else 24630 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24631 } 24632 done1: 24633 if (path_flag == SD_PATH_DIRECT) { 24634 sd_pm_exit(un); 24635 } 24636 24637 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24638 done: 24639 sd_ssc_fini(ssc); 24640 if (dktemp != NULL) { 24641 kmem_free(dktemp, sizeof (struct dk_temperature)); 24642 } 24643 24644 return (rval); 24645 } 24646 24647 24648 /* 24649 * Function: sd_log_page_supported 24650 * 24651 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24652 * supported log pages. 24653 * 24654 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 24655 * structure for this target. 24656 * log_page - the log page to look for in the list of 24657 * supported log pages. 24658 * 24659 * Return Code: -1 - on error (log sense is optional and may not be supported). 24660 * 0 - log page not found. 24661 * 1 - log page found. 24662 */ 24663 24664 static int 24665 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 24666 { 24667 uchar_t *log_page_data; 24668 int i; 24669 int match = 0; 24670 int log_size; 24671 int status = 0; 24672 struct sd_lun *un; 24673 24674 ASSERT(ssc != NULL); 24675 un = ssc->ssc_un; 24676 ASSERT(un != NULL); 24677 24678 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24679 24680 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 24681 SD_PATH_DIRECT); 24682 24683 if (status != 0) { 24684 if (status == EIO) { 24685 /* 24686 * Some disks do not support log sense; we 24687 * should ignore this kind of error (sense key 0x5, illegal request).
24688 */ 24689 uint8_t *sensep; 24690 int senlen; 24691 24692 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 24693 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 24694 ssc->ssc_uscsi_cmd->uscsi_rqresid); 24695 24696 if (senlen > 0 && 24697 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 24698 sd_ssc_assessment(ssc, 24699 SD_FMT_IGNORE_COMPROMISE); 24700 } else { 24701 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24702 } 24703 } else { 24704 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24705 } 24706 24707 SD_ERROR(SD_LOG_COMMON, un, 24708 "sd_log_page_supported: failed log page retrieval\n"); 24709 kmem_free(log_page_data, 0xFF); 24710 return (-1); 24711 } 24712 24713 log_size = log_page_data[3]; 24714 24715 /* 24716 * The list of supported log pages starts from the fourth byte. Check 24717 * until we run out of log pages or a match is found. 24718 */ 24719 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24720 if (log_page_data[i] == log_page) { 24721 match++; 24722 } 24723 } 24724 kmem_free(log_page_data, 0xFF); 24725 return (match); 24726 } 24727 24728 24729 /* 24730 * Function: sd_mhdioc_failfast 24731 * 24732 * Description: This routine is the driver entry point for handling ioctl 24733 * requests to enable/disable the multihost failfast option 24734 * (MHIOCENFAILFAST). 24735 * 24736 * Arguments: dev - the device number 24737 * arg - user specified probing interval. 24738 * flag - this argument is a pass through to ddi_copyxxx() 24739 * directly from the mode argument of ioctl(). 24740 * 24741 * Return Code: 0 24742 * EFAULT 24743 * ENXIO 24744 */ 24745 24746 static int 24747 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24748 { 24749 struct sd_lun *un = NULL; 24750 int mh_time; 24751 int rval = 0; 24752 24753 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24754 return (ENXIO); 24755 } 24756 24757 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24758 return (EFAULT); 24759 24760 if (mh_time) { 24761 mutex_enter(SD_MUTEX(un)); 24762 un->un_resvd_status |= SD_FAILFAST; 24763 mutex_exit(SD_MUTEX(un)); 24764 /* 24765 * If mh_time is INT_MAX, then this ioctl is being used for 24766 * SCSI-3 PGR purposes, and we don't need to spawn the watch 24767 * thread. 24767 */ 24768 if (mh_time != INT_MAX) { 24769 rval = sd_check_mhd(dev, mh_time); 24770 } 24771 } else { 24772 (void) sd_check_mhd(dev, 0); 24773 mutex_enter(SD_MUTEX(un)); 24774 un->un_resvd_status &= ~SD_FAILFAST; 24775 mutex_exit(SD_MUTEX(un)); 24776 } 24777 return (rval); 24778 } 24779 24780 24781 /* 24782 * Function: sd_mhdioc_takeown 24783 * 24784 * Description: This routine is the driver entry point for handling ioctl 24785 * requests to forcefully acquire exclusive access rights to the 24786 * multihost disk (MHIOCTKOWN). 24787 * 24788 * Arguments: dev - the device number 24789 * arg - user provided structure specifying the delay 24790 * parameters in milliseconds 24791 * flag - this argument is a pass through to ddi_copyxxx() 24792 * directly from the mode argument of ioctl().
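* * A hypothetical cluster-style caller (illustrative only): * * struct mhioctkown t; * int probe_ms = 1000; * * bzero(&t, sizeof (t)); * if (ioctl(fd, MHIOCTKOWN, &t) == 0) * (void) ioctl(fd, MHIOCENFAILFAST, &probe_ms);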
24793 * 24794 * Return Code: 0 24795 * EFAULT 24796 * ENXIO 24797 */ 24798 24799 static int 24800 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24801 { 24802 struct sd_lun *un = NULL; 24803 struct mhioctkown *tkown = NULL; 24804 int rval = 0; 24805 24806 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24807 return (ENXIO); 24808 } 24809 24810 if (arg != NULL) { 24811 tkown = (struct mhioctkown *) 24812 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24813 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24814 if (rval != 0) { 24815 rval = EFAULT; 24816 goto error; 24817 } 24818 } 24819 24820 rval = sd_take_ownership(dev, tkown); 24821 mutex_enter(SD_MUTEX(un)); 24822 if (rval == 0) { 24823 un->un_resvd_status |= SD_RESERVE; 24824 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24825 sd_reinstate_resv_delay = 24826 tkown->reinstate_resv_delay * 1000; 24827 } else { 24828 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24829 } 24830 /* 24831 * Give the scsi_watch routine interval set by 24832 * the MHIOCENFAILFAST ioctl precedence here. 24833 */ 24834 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24835 mutex_exit(SD_MUTEX(un)); 24836 (void) sd_check_mhd(dev, 24837 sd_reinstate_resv_delay / 1000); 24838 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24839 "sd_mhdioc_takeown : %d\n", 24840 sd_reinstate_resv_delay); 24841 } else { 24842 mutex_exit(SD_MUTEX(un)); 24843 } 24844 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24845 sd_mhd_reset_notify_cb, (caddr_t)un); 24846 } else { 24847 un->un_resvd_status &= ~SD_RESERVE; 24848 mutex_exit(SD_MUTEX(un)); 24849 } 24850 24851 error: 24852 if (tkown != NULL) { 24853 kmem_free(tkown, sizeof (struct mhioctkown)); 24854 } 24855 return (rval); 24856 } 24857 24858 24859 /* 24860 * Function: sd_mhdioc_release 24861 * 24862 * Description: This routine is the driver entry point for handling ioctl 24863 * requests to release exclusive access rights to the multihost 24864 * disk (MHIOCRELEASE). 24865 * 24866 * Arguments: dev - the device number 24867 * 24868 * Return Code: 0 24869 * ENXIO 24870 */ 24871 24872 static int 24873 sd_mhdioc_release(dev_t dev) 24874 { 24875 struct sd_lun *un = NULL; 24876 timeout_id_t resvd_timeid_save; 24877 int resvd_status_save; 24878 int rval = 0; 24879 24880 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24881 return (ENXIO); 24882 } 24883 24884 mutex_enter(SD_MUTEX(un)); 24885 resvd_status_save = un->un_resvd_status; 24886 un->un_resvd_status &= 24887 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24888 if (un->un_resvd_timeid) { 24889 resvd_timeid_save = un->un_resvd_timeid; 24890 un->un_resvd_timeid = NULL; 24891 mutex_exit(SD_MUTEX(un)); 24892 (void) untimeout(resvd_timeid_save); 24893 } else { 24894 mutex_exit(SD_MUTEX(un)); 24895 } 24896 24897 /* 24898 * destroy any pending timeout thread that may be attempting to 24899 * reinstate reservation on this device. 
24900 */ 24901 sd_rmv_resv_reclaim_req(dev); 24902 24903 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24904 mutex_enter(SD_MUTEX(un)); 24905 if ((un->un_mhd_token) && 24906 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24907 mutex_exit(SD_MUTEX(un)); 24908 (void) sd_check_mhd(dev, 0); 24909 } else { 24910 mutex_exit(SD_MUTEX(un)); 24911 } 24912 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24913 sd_mhd_reset_notify_cb, (caddr_t)un); 24914 } else { 24915 /* 24916 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24917 */ 24918 mutex_enter(SD_MUTEX(un)); 24919 un->un_resvd_status = resvd_status_save; 24920 mutex_exit(SD_MUTEX(un)); 24921 } 24922 return (rval); 24923 } 24924 24925 24926 /* 24927 * Function: sd_mhdioc_register_devid 24928 * 24929 * Description: This routine is the driver entry point for handling ioctl 24930 * requests to register the device id (MHIOCREREGISTERDEVID). 24931 * 24932 * Note: The implementation for this ioctl has been updated to 24933 * be consistent with the original PSARC case (1999/357) 24934 * (4375899, 4241671, 4220005) 24935 * 24936 * Arguments: dev - the device number 24937 * 24938 * Return Code: 0 24939 * ENXIO 24940 */ 24941 24942 static int 24943 sd_mhdioc_register_devid(dev_t dev) 24944 { 24945 struct sd_lun *un = NULL; 24946 int rval = 0; 24947 sd_ssc_t *ssc; 24948 24949 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24950 return (ENXIO); 24951 } 24952 24953 ASSERT(!mutex_owned(SD_MUTEX(un))); 24954 24955 mutex_enter(SD_MUTEX(un)); 24956 24957 /* If a devid already exists, de-register it */ 24958 if (un->un_devid != NULL) { 24959 ddi_devid_unregister(SD_DEVINFO(un)); 24960 /* 24961 * After unregistering the devid, we need to free the devid memory 24962 */ 24963 ddi_devid_free(un->un_devid); 24964 un->un_devid = NULL; 24965 } 24966 24967 /* Check for reservation conflict */ 24968 mutex_exit(SD_MUTEX(un)); 24969 ssc = sd_ssc_init(un); 24970 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24971 mutex_enter(SD_MUTEX(un)); 24972 24973 switch (rval) { 24974 case 0: 24975 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24976 break; 24977 case EACCES: 24978 break; 24979 default: 24980 rval = EIO; 24981 } 24982 24983 mutex_exit(SD_MUTEX(un)); 24984 if (rval != 0) { 24985 if (rval == EIO) 24986 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24987 else 24988 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24989 } 24990 sd_ssc_fini(ssc); 24991 return (rval); 24992 } 24993 24994 24995 /* 24996 * Function: sd_mhdioc_inkeys 24997 * 24998 * Description: This routine is the driver entry point for handling ioctl 24999 * requests to issue the SCSI-3 Persistent Reserve In (Read Keys) 25000 * command to the device (MHIOCGRP_INKEYS). 25001 * 25002 * Arguments: dev - the device number 25003 * arg - user provided in_keys structure 25004 * flag - this argument is a pass through to ddi_copyxxx() 25005 * directly from the mode argument of ioctl().
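* * A caller typically issues MHIOCGRP_INKEYS twice: once with the key list's listsize set to 0 to learn the number of registered keys from listlen, then again with a suitably sized buffer (a convention assumed here from the mhioc_key_list_t layout, not mandated by this routine).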
25006 * 25007 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 25008 * ENXIO 25009 * EFAULT 25010 */ 25011 25012 static int 25013 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 25014 { 25015 struct sd_lun *un; 25016 mhioc_inkeys_t inkeys; 25017 int rval = 0; 25018 25019 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25020 return (ENXIO); 25021 } 25022 25023 #ifdef _MULTI_DATAMODEL 25024 switch (ddi_model_convert_from(flag & FMODELS)) { 25025 case DDI_MODEL_ILP32: { 25026 struct mhioc_inkeys32 inkeys32; 25027 25028 if (ddi_copyin(arg, &inkeys32, 25029 sizeof (struct mhioc_inkeys32), flag) != 0) { 25030 return (EFAULT); 25031 } 25032 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 25033 if ((rval = sd_persistent_reservation_in_read_keys(un, 25034 &inkeys, flag)) != 0) { 25035 return (rval); 25036 } 25037 inkeys32.generation = inkeys.generation; 25038 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 25039 flag) != 0) { 25040 return (EFAULT); 25041 } 25042 break; 25043 } 25044 case DDI_MODEL_NONE: 25045 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 25046 flag) != 0) { 25047 return (EFAULT); 25048 } 25049 if ((rval = sd_persistent_reservation_in_read_keys(un, 25050 &inkeys, flag)) != 0) { 25051 return (rval); 25052 } 25053 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 25054 flag) != 0) { 25055 return (EFAULT); 25056 } 25057 break; 25058 } 25059 25060 #else /* ! _MULTI_DATAMODEL */ 25061 25062 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 25063 return (EFAULT); 25064 } 25065 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 25066 if (rval != 0) { 25067 return (rval); 25068 } 25069 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 25070 return (EFAULT); 25071 } 25072 25073 #endif /* _MULTI_DATAMODEL */ 25074 25075 return (rval); 25076 } 25077 25078 25079 /* 25080 * Function: sd_mhdioc_inresv 25081 * 25082 * Description: This routine is the driver entry point for handling ioctl 25083 * requests to issue the SCSI-3 Persistent In Read Reservations 25084 * command to the device (MHIOCGRP_INKEYS). 25085 * 25086 * Arguments: dev - the device number 25087 * arg - user provided in_resv structure 25088 * flag - this argument is a pass through to ddi_copyxxx() 25089 * directly from the mode argument of ioctl(). 
25090 * 25091 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 25092 * ENXIO 25093 * EFAULT 25094 */ 25095 25096 static int 25097 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 25098 { 25099 struct sd_lun *un; 25100 mhioc_inresvs_t inresvs; 25101 int rval = 0; 25102 25103 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25104 return (ENXIO); 25105 } 25106 25107 #ifdef _MULTI_DATAMODEL 25108 25109 switch (ddi_model_convert_from(flag & FMODELS)) { 25110 case DDI_MODEL_ILP32: { 25111 struct mhioc_inresvs32 inresvs32; 25112 25113 if (ddi_copyin(arg, &inresvs32, 25114 sizeof (struct mhioc_inresvs32), flag) != 0) { 25115 return (EFAULT); 25116 } 25117 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 25118 if ((rval = sd_persistent_reservation_in_read_resv(un, 25119 &inresvs, flag)) != 0) { 25120 return (rval); 25121 } 25122 inresvs32.generation = inresvs.generation; 25123 if (ddi_copyout(&inresvs32, arg, 25124 sizeof (struct mhioc_inresvs32), flag) != 0) { 25125 return (EFAULT); 25126 } 25127 break; 25128 } 25129 case DDI_MODEL_NONE: 25130 if (ddi_copyin(arg, &inresvs, 25131 sizeof (mhioc_inresvs_t), flag) != 0) { 25132 return (EFAULT); 25133 } 25134 if ((rval = sd_persistent_reservation_in_read_resv(un, 25135 &inresvs, flag)) != 0) { 25136 return (rval); 25137 } 25138 if (ddi_copyout(&inresvs, arg, 25139 sizeof (mhioc_inresvs_t), flag) != 0) { 25140 return (EFAULT); 25141 } 25142 break; 25143 } 25144 25145 #else /* ! _MULTI_DATAMODEL */ 25146 25147 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 25148 return (EFAULT); 25149 } 25150 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 25151 if (rval != 0) { 25152 return (rval); 25153 } 25154 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 25155 return (EFAULT); 25156 } 25157 25158 #endif /* ! _MULTI_DATAMODEL */ 25159 25160 return (rval); 25161 } 25162 25163 25164 /* 25165 * The following routines support the clustering functionality described below 25166 * and implement lost reservation reclaim functionality. 25167 * 25168 * Clustering 25169 * ---------- 25170 * The clustering code uses two different, independent forms of SCSI 25171 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 25172 * Persistent Group Reservations. For any particular disk, it will use either 25173 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 25174 * 25175 * SCSI-2 25176 * The cluster software takes ownership of a multi-hosted disk by issuing the 25177 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 25178 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 25179 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 25180 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 25181 * driver. The meaning of failfast is that if the driver (on this host) ever 25182 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 25183 * it should immediately panic the host. The motivation for this ioctl is that 25184 * if this host does encounter reservation conflict, the underlying cause is 25185 * that some other host of the cluster has decided that this host is no longer 25186 * in the cluster and has seized control of the disks for itself. Since this 25187 * host is no longer in the cluster, it ought to panic itself. 
The 25188 * MHIOCENFAILFAST ioctl does two things: 25189 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 25190 * error to panic the host 25191 * (b) it sets up a periodic timer to test whether this host still has 25192 * "access" (in that no other host has reserved the device): if the 25193 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 25194 * purpose of that periodic timer is to handle scenarios where the host is 25195 * otherwise temporarily quiescent, temporarily doing no real i/o. 25196 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 25197 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 25198 * the device itself. 25199 * 25200 * SCSI-3 PGR 25201 * A direct semantic implementation of the SCSI-3 Persistent Reservation 25202 * facility is supported through the shared multihost disk ioctls 25203 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 25204 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR). 25205 * 25206 * Reservation Reclaim: 25207 * -------------------- 25208 * To support the lost reservation reclaim operations, this driver creates a 25209 * single thread to handle reinstating reservations on all devices that have 25210 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 25211 * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb, 25212 * and the reservation reclaim thread loops through the requests to regain the 25213 * lost reservations. 25214 */ 25215 25216 /* 25217 * Function: sd_check_mhd() 25218 * 25219 * Description: This function sets up and submits a scsi watch request or 25220 * terminates an existing watch request. This routine is used in 25221 * support of reservation reclaim. 25222 * 25223 * Arguments: dev - the device 'dev_t' is used for context to discriminate 25224 * among multiple watches that share the callback function 25225 * interval - the number of milliseconds specifying the watch 25226 * interval for issuing TEST UNIT READY commands. If 25227 * set to 0 the watch should be terminated. If the 25228 * interval is set to 0 and if the device is required 25229 * to hold reservation while disabling failfast, the 25230 * watch is restarted with an interval of 25231 * reinstate_resv_delay. 25232 * 25233 * Return Code: 0 - Successful submit/terminate of scsi watch request 25234 * ENXIO - Indicates an invalid device was specified 25235 * EAGAIN - Unable to submit the scsi watch request 25236 */ 25237 25238 static int 25239 sd_check_mhd(dev_t dev, int interval) 25240 { 25241 struct sd_lun *un; 25242 opaque_t token; 25243 25244 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25245 return (ENXIO); 25246 } 25247 25248 /* is this a watch termination request? */ 25249 if (interval == 0) { 25250 mutex_enter(SD_MUTEX(un)); 25251 /* if there is an existing watch task then terminate it */ 25252 if (un->un_mhd_token) { 25253 token = un->un_mhd_token; 25254 un->un_mhd_token = NULL; 25255 mutex_exit(SD_MUTEX(un)); 25256 (void) scsi_watch_request_terminate(token, 25257 SCSI_WATCH_TERMINATE_ALL_WAIT); 25258 mutex_enter(SD_MUTEX(un)); 25259 } else { 25260 mutex_exit(SD_MUTEX(un)); 25261 /* 25262 * Note: If we return here we don't check for the 25263 * failfast case. This is the original legacy 25264 * implementation but perhaps we should be checking 25265 * the failfast case.
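* That is, with no watch to terminate we return without ever reaching the SD_RESERVE check below, so a caller disabling failfast does not get the restarted reinstate_resv_delay watch it would get on the path below.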
25266 */ 25267 return (0); 25268 } 25269 /* 25270 * If the device is required to hold reservation while 25271 * disabling failfast, we need to restart the scsi_watch 25272 * routine with an interval of reinstate_resv_delay. 25273 */ 25274 if (un->un_resvd_status & SD_RESERVE) { 25275 interval = sd_reinstate_resv_delay / 1000; 25276 } else { 25277 /* no failfast so bail */ 25278 mutex_exit(SD_MUTEX(un)); 25279 return (0); 25280 } 25281 mutex_exit(SD_MUTEX(un)); 25282 } 25283 25284 /* 25285 * adjust minimum time interval to 1 second, 25286 * and convert from msecs to usecs 25287 */ 25288 if (interval > 0 && interval < 1000) { 25289 interval = 1000; 25290 } 25291 interval *= 1000; 25292 25293 /* 25294 * submit the request to the scsi_watch service 25295 */ 25296 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 25297 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 25298 if (token == NULL) { 25299 return (EAGAIN); 25300 } 25301 25302 /* 25303 * save token for termination later on 25304 */ 25305 mutex_enter(SD_MUTEX(un)); 25306 un->un_mhd_token = token; 25307 mutex_exit(SD_MUTEX(un)); 25308 return (0); 25309 } 25310 25311 25312 /* 25313 * Function: sd_mhd_watch_cb() 25314 * 25315 * Description: This function is the call back function used by the scsi watch 25316 * facility. The scsi watch facility sends the "Test Unit Ready" 25317 * and processes the status. If applicable (i.e. a "Unit Attention" 25318 * status and automatic "Request Sense" not used) the scsi watch 25319 * facility will send a "Request Sense" and retrieve the sense data 25320 * to be passed to this callback function. In either case (the 25321 * automatic "Request Sense" or the facility submitting one), this 25322 * callback is passed the status and sense data. 25323 * 25324 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25325 * among multiple watches that share this callback function 25326 * resultp - scsi watch facility result packet containing scsi 25327 * packet, status byte and sense data 25328 * 25329 * Return Code: 0 - continue the watch task 25330 * non-zero - terminate the watch task 25331 */ 25332 25333 static int 25334 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 25335 { 25336 struct sd_lun *un; 25337 struct scsi_status *statusp; 25338 uint8_t *sensep; 25339 struct scsi_pkt *pkt; 25340 uchar_t actual_sense_length; 25341 dev_t dev = (dev_t)arg; 25342 25343 ASSERT(resultp != NULL); 25344 statusp = resultp->statusp; 25345 sensep = (uint8_t *)resultp->sensep; 25346 pkt = resultp->pkt; 25347 actual_sense_length = resultp->actual_sense_length; 25348 25349 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25350 return (ENXIO); 25351 } 25352 25353 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25354 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 25355 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 25356 25357 /* Begin processing of the status and/or sense data */ 25358 if (pkt->pkt_reason != CMD_CMPLT) { 25359 /* Handle the incomplete packet */ 25360 sd_mhd_watch_incomplete(un, pkt); 25361 return (0); 25362 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 25363 if (*((unsigned char *)statusp) 25364 == STATUS_RESERVATION_CONFLICT) { 25365 /* 25366 * Handle a reservation conflict by panicking if 25367 * configured for failfast or by logging the conflict 25368 * and updating the reservation status 25369 */ 25370 mutex_enter(SD_MUTEX(un)); 25371 if ((un->un_resvd_status & SD_FAILFAST) && 25372 (sd_failfast_enable)) { 25373
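/* Both the per-unit SD_FAILFAST flag and the global sd_failfast_enable flag must be set before a reservation conflict panics the host. */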
sd_panic_for_res_conflict(un); 25374 /*NOTREACHED*/ 25375 } 25376 SD_INFO(SD_LOG_IOCTL_MHD, un, 25377 "sd_mhd_watch_cb: Reservation Conflict\n"); 25378 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 25379 mutex_exit(SD_MUTEX(un)); 25380 } 25381 } 25382 25383 if (sensep != NULL) { 25384 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 25385 mutex_enter(SD_MUTEX(un)); 25386 if ((scsi_sense_asc(sensep) == 25387 SD_SCSI_RESET_SENSE_CODE) && 25388 (un->un_resvd_status & SD_RESERVE)) { 25389 /* 25390 * The additional sense code indicates a power 25391 * on or bus device reset has occurred; update 25392 * the reservation status. 25393 */ 25394 un->un_resvd_status |= 25395 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25396 SD_INFO(SD_LOG_IOCTL_MHD, un, 25397 "sd_mhd_watch_cb: Lost Reservation\n"); 25398 } 25399 } else { 25400 return (0); 25401 } 25402 } else { 25403 mutex_enter(SD_MUTEX(un)); 25404 } 25405 25406 if ((un->un_resvd_status & SD_RESERVE) && 25407 (un->un_resvd_status & SD_LOST_RESERVE)) { 25408 if (un->un_resvd_status & SD_WANT_RESERVE) { 25409 /* 25410 * A reset occurred in between the last probe and this 25411 * one, so if a timeout is pending, cancel it. 25412 */ 25413 if (un->un_resvd_timeid) { 25414 timeout_id_t temp_id = un->un_resvd_timeid; 25415 un->un_resvd_timeid = NULL; 25416 mutex_exit(SD_MUTEX(un)); 25417 (void) untimeout(temp_id); 25418 mutex_enter(SD_MUTEX(un)); 25419 } 25420 un->un_resvd_status &= ~SD_WANT_RESERVE; 25421 } 25422 if (un->un_resvd_timeid == 0) { 25423 /* Schedule a timeout to handle the lost reservation */ 25424 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25425 (void *)dev, 25426 drv_usectohz(sd_reinstate_resv_delay)); 25427 } 25428 } 25429 mutex_exit(SD_MUTEX(un)); 25430 return (0); 25431 } 25432 25433 25434 /* 25435 * Function: sd_mhd_watch_incomplete() 25436 * 25437 * Description: This function is used to find out why a scsi pkt sent by the 25438 * scsi watch facility was not completed. Under some scenarios this 25439 * routine will return. Otherwise it will send a bus reset to see 25440 * if the drive is still online. 25441 * 25442 * Arguments: un - driver soft state (unit) structure 25443 * pkt - incomplete scsi pkt 25444 */ 25445 25446 static void 25447 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25448 { 25449 int be_chatty; 25450 int perr; 25451 25452 ASSERT(pkt != NULL); 25453 ASSERT(un != NULL); 25454 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25455 perr = (pkt->pkt_statistics & STAT_PERR); 25456 25457 mutex_enter(SD_MUTEX(un)); 25458 if (un->un_state == SD_STATE_DUMPING) { 25459 mutex_exit(SD_MUTEX(un)); 25460 return; 25461 } 25462 25463 switch (pkt->pkt_reason) { 25464 case CMD_UNX_BUS_FREE: 25465 /* 25466 * If we had a parity error that caused the target to drop BSY*, 25467 * don't be chatty about it. 25468 */ 25469 if (perr && be_chatty) { 25470 be_chatty = 0; 25471 } 25472 break; 25473 case CMD_TAG_REJECT: 25474 /* 25475 * The SCSI-2 spec states that a tag reject will be sent by the 25476 * target if tagged queuing is not supported. A tag reject may 25477 * also be sent during certain initialization periods or to 25478 * control internal resources. For the latter case, the target 25479 * may also return Queue Full. 25480 * 25481 * If this driver receives a tag reject from a target that is 25482 * going through an init period or controlling internal 25483 * resources, tagged queuing will be disabled.
This is a less 25484 * than optimal behavior, but the driver is unable to determine 25485 * the target state and assumes tagged queueing is not supported. 25486 */ 25487 pkt->pkt_flags = 0; 25488 un->un_tagflags = 0; 25489 25490 if (un->un_f_opt_queueing == TRUE) { 25491 un->un_throttle = min(un->un_throttle, 3); 25492 } else { 25493 un->un_throttle = 1; 25494 } 25495 mutex_exit(SD_MUTEX(un)); 25496 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 25497 mutex_enter(SD_MUTEX(un)); 25498 break; 25499 case CMD_INCOMPLETE: 25500 /* 25501 * The transport stopped with an abnormal state; fall through and 25502 * reset the target and/or bus, unless selection did not complete 25503 * (indicated by STATE_GOT_BUS), in which case we don't want to 25504 * go through a target/bus reset 25505 */ 25506 if (pkt->pkt_state == STATE_GOT_BUS) { 25507 break; 25508 } 25509 /*FALLTHROUGH*/ 25510 25511 case CMD_TIMEOUT: 25512 default: 25513 /* 25514 * The lun may still be running the command, so a lun reset 25515 * should be attempted. If the lun reset fails or cannot be 25516 * issued, then try a target reset. Lastly, try a bus reset. 25517 */ 25518 if ((pkt->pkt_statistics & 25519 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) { 25520 int reset_retval = 0; 25521 mutex_exit(SD_MUTEX(un)); 25522 if (un->un_f_allow_bus_device_reset == TRUE) { 25523 if (un->un_f_lun_reset_enabled == TRUE) { 25524 reset_retval = 25525 scsi_reset(SD_ADDRESS(un), 25526 RESET_LUN); 25527 } 25528 if (reset_retval == 0) { 25529 reset_retval = 25530 scsi_reset(SD_ADDRESS(un), 25531 RESET_TARGET); 25532 } 25533 } 25534 if (reset_retval == 0) { 25535 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25536 } 25537 mutex_enter(SD_MUTEX(un)); 25538 } 25539 break; 25540 } 25541 25542 /* A device/bus reset has occurred; update the reservation status. */ 25543 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 25544 (STAT_BUS_RESET | STAT_DEV_RESET))) { 25545 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25546 un->un_resvd_status |= 25547 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25548 SD_INFO(SD_LOG_IOCTL_MHD, un, 25549 "sd_mhd_watch_incomplete: Lost Reservation\n"); 25550 } 25551 } 25552 25553 /* 25554 * The disk has been turned off; update the device state. 25555 * 25556 * Note: Should we be offlining the disk here? 25557 */ 25558 if (pkt->pkt_state == STATE_GOT_BUS) { 25559 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 25560 "Disk not responding to selection\n"); 25561 if (un->un_state != SD_STATE_OFFLINE) { 25562 New_state(un, SD_STATE_OFFLINE); 25563 } 25564 } else if (be_chatty) { 25565 /* 25566 * suppress messages if they are all the same pkt reason; 25567 * with TQ, many (up to 256) are returned with the same 25568 * pkt_reason 25569 */ 25570 if (pkt->pkt_reason != un->un_last_pkt_reason) { 25571 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25572 "sd_mhd_watch_incomplete: " 25573 "SCSI transport failed: reason '%s'\n", 25574 scsi_rname(pkt->pkt_reason)); 25575 } 25576 } 25577 un->un_last_pkt_reason = pkt->pkt_reason; 25578 mutex_exit(SD_MUTEX(un)); 25579 } 25580 25581 25582 /* 25583 * Function: sd_sname() 25584 * 25585 * Description: This is a simple little routine to return a string containing 25586 * a printable description of the command status byte for use in 25587 * logging. 25588 * 25589 * Arguments: status - the status byte 25590 * 25591 * Return Code: char * - string containing status description.
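* For example, sd_sname(STATUS_CHECK) returns "check condition", and unrecognized values map to "<unknown status>".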
25592 */ 25593 25594 static char * 25595 sd_sname(uchar_t status) 25596 { 25597 switch (status & STATUS_MASK) { 25598 case STATUS_GOOD: 25599 return ("good status"); 25600 case STATUS_CHECK: 25601 return ("check condition"); 25602 case STATUS_MET: 25603 return ("condition met"); 25604 case STATUS_BUSY: 25605 return ("busy"); 25606 case STATUS_INTERMEDIATE: 25607 return ("intermediate"); 25608 case STATUS_INTERMEDIATE_MET: 25609 return ("intermediate - condition met"); 25610 case STATUS_RESERVATION_CONFLICT: 25611 return ("reservation_conflict"); 25612 case STATUS_TERMINATED: 25613 return ("command terminated"); 25614 case STATUS_QFULL: 25615 return ("queue full"); 25616 default: 25617 return ("<unknown status>"); 25618 } 25619 } 25620 25621 25622 /* 25623 * Function: sd_mhd_resvd_recover() 25624 * 25625 * Description: This function adds a reservation entry to the 25626 * sd_resv_reclaim_request list and signals the reservation 25627 * reclaim thread that there is work pending. If the reservation 25628 * reclaim thread has not been previously created this function 25629 * will kick it off. 25630 * 25631 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25632 * among multiple watches that share this callback function 25633 * 25634 * Context: This routine is called by timeout() and is run in interrupt 25635 * context. It must not sleep or call other functions which may 25636 * sleep. 25637 */ 25638 25639 static void 25640 sd_mhd_resvd_recover(void *arg) 25641 { 25642 dev_t dev = (dev_t)arg; 25643 struct sd_lun *un; 25644 struct sd_thr_request *sd_treq = NULL; 25645 struct sd_thr_request *sd_cur = NULL; 25646 struct sd_thr_request *sd_prev = NULL; 25647 int already_there = 0; 25648 25649 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25650 return; 25651 } 25652 25653 mutex_enter(SD_MUTEX(un)); 25654 un->un_resvd_timeid = NULL; 25655 if (un->un_resvd_status & SD_WANT_RESERVE) { 25656 /* 25657 * There was a reset so don't issue the reserve, allow the 25658 * sd_mhd_watch_cb callback function to notice this and 25659 * reschedule the timeout for reservation. 25660 */ 25661 mutex_exit(SD_MUTEX(un)); 25662 return; 25663 } 25664 mutex_exit(SD_MUTEX(un)); 25665 25666 /* 25667 * Add this device to the sd_resv_reclaim_request list and the 25668 * sd_resv_reclaim_thread should take care of the rest. 25669 * 25670 * Note: We can't sleep in this context so if the memory allocation 25671 * fails allow the sd_mhd_watch_cb callback function to notice this and 25672 * reschedule the timeout for reservation. 
(4378460) 25673 */ 25674 sd_treq = (struct sd_thr_request *) 25675 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25676 if (sd_treq == NULL) { 25677 return; 25678 } 25679 25680 sd_treq->sd_thr_req_next = NULL; 25681 sd_treq->dev = dev; 25682 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25683 if (sd_tr.srq_thr_req_head == NULL) { 25684 sd_tr.srq_thr_req_head = sd_treq; 25685 } else { 25686 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25687 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25688 if (sd_cur->dev == dev) { 25689 /* 25690 * Already in the queue, so don't log 25691 * another request for the device. 25692 */ 25693 already_there = 1; 25694 break; 25695 } 25696 sd_prev = sd_cur; 25697 } 25698 if (!already_there) { 25699 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25700 "logging request for %lx\n", dev); 25701 sd_prev->sd_thr_req_next = sd_treq; 25702 } else { 25703 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25704 } 25705 } 25706 25707 /* 25708 * Create a kernel thread to do the reservation reclaim and free up this 25709 * thread. We cannot block this thread while we go away to do the 25710 * reservation reclaim. 25711 */ 25712 if (sd_tr.srq_resv_reclaim_thread == NULL) 25713 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25714 sd_resv_reclaim_thread, NULL, 25715 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25716 25717 /* Tell the reservation reclaim thread that it has work to do */ 25718 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25719 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25720 } 25721 25722 /* 25723 * Function: sd_resv_reclaim_thread() 25724 * 25725 * Description: This function implements the reservation reclaim operations. 25726 * 25727 * Arguments: none; requests to process are dequeued from the 25728 * sd_resv_reclaim_request list (sd_tr.srq_thr_req_head) 25729 */ 25730 25731 static void 25732 sd_resv_reclaim_thread() 25733 { 25734 struct sd_lun *un; 25735 struct sd_thr_request *sd_mhreq; 25736 25737 /* Wait for work */ 25738 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25739 if (sd_tr.srq_thr_req_head == NULL) { 25740 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25741 &sd_tr.srq_resv_reclaim_mutex); 25742 } 25743 25744 /* Loop while we have work */ 25745 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25746 un = ddi_get_soft_state(sd_state, 25747 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25748 if (un == NULL) { 25749 /* 25750 * softstate structure is NULL so just 25751 * dequeue the request and continue 25752 */ 25753 sd_tr.srq_thr_req_head = 25754 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25755 kmem_free(sd_tr.srq_thr_cur_req, 25756 sizeof (struct sd_thr_request)); 25757 continue; 25758 } 25759 25760 /* dequeue the request */ 25761 sd_mhreq = sd_tr.srq_thr_cur_req; 25762 sd_tr.srq_thr_req_head = 25763 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25764 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25765 25766 /* 25767 * Reclaim reservation only if SD_RESERVE is still set. There 25768 * may have been a call to MHIOCRELEASE before we got here. 25769 */ 25770 mutex_enter(SD_MUTEX(un)); 25771 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25772 /* 25773 * Note: The SD_LOST_RESERVE flag is cleared before 25774 * reclaiming the reservation.
If this is done after the 25775 * call to sd_reserve_release a reservation loss in the 25776 * window between pkt completion of reserve cmd and 25777 * mutex_enter below may not be recognized 25778 */ 25779 un->un_resvd_status &= ~SD_LOST_RESERVE; 25780 mutex_exit(SD_MUTEX(un)); 25781 25782 if (sd_reserve_release(sd_mhreq->dev, 25783 SD_RESERVE) == 0) { 25784 mutex_enter(SD_MUTEX(un)); 25785 un->un_resvd_status |= SD_RESERVE; 25786 mutex_exit(SD_MUTEX(un)); 25787 SD_INFO(SD_LOG_IOCTL_MHD, un, 25788 "sd_resv_reclaim_thread: " 25789 "Reservation Recovered\n"); 25790 } else { 25791 mutex_enter(SD_MUTEX(un)); 25792 un->un_resvd_status |= SD_LOST_RESERVE; 25793 mutex_exit(SD_MUTEX(un)); 25794 SD_INFO(SD_LOG_IOCTL_MHD, un, 25795 "sd_resv_reclaim_thread: Failed " 25796 "Reservation Recovery\n"); 25797 } 25798 } else { 25799 mutex_exit(SD_MUTEX(un)); 25800 } 25801 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25802 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25803 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25804 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25805 /* 25806 * wakeup the destroy thread if anyone is waiting on 25807 * us to complete. 25808 */ 25809 cv_signal(&sd_tr.srq_inprocess_cv); 25810 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25811 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25812 } 25813 25814 /* 25815 * cleanup the sd_tr structure now that this thread will not exist 25816 */ 25817 ASSERT(sd_tr.srq_thr_req_head == NULL); 25818 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25819 sd_tr.srq_resv_reclaim_thread = NULL; 25820 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25821 thread_exit(); 25822 } 25823 25824 25825 /* 25826 * Function: sd_rmv_resv_reclaim_req() 25827 * 25828 * Description: This function removes any pending reservation reclaim requests 25829 * for the specified device. 25830 * 25831 * Arguments: dev - the device 'dev_t' 25832 */ 25833 25834 static void 25835 sd_rmv_resv_reclaim_req(dev_t dev) 25836 { 25837 struct sd_thr_request *sd_mhreq; 25838 struct sd_thr_request *sd_prev; 25839 25840 /* Remove a reservation reclaim request from the list */ 25841 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25842 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25843 /* 25844 * We are attempting to reinstate reservation for 25845 * this device. We wait for sd_reserve_release() 25846 * to return before we return. 25847 */ 25848 cv_wait(&sd_tr.srq_inprocess_cv, 25849 &sd_tr.srq_resv_reclaim_mutex); 25850 } else { 25851 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25852 if (sd_mhreq && sd_mhreq->dev == dev) { 25853 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25854 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25855 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25856 return; 25857 } 25858 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25859 if (sd_mhreq && sd_mhreq->dev == dev) { 25860 break; 25861 } 25862 sd_prev = sd_mhreq; 25863 } 25864 if (sd_mhreq != NULL) { 25865 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25866 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25867 } 25868 } 25869 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25870 } 25871 25872 25873 /* 25874 * Function: sd_mhd_reset_notify_cb() 25875 * 25876 * Description: This is a call back function for scsi_reset_notify. This 25877 * function updates the softstate reserved status and logs the 25878 * reset. The driver scsi watch facility callback function 25879 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25880 * will reclaim the reservation. 
25881 * 25882 * Arguments: arg - driver soft state (unit) structure 25883 */ 25884 25885 static void 25886 sd_mhd_reset_notify_cb(caddr_t arg) 25887 { 25888 struct sd_lun *un = (struct sd_lun *)arg; 25889 25890 mutex_enter(SD_MUTEX(un)); 25891 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25892 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25893 SD_INFO(SD_LOG_IOCTL_MHD, un, 25894 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25895 } 25896 mutex_exit(SD_MUTEX(un)); 25897 } 25898 25899 25900 /* 25901 * Function: sd_take_ownership() 25902 * 25903 * Description: This routine implements an algorithm to achieve a stable 25904 * reservation on disks which don't implement priority reserve, 25905 * and makes sure that other hosts lose their re-reservation attempts. 25906 * This algorithm consists of a loop that keeps issuing the RESERVE 25907 * for some period of time (min_ownership_delay, default 6 seconds). 25908 * During that loop, it looks to see if there has been a bus device 25909 * reset or bus reset (both of which cause an existing reservation 25910 * to be lost). If the reservation is lost, issue RESERVE until a 25911 * period of min_ownership_delay with no resets has gone by, or 25912 * until max_ownership_delay has expired. This loop ensures that 25913 * the host really did manage to reserve the device, in spite of 25914 * resets. The looping for min_ownership_delay (default six 25915 * seconds) is important to early generation clustering products, 25916 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25917 * MHIOCENFAILFAST periodic timer of two seconds. By having 25918 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25919 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25920 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25921 * have already noticed, via the MHIOCENFAILFAST polling, that it 25922 * no longer "owns" the disk and will have panicked itself. Thus, 25923 * the host issuing the MHIOCTKOWN is assured (with timing 25924 * dependencies) that by the time it actually starts to use the 25925 * disk for real work, the old owner is no longer accessing it. 25926 * 25927 * min_ownership_delay is the minimum amount of time for which the 25928 * disk must be reserved continuously devoid of resets before the 25929 * MHIOCTKOWN ioctl will return success. 25930 * 25931 * max_ownership_delay indicates the amount of time by which the 25932 * take ownership should either succeed or time out with an error. 25933 * 25934 * Arguments: dev - the device 'dev_t' 25935 * *p - struct containing timing info. 25936 * 25937 * Return Code: 0 for success or error code 25938 */ 25939 25940 static int 25941 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25942 { 25943 struct sd_lun *un; 25944 int rval; 25945 int err; 25946 int reservation_count = 0; 25947 int min_ownership_delay = 6000000; /* in usec */ 25948 int max_ownership_delay = 30000000; /* in usec */ 25949 clock_t start_time; /* starting time of this algorithm */ 25950 clock_t end_time; /* time limit for giving up */ 25951 clock_t ownership_time; /* time limit for stable ownership */ 25952 clock_t current_time; 25953 clock_t previous_current_time; 25954 25955 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25956 return (ENXIO); 25957 } 25958 25959 /* 25960 * Attempt a device reservation. A priority reservation is requested.
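 * (A priority reservation differs from a plain SD_RESERVE in that, if the
 * RESERVE is rejected with a reservation conflict, sd_reserve_release()
 * will attempt to break the other host's reservation by resetting the
 * device; see sd_reserve_release() below.)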
25961 */ 25962 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25963 != SD_SUCCESS) { 25964 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25965 "sd_take_ownership: return(1)=%d\n", rval); 25966 return (rval); 25967 } 25968 25969 /* Update the softstate reserved status to indicate the reservation */ 25970 mutex_enter(SD_MUTEX(un)); 25971 un->un_resvd_status |= SD_RESERVE; 25972 un->un_resvd_status &= 25973 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25974 mutex_exit(SD_MUTEX(un)); 25975 25976 if (p != NULL) { 25977 if (p->min_ownership_delay != 0) { 25978 min_ownership_delay = p->min_ownership_delay * 1000; 25979 } 25980 if (p->max_ownership_delay != 0) { 25981 max_ownership_delay = p->max_ownership_delay * 1000; 25982 } 25983 } 25984 SD_INFO(SD_LOG_IOCTL_MHD, un, 25985 "sd_take_ownership: min, max delays: %d, %d\n", 25986 min_ownership_delay, max_ownership_delay); 25987 25988 start_time = ddi_get_lbolt(); 25989 current_time = start_time; 25990 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25991 end_time = start_time + drv_usectohz(max_ownership_delay); 25992 25993 while (current_time - end_time < 0) { 25994 delay(drv_usectohz(500000)); 25995 25996 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25997 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25998 mutex_enter(SD_MUTEX(un)); 25999 rval = (un->un_resvd_status & 26000 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 26001 mutex_exit(SD_MUTEX(un)); 26002 break; 26003 } 26004 } 26005 previous_current_time = current_time; 26006 current_time = ddi_get_lbolt(); 26007 mutex_enter(SD_MUTEX(un)); 26008 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 26009 ownership_time = ddi_get_lbolt() + 26010 drv_usectohz(min_ownership_delay); 26011 reservation_count = 0; 26012 } else { 26013 reservation_count++; 26014 } 26015 un->un_resvd_status |= SD_RESERVE; 26016 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 26017 mutex_exit(SD_MUTEX(un)); 26018 26019 SD_INFO(SD_LOG_IOCTL_MHD, un, 26020 "sd_take_ownership: ticks for loop iteration=%ld, " 26021 "reservation=%s\n", (current_time - previous_current_time), 26022 reservation_count ? 
"ok" : "reclaimed"); 26023 26024 if (current_time - ownership_time >= 0 && 26025 reservation_count >= 4) { 26026 rval = 0; /* Achieved a stable ownership */ 26027 break; 26028 } 26029 if (current_time - end_time >= 0) { 26030 rval = EACCES; /* No ownership in max possible time */ 26031 break; 26032 } 26033 } 26034 SD_TRACE(SD_LOG_IOCTL_MHD, un, 26035 "sd_take_ownership: return(2)=%d\n", rval); 26036 return (rval); 26037 } 26038 26039 26040 /* 26041 * Function: sd_reserve_release() 26042 * 26043 * Description: This function builds and sends scsi RESERVE, RELEASE, and 26044 * PRIORITY RESERVE commands based on a user specified command type 26045 * 26046 * Arguments: dev - the device 'dev_t' 26047 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 26048 * SD_RESERVE, SD_RELEASE 26049 * 26050 * Return Code: 0 or Error Code 26051 */ 26052 26053 static int 26054 sd_reserve_release(dev_t dev, int cmd) 26055 { 26056 struct uscsi_cmd *com = NULL; 26057 struct sd_lun *un = NULL; 26058 char cdb[CDB_GROUP0]; 26059 int rval; 26060 26061 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 26062 (cmd == SD_PRIORITY_RESERVE)); 26063 26064 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26065 return (ENXIO); 26066 } 26067 26068 /* instantiate and initialize the command and cdb */ 26069 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26070 bzero(cdb, CDB_GROUP0); 26071 com->uscsi_flags = USCSI_SILENT; 26072 com->uscsi_timeout = un->un_reserve_release_time; 26073 com->uscsi_cdblen = CDB_GROUP0; 26074 com->uscsi_cdb = cdb; 26075 if (cmd == SD_RELEASE) { 26076 cdb[0] = SCMD_RELEASE; 26077 } else { 26078 cdb[0] = SCMD_RESERVE; 26079 } 26080 26081 /* Send the command. */ 26082 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26083 SD_PATH_STANDARD); 26084 26085 /* 26086 * "break" a reservation that is held by another host, by issuing a 26087 * reset if priority reserve is desired, and we could not get the 26088 * device. 26089 */ 26090 if ((cmd == SD_PRIORITY_RESERVE) && 26091 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 26092 /* 26093 * First try to reset the LUN. If we cannot, then try a target 26094 * reset, followed by a bus reset if the target reset fails. 26095 */ 26096 int reset_retval = 0; 26097 if (un->un_f_lun_reset_enabled == TRUE) { 26098 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 26099 } 26100 if (reset_retval == 0) { 26101 /* The LUN reset either failed or was not issued */ 26102 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26103 } 26104 if ((reset_retval == 0) && 26105 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 26106 rval = EIO; 26107 kmem_free(com, sizeof (*com)); 26108 return (rval); 26109 } 26110 26111 bzero(com, sizeof (struct uscsi_cmd)); 26112 com->uscsi_flags = USCSI_SILENT; 26113 com->uscsi_cdb = cdb; 26114 com->uscsi_cdblen = CDB_GROUP0; 26115 com->uscsi_timeout = 5; 26116 26117 /* 26118 * Reissue the last reserve command, this time without request 26119 * sense. Assume that it is just a regular reserve command. 26120 */ 26121 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26122 SD_PATH_STANDARD); 26123 } 26124 26125 /* Return an error if still getting a reservation conflict. 
*/ 26126 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 26127 rval = EACCES; 26128 } 26129 26130 kmem_free(com, sizeof (*com)); 26131 return (rval); 26132 } 26133 26134 26135 #define SD_NDUMP_RETRIES 12 26136 /* 26137 * System Crash Dump routine 26138 */ 26139 26140 static int 26141 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 26142 { 26143 int instance; 26144 int partition; 26145 int i; 26146 int err; 26147 struct sd_lun *un; 26148 struct scsi_pkt *wr_pktp; 26149 struct buf *wr_bp; 26150 struct buf wr_buf; 26151 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 26152 daddr_t tgt_blkno; /* rmw - blkno for target */ 26153 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 26154 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 26155 size_t io_start_offset; 26156 int doing_rmw = FALSE; 26157 int rval; 26158 ssize_t dma_resid; 26159 daddr_t oblkno; 26160 diskaddr_t nblks = 0; 26161 diskaddr_t start_block; 26162 26163 instance = SDUNIT(dev); 26164 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 26165 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 26166 return (ENXIO); 26167 } 26168 26169 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 26170 26171 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 26172 26173 partition = SDPART(dev); 26174 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 26175 26176 if (!(NOT_DEVBSIZE(un))) { 26177 int secmask = 0; 26178 int blknomask = 0; 26179 26180 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 26181 secmask = un->un_tgt_blocksize - 1; 26182 26183 if (blkno & blknomask) { 26184 SD_TRACE(SD_LOG_DUMP, un, 26185 "sddump: dump start block not modulo %d\n", 26186 un->un_tgt_blocksize); 26187 return (EINVAL); 26188 } 26189 26190 if ((nblk * DEV_BSIZE) & secmask) { 26191 SD_TRACE(SD_LOG_DUMP, un, 26192 "sddump: dump length not modulo %d\n", 26193 un->un_tgt_blocksize); 26194 return (EINVAL); 26195 } 26196 26197 } 26198 26199 /* Validate the blocks to dump against the partition size. */ 26200 26201 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 26202 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 26203 26204 if (NOT_DEVBSIZE(un)) { 26205 if ((blkno + nblk) > nblks) { 26206 SD_TRACE(SD_LOG_DUMP, un, 26207 "sddump: dump range larger than partition: " 26208 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 26209 blkno, nblk, nblks); 26210 return (EINVAL); 26211 } 26212 } else { 26213 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) + 26214 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) { 26215 SD_TRACE(SD_LOG_DUMP, un, 26216 "sddump: dump range larger than partition: " 26217 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 26218 blkno, nblk, nblks); 26219 return (EINVAL); 26220 } 26221 } 26222 26223 mutex_enter(&un->un_pm_mutex); 26224 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 26225 struct scsi_pkt *start_pktp; 26226 26227 mutex_exit(&un->un_pm_mutex); 26228 26229 /* 26230 * use the pm framework to power on the HBA first 26231 */ 26232 (void) pm_raise_power(SD_DEVINFO(un), 0, 26233 SD_PM_STATE_ACTIVE(un)); 26234 26235 /* 26236 * Dump no longer uses sdpower to power on a device; it's 26237 * done in-line here so it can be done in polled mode. 26238 */ 26239 26240 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 26241 26242 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 26243 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 26244 26245 if (start_pktp == NULL) { 26246 /* We were not given a SCSI packet, fail.
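 * (scsi_init_pkt() is called above with NULL_FUNC rather than
 * SLEEP_FUNC, so it may return NULL when resources are unavailable;
 * blocking for resources is not an option while dumping.)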
*/ 26247 return (EIO); 26248 } 26249 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 26250 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 26251 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 26252 start_pktp->pkt_flags = FLAG_NOINTR; 26253 26254 mutex_enter(SD_MUTEX(un)); 26255 SD_FILL_SCSI1_LUN(un, start_pktp); 26256 mutex_exit(SD_MUTEX(un)); 26257 /* 26258 * Scsi_poll returns 0 (success) if the command completes and 26259 * the status block is STATUS_GOOD. 26260 */ 26261 if (sd_scsi_poll(un, start_pktp) != 0) { 26262 scsi_destroy_pkt(start_pktp); 26263 return (EIO); 26264 } 26265 scsi_destroy_pkt(start_pktp); 26266 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un), 26267 SD_PM_STATE_CHANGE); 26268 } else { 26269 mutex_exit(&un->un_pm_mutex); 26270 } 26271 26272 mutex_enter(SD_MUTEX(un)); 26273 un->un_throttle = 0; 26274 26275 /* 26276 * The first time through, reset the specific target device. 26277 * However, when cpr calls sddump we know that sd is in 26278 * a good state so no bus reset is required. 26279 * Clear sense data via Request Sense cmd. 26280 * In sddump we don't care about allow_bus_device_reset anymore. 26281 */ 26282 26283 if ((un->un_state != SD_STATE_SUSPENDED) && 26284 (un->un_state != SD_STATE_DUMPING)) { 26285 26286 New_state(un, SD_STATE_DUMPING); 26287 26288 if (un->un_f_is_fibre == FALSE) { 26289 mutex_exit(SD_MUTEX(un)); 26290 /* 26291 * Attempt a bus reset for parallel scsi. 26292 * 26293 * Note: A bus reset is required because on some host 26294 * systems (e.g. E420R) a bus device reset is 26295 * insufficient to reset the state of the target. 26296 * 26297 * Note: Don't issue the reset for fibre-channel, 26298 * because this tends to hang the bus (loop) for 26299 * too long while everyone is logging out and in 26300 * and the deadman timer for dumping will fire 26301 * before the dump is complete. 26302 */ 26303 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 26304 mutex_enter(SD_MUTEX(un)); 26305 Restore_state(un); 26306 mutex_exit(SD_MUTEX(un)); 26307 return (EIO); 26308 } 26309 26310 /* Delay to give the device some recovery time. */ 26311 drv_usecwait(10000); 26312 26313 if (sd_send_polled_RQS(un) == SD_FAILURE) { 26314 SD_INFO(SD_LOG_DUMP, un, 26315 "sddump: sd_send_polled_RQS failed\n"); 26316 } 26317 mutex_enter(SD_MUTEX(un)); 26318 } 26319 } 26320 26321 /* 26322 * Convert the partition-relative block number to a 26323 * disk physical block number. 26324 */ 26325 if (NOT_DEVBSIZE(un)) { 26326 blkno += start_block; 26327 } else { 26328 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE); 26329 blkno += start_block; 26330 } 26331 26332 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 26333 26334 26335 /* 26336 * Check if the device has a non-512 block size. 26337 */ 26338 wr_bp = NULL; 26339 if (NOT_DEVBSIZE(un)) { 26340 tgt_byte_offset = blkno * un->un_sys_blocksize; 26341 tgt_byte_count = nblk * un->un_sys_blocksize; 26342 if ((tgt_byte_offset % un->un_tgt_blocksize) || 26343 (tgt_byte_count % un->un_tgt_blocksize)) { 26344 doing_rmw = TRUE; 26345 /* 26346 * Calculate the block number and number of blocks 26347 * in terms of the media block size. 26348 */ 26349 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26350 tgt_nblk = 26351 ((tgt_byte_offset + tgt_byte_count + 26352 (un->un_tgt_blocksize - 1)) / 26353 un->un_tgt_blocksize) - tgt_blkno; 26354 26355 /* 26356 * Invoke the routine which is going to do the read part 26357 * of the read-modify-write. 26358 * Note that this routine returns a pointer to 26359 * a valid bp in wr_bp.
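 *
 * Worked example with illustrative values (not from any particular
 * device): with un_sys_blocksize = 512, un_tgt_blocksize = 2048,
 * blkno = 5 and nblk = 2, the computations above and below yield
 * tgt_byte_offset = 2560, tgt_byte_count = 1024, tgt_blkno = 1,
 * tgt_nblk = ((2560 + 1024 + 2047) / 2048) - 1 = 1, and
 * io_start_offset = (5 * 512) - (1 * 2048) = 512.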
26360 */ 26361 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 26362 &wr_bp); 26363 if (err) { 26364 mutex_exit(SD_MUTEX(un)); 26365 return (err); 26366 } 26367 /* 26368 * Offset is being calculated as - 26369 * (original block # * system block size) - 26370 * (new block # * target block size) 26371 */ 26372 io_start_offset = 26373 ((uint64_t)(blkno * un->un_sys_blocksize)) - 26374 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 26375 26376 ASSERT(io_start_offset < un->un_tgt_blocksize); 26377 /* 26378 * Do the modify portion of read modify write. 26379 */ 26380 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 26381 (size_t)nblk * un->un_sys_blocksize); 26382 } else { 26383 doing_rmw = FALSE; 26384 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26385 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 26386 } 26387 26388 /* Convert blkno and nblk to target blocks */ 26389 blkno = tgt_blkno; 26390 nblk = tgt_nblk; 26391 } else { 26392 wr_bp = &wr_buf; 26393 bzero(wr_bp, sizeof (struct buf)); 26394 wr_bp->b_flags = B_BUSY; 26395 wr_bp->b_un.b_addr = addr; 26396 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26397 wr_bp->b_resid = 0; 26398 } 26399 26400 mutex_exit(SD_MUTEX(un)); 26401 26402 /* 26403 * Obtain a SCSI packet for the write command. 26404 * It should be safe to call the allocator here without 26405 * worrying about being locked for DVMA mapping because 26406 * the address we're passed is already a DVMA mapping 26407 * 26408 * We are also not going to worry about semaphore ownership 26409 * in the dump buffer. Dumping is single threaded at present. 26410 */ 26411 26412 wr_pktp = NULL; 26413 26414 dma_resid = wr_bp->b_bcount; 26415 oblkno = blkno; 26416 26417 if (!(NOT_DEVBSIZE(un))) { 26418 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 26419 } 26420 26421 while (dma_resid != 0) { 26422 26423 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26424 wr_bp->b_flags &= ~B_ERROR; 26425 26426 if (un->un_partial_dma_supported == 1) { 26427 blkno = oblkno + 26428 ((wr_bp->b_bcount - dma_resid) / 26429 un->un_tgt_blocksize); 26430 nblk = dma_resid / un->un_tgt_blocksize; 26431 26432 if (wr_pktp) { 26433 /* 26434 * Partial DMA transfers after initial transfer 26435 */ 26436 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26437 blkno, nblk); 26438 } else { 26439 /* Initial transfer */ 26440 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26441 un->un_pkt_flags, NULL_FUNC, NULL, 26442 blkno, nblk); 26443 } 26444 } else { 26445 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26446 0, NULL_FUNC, NULL, blkno, nblk); 26447 } 26448 26449 if (rval == 0) { 26450 /* We were given a SCSI packet, continue. 
*/ 26451 break; 26452 } 26453 26454 if (i == 0) { 26455 if (wr_bp->b_flags & B_ERROR) { 26456 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26457 "no resources for dumping; " 26458 "error code: 0x%x, retrying", 26459 geterror(wr_bp)); 26460 } else { 26461 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26462 "no resources for dumping; retrying"); 26463 } 26464 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26465 if (wr_bp->b_flags & B_ERROR) { 26466 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26467 "no resources for dumping; error code: " 26468 "0x%x, retrying\n", geterror(wr_bp)); 26469 } 26470 } else { 26471 if (wr_bp->b_flags & B_ERROR) { 26472 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26473 "no resources for dumping; " 26474 "error code: 0x%x, retries failed, " 26475 "giving up.\n", geterror(wr_bp)); 26476 } else { 26477 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26478 "no resources for dumping; " 26479 "retries failed, giving up.\n"); 26480 } 26481 mutex_enter(SD_MUTEX(un)); 26482 Restore_state(un); 26483 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26484 mutex_exit(SD_MUTEX(un)); 26485 scsi_free_consistent_buf(wr_bp); 26486 } else { 26487 mutex_exit(SD_MUTEX(un)); 26488 } 26489 return (EIO); 26490 } 26491 drv_usecwait(10000); 26492 } 26493 26494 if (un->un_partial_dma_supported == 1) { 26495 /* 26496 * save the resid from PARTIAL_DMA 26497 */ 26498 dma_resid = wr_pktp->pkt_resid; 26499 if (dma_resid != 0) 26500 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26501 wr_pktp->pkt_resid = 0; 26502 } else { 26503 dma_resid = 0; 26504 } 26505 26506 /* SunBug 1222170 */ 26507 wr_pktp->pkt_flags = FLAG_NOINTR; 26508 26509 err = EIO; 26510 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26511 26512 /* 26513 * Scsi_poll returns 0 (success) if the command completes and 26514 * the status block is STATUS_GOOD. We should only check 26515 * errors if this condition is not true. Even then we should 26516 * send our own request sense packet only if we have a check 26517 * condition and auto request sense has not been performed by 26518 * the hba. 26519 */ 26520 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26521 26522 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26523 (wr_pktp->pkt_resid == 0)) { 26524 err = SD_SUCCESS; 26525 break; 26526 } 26527 26528 /* 26529 * Check CMD_DEV_GONE 1st, give up if device is gone. 
26530 */ 26531 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26532 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26533 "Error while dumping state...Device is gone\n"); 26534 break; 26535 } 26536 26537 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26538 SD_INFO(SD_LOG_DUMP, un, 26539 "sddump: write failed with CHECK, try # %d\n", i); 26540 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26541 (void) sd_send_polled_RQS(un); 26542 } 26543 26544 continue; 26545 } 26546 26547 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26548 int reset_retval = 0; 26549 26550 SD_INFO(SD_LOG_DUMP, un, 26551 "sddump: write failed with BUSY, try # %d\n", i); 26552 26553 if (un->un_f_lun_reset_enabled == TRUE) { 26554 reset_retval = scsi_reset(SD_ADDRESS(un), 26555 RESET_LUN); 26556 } 26557 if (reset_retval == 0) { 26558 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26559 } 26560 (void) sd_send_polled_RQS(un); 26561 26562 } else { 26563 SD_INFO(SD_LOG_DUMP, un, 26564 "sddump: write failed with 0x%x, try # %d\n", 26565 SD_GET_PKT_STATUS(wr_pktp), i); 26566 mutex_enter(SD_MUTEX(un)); 26567 sd_reset_target(un, wr_pktp); 26568 mutex_exit(SD_MUTEX(un)); 26569 } 26570 26571 /* 26572 * If we are not getting anywhere with lun/target resets, 26573 * let's reset the bus. 26574 */ 26575 if (i == SD_NDUMP_RETRIES / 2) { 26576 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26577 (void) sd_send_polled_RQS(un); 26578 } 26579 } 26580 } 26581 26582 scsi_destroy_pkt(wr_pktp); 26583 mutex_enter(SD_MUTEX(un)); 26584 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26585 mutex_exit(SD_MUTEX(un)); 26586 scsi_free_consistent_buf(wr_bp); 26587 } else { 26588 mutex_exit(SD_MUTEX(un)); 26589 } 26590 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26591 return (err); 26592 } 26593 26594 /* 26595 * Function: sd_scsi_poll() 26596 * 26597 * Description: This is a wrapper for the scsi_poll call. 26598 * 26599 * Arguments: sd_lun - The unit structure 26600 * scsi_pkt - The scsi packet being sent to the device. 26601 * 26602 * Return Code: 0 - Command completed successfully with good status 26603 * -1 - Command failed. This could indicate a check condition 26604 * or other status value requiring recovery action. 26605 * 26606 * NOTE: This code is only called off sddump(). 26607 */ 26608 26609 static int 26610 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26611 { 26612 int status; 26613 26614 ASSERT(un != NULL); 26615 ASSERT(!mutex_owned(SD_MUTEX(un))); 26616 ASSERT(pktp != NULL); 26617 26618 status = SD_SUCCESS; 26619 26620 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26621 pktp->pkt_flags |= un->un_tagflags; 26622 pktp->pkt_flags &= ~FLAG_NODISCON; 26623 } 26624 26625 status = sd_ddi_scsi_poll(pktp); 26626 /* 26627 * Scsi_poll returns 0 (success) if the command completes and the 26628 * status block is STATUS_GOOD. We should only check errors if this 26629 * condition is not true. Even then we should send our own request 26630 * sense packet only if we have a check condition and auto 26631 * request sense has not been performed by the hba. 26632 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26633 */ 26634 if ((status != SD_SUCCESS) && 26635 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26636 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26637 (pktp->pkt_reason != CMD_DEV_GONE)) 26638 (void) sd_send_polled_RQS(un); 26639 26640 return (status); 26641 } 26642 26643 /* 26644 * Function: sd_send_polled_RQS() 26645 * 26646 * Description: This sends the request sense command to a device. 
26647 * 26648 * Arguments: sd_lun - The unit structure 26649 * 26650 * Return Code: 0 - Command completed successfully with good status 26651 * -1 - Command failed. 26652 * 26653 */ 26654 26655 static int 26656 sd_send_polled_RQS(struct sd_lun *un) 26657 { 26658 int ret_val; 26659 struct scsi_pkt *rqs_pktp; 26660 struct buf *rqs_bp; 26661 26662 ASSERT(un != NULL); 26663 ASSERT(!mutex_owned(SD_MUTEX(un))); 26664 26665 ret_val = SD_SUCCESS; 26666 26667 rqs_pktp = un->un_rqs_pktp; 26668 rqs_bp = un->un_rqs_bp; 26669 26670 mutex_enter(SD_MUTEX(un)); 26671 26672 if (un->un_sense_isbusy) { 26673 ret_val = SD_FAILURE; 26674 mutex_exit(SD_MUTEX(un)); 26675 return (ret_val); 26676 } 26677 26678 /* 26679 * If the request sense buffer (and packet) is not in use, 26680 * let's set un_sense_isbusy and send our packet. 26681 */ 26682 un->un_sense_isbusy = 1; 26683 rqs_pktp->pkt_resid = 0; 26684 rqs_pktp->pkt_reason = 0; 26685 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26686 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26687 26688 mutex_exit(SD_MUTEX(un)); 26689 26690 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26691 " 0x%p\n", rqs_bp->b_un.b_addr); 26692 26693 /* 26694 * Can't send this to sd_scsi_poll; we would wrap ourselves around 26695 * the axle - it has a call into us! 26696 */ 26697 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26698 SD_INFO(SD_LOG_COMMON, un, 26699 "sd_send_polled_RQS: RQS failed\n"); 26700 } 26701 26702 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26703 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26704 26705 mutex_enter(SD_MUTEX(un)); 26706 un->un_sense_isbusy = 0; 26707 mutex_exit(SD_MUTEX(un)); 26708 26709 return (ret_val); 26710 } 26711 26712 /* 26713 * Defines needed for localized version of the scsi_poll routine. 26714 */ 26715 #define CSEC 10000 /* usecs */ 26716 #define SEC_TO_CSEC (1000000 / CSEC) 26717 26718 /* 26719 * Function: sd_ddi_scsi_poll() 26720 * 26721 * Description: Localized version of the scsi_poll routine. The purpose is to 26722 * send a scsi_pkt to a device as a polled command. This version 26723 * is to ensure more robust handling of transport errors. 26724 * Specifically, this routine cures the not-ready to coming-ready 26725 * transition for power up and reset of Sonoma devices. This can take 26726 * up to 45 seconds for power-on and 20 seconds for reset of a 26727 * Sonoma LUN. 26728 * 26729 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26730 * 26731 * Return Code: 0 - Command completed successfully with good status 26732 * -1 - Command failed. 26733 * 26734 * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can 26735 * be fixed (removing this code), we need to determine how to handle the 26736 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 26737 * 26738 * NOTE: This code is only called off sddump(). 26739 */ 26740 static int 26741 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26742 { 26743 int rval = -1; 26744 int savef; 26745 long savet; 26746 void (*savec)(); 26747 int timeout; 26748 int busy_count; 26749 int poll_delay; 26750 int rc; 26751 uint8_t *sensep; 26752 struct scsi_arq_status *arqstat; 26753 extern int do_polled_io; 26754 26755 ASSERT(pkt->pkt_scbp); 26756 26757 /* 26758 * save old flags.
26759 */ 26760 savef = pkt->pkt_flags; 26761 savec = pkt->pkt_comp; 26762 savet = pkt->pkt_time; 26763 26764 pkt->pkt_flags |= FLAG_NOINTR; 26765 26766 /* 26767 * XXX there is nothing in the SCSA spec that states that we should not 26768 * do a callback for polled cmds; however, removing this will break sd 26769 * and probably other target drivers 26770 */ 26771 pkt->pkt_comp = NULL; 26772 26773 /* 26774 * we don't like a polled command without timeout. 26775 * 60 seconds seems long enough. 26776 */ 26777 if (pkt->pkt_time == 0) 26778 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26779 26780 /* 26781 * Send polled cmd. 26782 * 26783 * We do some error recovery for various errors. Tran_busy, 26784 * queue full, and non-dispatched commands are retried every 10 msec. 26785 * as they are typically transient failures. Busy status and Not 26786 * Ready are retried every second as this status takes a while to 26787 * change. 26788 */ 26789 timeout = pkt->pkt_time * SEC_TO_CSEC; 26790 26791 for (busy_count = 0; busy_count < timeout; busy_count++) { 26792 /* 26793 * Initialize pkt status variables. 26794 */ 26795 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26796 26797 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26798 if (rc != TRAN_BUSY) { 26799 /* Transport failed - give up. */ 26800 break; 26801 } else { 26802 /* Transport busy - try again. */ 26803 poll_delay = 1 * CSEC; /* 10 msec. */ 26804 } 26805 } else { 26806 /* 26807 * Transport accepted - check pkt status. 26808 */ 26809 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26810 if ((pkt->pkt_reason == CMD_CMPLT) && 26811 (rc == STATUS_CHECK) && 26812 (pkt->pkt_state & STATE_ARQ_DONE)) { 26813 arqstat = 26814 (struct scsi_arq_status *)(pkt->pkt_scbp); 26815 sensep = (uint8_t *)&arqstat->sts_sensedata; 26816 } else { 26817 sensep = NULL; 26818 } 26819 26820 if ((pkt->pkt_reason == CMD_CMPLT) && 26821 (rc == STATUS_GOOD)) { 26822 /* No error - we're done */ 26823 rval = 0; 26824 break; 26825 26826 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26827 /* Lost connection - give up */ 26828 break; 26829 26830 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26831 (pkt->pkt_state == 0)) { 26832 /* Pkt not dispatched - try again. */ 26833 poll_delay = 1 * CSEC; /* 10 msec. */ 26834 26835 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26836 (rc == STATUS_QFULL)) { 26837 /* Queue full - try again. */ 26838 poll_delay = 1 * CSEC; /* 10 msec. */ 26839 26840 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26841 (rc == STATUS_BUSY)) { 26842 /* Busy - try again. */ 26843 poll_delay = 100 * CSEC; /* 1 sec. */ 26844 busy_count += (SEC_TO_CSEC - 1); 26845 26846 } else if ((sensep != NULL) && 26847 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26848 /* 26849 * Unit Attention - try again. 26850 * Pretend it took 1 sec. 26851 * NOTE: 'continue' avoids poll_delay 26852 */ 26853 busy_count += (SEC_TO_CSEC - 1); 26854 continue; 26855 26856 } else if ((sensep != NULL) && 26857 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26858 (scsi_sense_asc(sensep) == 0x04) && 26859 (scsi_sense_ascq(sensep) == 0x01)) { 26860 /* 26861 * Not ready -> ready - try again. 26862 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26863 * ...same as STATUS_BUSY 26864 */ 26865 poll_delay = 100 * CSEC; /* 1 sec. */ 26866 busy_count += (SEC_TO_CSEC - 1); 26867 26868 } else { 26869 /* BAD status - give up. 
*/ 26870 break; 26871 } 26872 } 26873 26874 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26875 !do_polled_io) { 26876 delay(drv_usectohz(poll_delay)); 26877 } else { 26878 /* we busy wait during cpr_dump or interrupt threads */ 26879 drv_usecwait(poll_delay); 26880 } 26881 } 26882 26883 pkt->pkt_flags = savef; 26884 pkt->pkt_comp = savec; 26885 pkt->pkt_time = savet; 26886 26887 /* return on error */ 26888 if (rval) 26889 return (rval); 26890 26891 /* 26892 * This is not a performance critical code path. 26893 * 26894 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26895 * issues associated with looking at DMA memory prior to 26896 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26897 */ 26898 scsi_sync_pkt(pkt); 26899 return (0); 26900 } 26901 26902 26903 26904 /* 26905 * Function: sd_persistent_reservation_in_read_keys 26906 * 26907 * Description: This routine is the driver entry point for handling CD-ROM 26908 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26909 * by sending the SCSI-3 PRIN commands to the device. 26910 * Processes the read keys command response by copying the 26911 * reservation key information into the user provided buffer. 26912 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26913 * 26914 * Arguments: un - Pointer to soft state struct for the target. 26915 * usrp - user provided pointer to multihost Persistent In Read 26916 * Keys structure (mhioc_inkeys_t) 26917 * flag - this argument is a pass through to ddi_copyxxx() 26918 * directly from the mode argument of ioctl(). 26919 * 26920 * Return Code: 0 - Success 26921 * EACCES 26922 * ENOTSUP 26923 * errno return code from sd_send_scsi_cmd() 26924 * 26925 * Context: Can sleep. Does not return until command is completed. 26926 */ 26927 26928 static int 26929 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26930 mhioc_inkeys_t *usrp, int flag) 26931 { 26932 #ifdef _MULTI_DATAMODEL 26933 struct mhioc_key_list32 li32; 26934 #endif 26935 sd_prin_readkeys_t *in; 26936 mhioc_inkeys_t *ptr; 26937 mhioc_key_list_t li; 26938 uchar_t *data_bufp = NULL; 26939 int data_len = 0; 26940 int rval = 0; 26941 size_t copysz = 0; 26942 sd_ssc_t *ssc; 26943 26944 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26945 return (EINVAL); 26946 } 26947 bzero(&li, sizeof (mhioc_key_list_t)); 26948 26949 ssc = sd_ssc_init(un); 26950 26951 /* 26952 * Get the listsize from user 26953 */ 26954 #ifdef _MULTI_DATAMODEL 26955 switch (ddi_model_convert_from(flag & FMODELS)) { 26956 case DDI_MODEL_ILP32: 26957 copysz = sizeof (struct mhioc_key_list32); 26958 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26959 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26960 "sd_persistent_reservation_in_read_keys: " 26961 "failed ddi_copyin: mhioc_key_list32_t\n"); 26962 rval = EFAULT; 26963 goto done; 26964 } 26965 li.listsize = li32.listsize; 26966 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26967 break; 26968 26969 case DDI_MODEL_NONE: 26970 copysz = sizeof (mhioc_key_list_t); 26971 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26972 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26973 "sd_persistent_reservation_in_read_keys: " 26974 "failed ddi_copyin: mhioc_key_list_t\n"); 26975 rval = EFAULT; 26976 goto done; 26977 } 26978 break; 26979 } 26980 26981 #else /* ! 
_MULTI_DATAMODEL */ 26982 copysz = sizeof (mhioc_key_list_t); 26983 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26984 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26985 "sd_persistent_reservation_in_read_keys: " 26986 "failed ddi_copyin: mhioc_key_list_t\n"); 26987 rval = EFAULT; 26988 goto done; 26989 } 26990 #endif 26991 26992 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26993 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26994 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26995 26996 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26997 data_len, data_bufp); 26998 if (rval != 0) { 26999 if (rval == EIO) 27000 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 27001 else 27002 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 27003 goto done; 27004 } 27005 in = (sd_prin_readkeys_t *)data_bufp; 27006 ptr->generation = BE_32(in->generation); 27007 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 27008 27009 /* 27010 * Return the min(listsize, listlen) keys 27011 */ 27012 #ifdef _MULTI_DATAMODEL 27013 27014 switch (ddi_model_convert_from(flag & FMODELS)) { 27015 case DDI_MODEL_ILP32: 27016 li32.listlen = li.listlen; 27017 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 27018 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27019 "sd_persistent_reservation_in_read_keys: " 27020 "failed ddi_copyout: mhioc_key_list32_t\n"); 27021 rval = EFAULT; 27022 goto done; 27023 } 27024 break; 27025 27026 case DDI_MODEL_NONE: 27027 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 27028 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27029 "sd_persistent_reservation_in_read_keys: " 27030 "failed ddi_copyout: mhioc_key_list_t\n"); 27031 rval = EFAULT; 27032 goto done; 27033 } 27034 break; 27035 } 27036 27037 #else /* ! _MULTI_DATAMODEL */ 27038 27039 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 27040 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27041 "sd_persistent_reservation_in_read_keys: " 27042 "failed ddi_copyout: mhioc_key_list_t\n"); 27043 rval = EFAULT; 27044 goto done; 27045 } 27046 27047 #endif /* _MULTI_DATAMODEL */ 27048 27049 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 27050 li.listsize * MHIOC_RESV_KEY_SIZE); 27051 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 27052 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27053 "sd_persistent_reservation_in_read_keys: " 27054 "failed ddi_copyout: keylist\n"); 27055 rval = EFAULT; 27056 } 27057 done: 27058 sd_ssc_fini(ssc); 27059 kmem_free(data_bufp, data_len); 27060 return (rval); 27061 } 27062 27063 27064 /* 27065 * Function: sd_persistent_reservation_in_read_resv 27066 * 27067 * Description: This routine is the driver entry point for handling CD-ROM 27068 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 27069 * by sending the SCSI-3 PRIN commands to the device. 27070 * Process the read persistent reservations command response by 27071 * copying the reservation information into the user provided 27072 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 27073 * 27074 * Arguments: un - Pointer to soft state struct for the target. 27075 * usrp - user provided pointer to multihost Persistent In Read 27076 * Reservations structure (mhioc_inresvs_t) 27077 * flag - this argument is a pass through to ddi_copyxxx() 27078 * directly from the mode argument of ioctl(). 27079 * 27080 * Return Code: 0 - Success 27081 * EACCES 27082 * ENOTSUP 27083 * errno return code from sd_send_scsi_cmd() 27084 * 27085 * Context: Can sleep. Does not return until command is completed.
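 *
 * Illustrative caller's sketch (not driver code; 'fd' is a
 * hypothetical open descriptor on the device node):
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t dl;
 *	mhioc_inresvs_t arg;
 *
 *	dl.listsize = 4;
 *	dl.list = descs;
 *	arg.li = &dl;
 *	if (ioctl(fd, MHIOCGRP_INRESV, &arg) == 0)
 *		... arg.generation and dl.listlen are now valid ...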
27086 */ 27087 27088 static int 27089 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 27090 mhioc_inresvs_t *usrp, int flag) 27091 { 27092 #ifdef _MULTI_DATAMODEL 27093 struct mhioc_resv_desc_list32 resvlist32; 27094 #endif 27095 sd_prin_readresv_t *in; 27096 mhioc_inresvs_t *ptr; 27097 sd_readresv_desc_t *readresv_ptr; 27098 mhioc_resv_desc_list_t resvlist; 27099 mhioc_resv_desc_t resvdesc; 27100 uchar_t *data_bufp = NULL; 27101 int data_len; 27102 int rval = 0; 27103 int i; 27104 size_t copysz = 0; 27105 mhioc_resv_desc_t *bufp; 27106 sd_ssc_t *ssc; 27107 27108 if ((ptr = usrp) == NULL) { 27109 return (EINVAL); 27110 } 27111 27112 ssc = sd_ssc_init(un); 27113 27114 /* 27115 * Get the listsize from user 27116 */ 27117 #ifdef _MULTI_DATAMODEL 27118 switch (ddi_model_convert_from(flag & FMODELS)) { 27119 case DDI_MODEL_ILP32: 27120 copysz = sizeof (struct mhioc_resv_desc_list32); 27121 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 27122 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27123 "sd_persistent_reservation_in_read_resv: " 27124 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 27125 rval = EFAULT; 27126 goto done; 27127 } 27128 resvlist.listsize = resvlist32.listsize; 27129 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 27130 break; 27131 27132 case DDI_MODEL_NONE: 27133 copysz = sizeof (mhioc_resv_desc_list_t); 27134 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 27135 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27136 "sd_persistent_reservation_in_read_resv: " 27137 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 27138 rval = EFAULT; 27139 goto done; 27140 } 27141 break; 27142 } 27143 #else /* ! _MULTI_DATAMODEL */ 27144 copysz = sizeof (mhioc_resv_desc_list_t); 27145 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 27146 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27147 "sd_persistent_reservation_in_read_resv: " 27148 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 27149 rval = EFAULT; 27150 goto done; 27151 } 27152 #endif /* ! _MULTI_DATAMODEL */ 27153 27154 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 27155 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 27156 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 27157 27158 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 27159 data_len, data_bufp); 27160 if (rval != 0) { 27161 if (rval == EIO) 27162 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 27163 else 27164 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 27165 goto done; 27166 } 27167 in = (sd_prin_readresv_t *)data_bufp; 27168 ptr->generation = BE_32(in->generation); 27169 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 27170 27171 /* 27172 * Return the min(listsize, listlen) reservation descriptors 27173 */ 27174 #ifdef _MULTI_DATAMODEL 27175 27176 switch (ddi_model_convert_from(flag & FMODELS)) { 27177 case DDI_MODEL_ILP32: 27178 resvlist32.listlen = resvlist.listlen; 27179 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 27180 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27181 "sd_persistent_reservation_in_read_resv: " 27182 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 27183 rval = EFAULT; 27184 goto done; 27185 } 27186 break; 27187 27188 case DDI_MODEL_NONE: 27189 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 27190 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27191 "sd_persistent_reservation_in_read_resv: " 27192 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 27193 rval = EFAULT; 27194 goto done; 27195 } 27196 break; 27197 } 27198 27199 #else /* !
_MULTI_DATAMODEL */ 27200 27201 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 27202 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27203 "sd_persistent_reservation_in_read_resv: " 27204 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 27205 rval = EFAULT; 27206 goto done; 27207 } 27208 27209 #endif /* ! _MULTI_DATAMODEL */ 27210 27211 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 27212 bufp = resvlist.list; 27213 copysz = sizeof (mhioc_resv_desc_t); 27214 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 27215 i++, readresv_ptr++, bufp++) { 27216 27217 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 27218 MHIOC_RESV_KEY_SIZE); 27219 resvdesc.type = readresv_ptr->type; 27220 resvdesc.scope = readresv_ptr->scope; 27221 resvdesc.scope_specific_addr = 27222 BE_32(readresv_ptr->scope_specific_addr); 27223 27224 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 27225 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27226 "sd_persistent_reservation_in_read_resv: " 27227 "failed ddi_copyout: resvlist\n"); 27228 rval = EFAULT; 27229 goto done; 27230 } 27231 } 27232 done: 27233 sd_ssc_fini(ssc); 27234 /* data_bufp is freed only if it was allocated */ 27235 if (data_bufp) { 27236 kmem_free(data_bufp, data_len); 27237 } 27238 return (rval); 27239 } 27240 27241 27242 /* 27243 * Function: sr_change_blkmode() 27244 * 27245 * Description: This routine is the driver entry point for handling CD-ROM 27246 * block mode ioctl requests. Support for returning and changing 27247 * the current block size in use by the device is implemented. The 27248 * LBA size is changed via a MODE SELECT Block Descriptor. 27249 * 27250 * This routine issues a mode sense with an allocation length of 27251 * 12 bytes for the mode page header and a single block descriptor. 27252 * 27253 * Arguments: dev - the device 'dev_t' 27254 * cmd - the request type; one of CDROMGBLKMODE (get) or 27255 * CDROMSBLKMODE (set) 27256 * data - current block size or requested block size 27257 * flag - this argument is a pass through to ddi_copyxxx() directly 27258 * from the mode argument of ioctl(). 27259 * 27260 * Return Code: the code returned by sd_send_scsi_cmd() 27261 * EINVAL if invalid arguments are provided 27262 * EFAULT if ddi_copyxxx() fails 27263 * ENXIO if ddi_get_soft_state fails 27264 * EIO if invalid mode sense block descriptor length 27265 * 27266 */ 27267 27268 static int 27269 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 27270 { 27271 struct sd_lun *un = NULL; 27272 struct mode_header *sense_mhp, *select_mhp; 27273 struct block_descriptor *sense_desc, *select_desc; 27274 int current_bsize; 27275 int rval = EINVAL; 27276 uchar_t *sense = NULL; 27277 uchar_t *select = NULL; 27278 sd_ssc_t *ssc; 27279 27280 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 27281 27282 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27283 return (ENXIO); 27284 } 27285 27286 /* 27287 * The block length is changed via the Mode Select block descriptor; the 27288 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 27289 * required as part of this routine. Therefore the mode sense allocation 27290 * length is specified to be the length of a mode page header and a 27291 * block descriptor.
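 *
 * For reference, the logical block size is carried in the three
 * block-descriptor bytes decoded below; e.g. a 2048-byte block is
 * reported as blksize_hi = 0x00, blksize_mid = 0x08 and
 * blksize_lo = 0x00.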
27292 */ 27293 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 27294 27295 ssc = sd_ssc_init(un); 27296 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27297 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 27298 sd_ssc_fini(ssc); 27299 if (rval != 0) { 27300 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27301 "sr_change_blkmode: Mode Sense Failed\n"); 27302 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27303 return (rval); 27304 } 27305 27306 /* Check the block descriptor len to handle only 1 block descriptor */ 27307 sense_mhp = (struct mode_header *)sense; 27308 if ((sense_mhp->bdesc_length == 0) || 27309 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 27310 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27311 "sr_change_blkmode: Mode Sense returned invalid block" 27312 " descriptor length\n"); 27313 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27314 return (EIO); 27315 } 27316 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 27317 current_bsize = ((sense_desc->blksize_hi << 16) | 27318 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 27319 27320 /* Process command */ 27321 switch (cmd) { 27322 case CDROMGBLKMODE: 27323 /* Return the block size obtained during the mode sense */ 27324 if (ddi_copyout(&current_bsize, (void *)data, 27325 sizeof (int), flag) != 0) 27326 rval = EFAULT; 27327 break; 27328 case CDROMSBLKMODE: 27329 /* Validate the requested block size */ 27330 switch (data) { 27331 case CDROM_BLK_512: 27332 case CDROM_BLK_1024: 27333 case CDROM_BLK_2048: 27334 case CDROM_BLK_2056: 27335 case CDROM_BLK_2336: 27336 case CDROM_BLK_2340: 27337 case CDROM_BLK_2352: 27338 case CDROM_BLK_2368: 27339 case CDROM_BLK_2448: 27340 case CDROM_BLK_2646: 27341 case CDROM_BLK_2647: 27342 break; 27343 default: 27344 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27345 "sr_change_blkmode: " 27346 "Block Size '%ld' Not Supported\n", data); 27347 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27348 return (EINVAL); 27349 } 27350 27351 /* 27352 * The current block size matches the requested block size so 27353 * there is no need to send the mode select to change the size 27354 */ 27355 if (current_bsize == data) { 27356 break; 27357 } 27358 27359 /* Build the select data for the requested block size */ 27360 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 27361 select_mhp = (struct mode_header *)select; 27362 select_desc = 27363 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 27364 /* 27365 * The LBA size is changed via the block descriptor, so the 27366 * descriptor is built according to the user data 27367 */ 27368 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 27369 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 27370 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 27371 select_desc->blksize_lo = (char)((data) & 0x000000ff); 27372 27373 /* Send the mode select for the requested block size */ 27374 ssc = sd_ssc_init(un); 27375 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27376 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27377 SD_PATH_STANDARD); 27378 sd_ssc_fini(ssc); 27379 if (rval != 0) { 27380 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27381 "sr_change_blkmode: Mode Select Failed\n"); 27382 /* 27383 * The mode select failed for the requested block size, 27384 * so reset the data for the original block size and 27385 * send it to the target. The error is indicated by the 27386 * return value for the failed mode select.
27387 */ 27388 select_desc->blksize_hi = sense_desc->blksize_hi; 27389 select_desc->blksize_mid = sense_desc->blksize_mid; 27390 select_desc->blksize_lo = sense_desc->blksize_lo; 27391 ssc = sd_ssc_init(un); 27392 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27393 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27394 SD_PATH_STANDARD); 27395 sd_ssc_fini(ssc); 27396 } else { 27397 ASSERT(!mutex_owned(SD_MUTEX(un))); 27398 mutex_enter(SD_MUTEX(un)); 27399 sd_update_block_info(un, (uint32_t)data, 0); 27400 mutex_exit(SD_MUTEX(un)); 27401 } 27402 break; 27403 default: 27404 /* should not reach here, but check anyway */ 27405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27406 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 27407 rval = EINVAL; 27408 break; 27409 } 27410 27411 if (select) { 27412 kmem_free(select, BUFLEN_CHG_BLK_MODE); 27413 } 27414 if (sense) { 27415 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27416 } 27417 return (rval); 27418 } 27419 27420 27421 /* 27422 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 27423 * implement driver support for getting and setting the CD speed. The command 27424 * set used will be based on the device type. If the device has not been 27425 * identified as MMC the Toshiba vendor specific mode page will be used. If 27426 * the device is MMC but does not support the Real Time Streaming feature 27427 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27428 * be used to read the speed. 27429 */ 27430 27431 /* 27432 * Function: sr_change_speed() 27433 * 27434 * Description: This routine is the driver entry point for handling CD-ROM 27435 * drive speed ioctl requests for devices supporting the Toshiba 27436 * vendor specific drive speed mode page. Support for returning 27437 * and changing the current drive speed in use by the device is 27438 * implemented. 27439 * 27440 * Arguments: dev - the device 'dev_t' 27441 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27442 * CDROMSDRVSPEED (set) 27443 * data - current drive speed or requested drive speed 27444 * flag - this argument is a pass through to ddi_copyxxx() directly 27445 * from the mode argument of ioctl(). 27446 * 27447 * Return Code: the code returned by sd_send_scsi_cmd() 27448 * EINVAL if invalid arguments are provided 27449 * EFAULT if ddi_copyxxx() fails 27450 * ENXIO if fail ddi_get_soft_state 27451 * EIO if invalid mode sense block descriptor length 27452 */ 27453 27454 static int 27455 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27456 { 27457 struct sd_lun *un = NULL; 27458 struct mode_header *sense_mhp, *select_mhp; 27459 struct mode_speed *sense_page, *select_page; 27460 int current_speed; 27461 int rval = EINVAL; 27462 int bd_len; 27463 uchar_t *sense = NULL; 27464 uchar_t *select = NULL; 27465 sd_ssc_t *ssc; 27466 27467 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27468 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27469 return (ENXIO); 27470 } 27471 27472 /* 27473 * Note: The drive speed is being modified here according to a Toshiba 27474 * vendor specific mode page (0x31). 
27475 */ 27476 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27477 27478 ssc = sd_ssc_init(un); 27479 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27480 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 27481 SD_PATH_STANDARD); 27482 sd_ssc_fini(ssc); 27483 if (rval != 0) { 27484 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27485 "sr_change_speed: Mode Sense Failed\n"); 27486 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27487 return (rval); 27488 } 27489 sense_mhp = (struct mode_header *)sense; 27490 27491 /* Check the block descriptor len to handle only 1 block descriptor */ 27492 bd_len = sense_mhp->bdesc_length; 27493 if (bd_len > MODE_BLK_DESC_LENGTH) { 27494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27495 "sr_change_speed: Mode Sense returned invalid block " 27496 "descriptor length\n"); 27497 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27498 return (EIO); 27499 } 27500 27501 sense_page = (struct mode_speed *) 27502 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27503 current_speed = sense_page->speed; 27504 27505 /* Process command */ 27506 switch (cmd) { 27507 case CDROMGDRVSPEED: 27508 /* Return the drive speed obtained during the mode sense */ 27509 if (current_speed == 0x2) { 27510 current_speed = CDROM_TWELVE_SPEED; 27511 } 27512 if (ddi_copyout(¤t_speed, (void *)data, 27513 sizeof (int), flag) != 0) { 27514 rval = EFAULT; 27515 } 27516 break; 27517 case CDROMSDRVSPEED: 27518 /* Validate the requested drive speed */ 27519 switch ((uchar_t)data) { 27520 case CDROM_TWELVE_SPEED: 27521 data = 0x2; 27522 /*FALLTHROUGH*/ 27523 case CDROM_NORMAL_SPEED: 27524 case CDROM_DOUBLE_SPEED: 27525 case CDROM_QUAD_SPEED: 27526 case CDROM_MAXIMUM_SPEED: 27527 break; 27528 default: 27529 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27530 "sr_change_speed: " 27531 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 27532 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27533 return (EINVAL); 27534 } 27535 27536 /* 27537 * The current drive speed matches the requested drive speed so 27538 * there is no need to send the mode select to change the speed 27539 */ 27540 if (current_speed == data) { 27541 break; 27542 } 27543 27544 /* Build the select data for the requested drive speed */ 27545 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27546 select_mhp = (struct mode_header *)select; 27547 select_mhp->bdesc_length = 0; 27548 select_page = 27549 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 27550 select_page = 27551 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 27552 select_page->mode_page.code = CDROM_MODE_SPEED; 27553 select_page->mode_page.length = 2; 27554 select_page->speed = (uchar_t)data; 27555 27556 /* Send the mode select for the requested block size */ 27557 ssc = sd_ssc_init(un); 27558 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27559 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27560 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27561 sd_ssc_fini(ssc); 27562 if (rval != 0) { 27563 /* 27564 * The mode select failed for the requested drive speed, 27565 * so reset the data for the original drive speed and 27566 * send it to the target. The error is indicated by the 27567 * return value for the failed mode select. 
27568 */ 27569 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27570 "sr_drive_speed: Mode Select Failed\n"); 27571 select_page->speed = sense_page->speed; 27572 ssc = sd_ssc_init(un); 27573 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27574 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27575 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27576 sd_ssc_fini(ssc); 27577 } 27578 break; 27579 default: 27580 /* should not reach here, but check anyway */ 27581 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27582 "sr_change_speed: Command '%x' Not Supported\n", cmd); 27583 rval = EINVAL; 27584 break; 27585 } 27586 27587 if (select) { 27588 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 27589 } 27590 if (sense) { 27591 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27592 } 27593 27594 return (rval); 27595 } 27596 27597 27598 /* 27599 * Function: sr_atapi_change_speed() 27600 * 27601 * Description: This routine is the driver entry point for handling CD-ROM 27602 * drive speed ioctl requests for MMC devices that do not support 27603 * the Real Time Streaming feature (0x107). 27604 * 27605 * Note: This routine will use the SET SPEED command which may not 27606 * be supported by all devices. 27607 * 27608 * Arguments: dev- the device 'dev_t' 27609 * cmd- the request type; one of CDROMGDRVSPEED (get) or 27610 * CDROMSDRVSPEED (set) 27611 * data- current drive speed or requested drive speed 27612 * flag- this argument is a pass through to ddi_copyxxx() directly 27613 * from the mode argument of ioctl(). 27614 * 27615 * Return Code: the code returned by sd_send_scsi_cmd() 27616 * EINVAL if invalid arguments are provided 27617 * EFAULT if ddi_copyxxx() fails 27618 * ENXIO if fail ddi_get_soft_state 27619 * EIO if invalid mode sense block descriptor length 27620 */ 27621 27622 static int 27623 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27624 { 27625 struct sd_lun *un; 27626 struct uscsi_cmd *com = NULL; 27627 struct mode_header_grp2 *sense_mhp; 27628 uchar_t *sense_page; 27629 uchar_t *sense = NULL; 27630 char cdb[CDB_GROUP5]; 27631 int bd_len; 27632 int current_speed = 0; 27633 int max_speed = 0; 27634 int rval; 27635 sd_ssc_t *ssc; 27636 27637 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27638 27639 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27640 return (ENXIO); 27641 } 27642 27643 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 27644 27645 ssc = sd_ssc_init(un); 27646 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27647 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 27648 SD_PATH_STANDARD); 27649 sd_ssc_fini(ssc); 27650 if (rval != 0) { 27651 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27652 "sr_atapi_change_speed: Mode Sense Failed\n"); 27653 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27654 return (rval); 27655 } 27656 27657 /* Check the block descriptor len to handle only 1 block descriptor */ 27658 sense_mhp = (struct mode_header_grp2 *)sense; 27659 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 27660 if (bd_len > MODE_BLK_DESC_LENGTH) { 27661 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27662 "sr_atapi_change_speed: Mode Sense returned invalid " 27663 "block descriptor length\n"); 27664 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27665 return (EIO); 27666 } 27667 27668 /* Calculate the current and maximum drive speeds */ 27669 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27670 current_speed = (sense_page[14] << 8) | sense_page[15]; 27671 max_speed = (sense_page[8] << 8) | sense_page[9]; 27672 27673 
/* Process the command */ 27674 switch (cmd) { 27675 case CDROMGDRVSPEED: 27676 current_speed /= SD_SPEED_1X; 27677 if (ddi_copyout(¤t_speed, (void *)data, 27678 sizeof (int), flag) != 0) 27679 rval = EFAULT; 27680 break; 27681 case CDROMSDRVSPEED: 27682 /* Convert the speed code to KB/sec */ 27683 switch ((uchar_t)data) { 27684 case CDROM_NORMAL_SPEED: 27685 current_speed = SD_SPEED_1X; 27686 break; 27687 case CDROM_DOUBLE_SPEED: 27688 current_speed = 2 * SD_SPEED_1X; 27689 break; 27690 case CDROM_QUAD_SPEED: 27691 current_speed = 4 * SD_SPEED_1X; 27692 break; 27693 case CDROM_TWELVE_SPEED: 27694 current_speed = 12 * SD_SPEED_1X; 27695 break; 27696 case CDROM_MAXIMUM_SPEED: 27697 current_speed = 0xffff; 27698 break; 27699 default: 27700 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27701 "sr_atapi_change_speed: invalid drive speed %d\n", 27702 (uchar_t)data); 27703 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27704 return (EINVAL); 27705 } 27706 27707 /* Check the request against the drive's max speed. */ 27708 if (current_speed != 0xffff) { 27709 if (current_speed > max_speed) { 27710 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27711 return (EINVAL); 27712 } 27713 } 27714 27715 /* 27716 * Build and send the SET SPEED command 27717 * 27718 * Note: The SET SPEED (0xBB) command used in this routine is 27719 * obsolete per the SCSI MMC spec but still supported in the 27720 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 27721 * therefore the command is still implemented in this routine. 27722 */ 27723 bzero(cdb, sizeof (cdb)); 27724 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27725 cdb[2] = (uchar_t)(current_speed >> 8); 27726 cdb[3] = (uchar_t)current_speed; 27727 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27728 com->uscsi_cdb = (caddr_t)cdb; 27729 com->uscsi_cdblen = CDB_GROUP5; 27730 com->uscsi_bufaddr = NULL; 27731 com->uscsi_buflen = 0; 27732 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27733 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 27734 break; 27735 default: 27736 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27737 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27738 rval = EINVAL; 27739 } 27740 27741 if (sense) { 27742 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27743 } 27744 if (com) { 27745 kmem_free(com, sizeof (*com)); 27746 } 27747 return (rval); 27748 } 27749 27750 27751 /* 27752 * Function: sr_pause_resume() 27753 * 27754 * Description: This routine is the driver entry point for handling CD-ROM 27755 * pause/resume ioctl requests. This only affects the audio play 27756 * operation. 27757 * 27758 * Arguments: dev - the device 'dev_t' 27759 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27760 * for setting the resume bit of the cdb. 
27761 * 27762 * Return Code: the code returned by sd_send_scsi_cmd() 27763 * EINVAL if invalid mode specified 27764 * 27765 */ 27766 27767 static int 27768 sr_pause_resume(dev_t dev, int cmd) 27769 { 27770 struct sd_lun *un; 27771 struct uscsi_cmd *com; 27772 char cdb[CDB_GROUP1]; 27773 int rval; 27774 27775 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27776 return (ENXIO); 27777 } 27778 27779 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27780 bzero(cdb, CDB_GROUP1); 27781 cdb[0] = SCMD_PAUSE_RESUME; 27782 switch (cmd) { 27783 case CDROMRESUME: 27784 cdb[8] = 1; 27785 break; 27786 case CDROMPAUSE: 27787 cdb[8] = 0; 27788 break; 27789 default: 27790 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27791 " Command '%x' Not Supported\n", cmd); 27792 rval = EINVAL; 27793 goto done; 27794 } 27795 27796 com->uscsi_cdb = cdb; 27797 com->uscsi_cdblen = CDB_GROUP1; 27798 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27799 27800 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27801 SD_PATH_STANDARD); 27802 27803 done: 27804 kmem_free(com, sizeof (*com)); 27805 return (rval); 27806 } 27807 27808 27809 /* 27810 * Function: sr_play_msf() 27811 * 27812 * Description: This routine is the driver entry point for handling CD-ROM 27813 * ioctl requests to output the audio signals at the specified 27814 * starting address and continue the audio play until the specified 27815 * ending address (CDROMPLAYMSF) The address is in Minute Second 27816 * Frame (MSF) format. 27817 * 27818 * Arguments: dev - the device 'dev_t' 27819 * data - pointer to user provided audio msf structure, 27820 * specifying start/end addresses. 27821 * flag - this argument is a pass through to ddi_copyxxx() 27822 * directly from the mode argument of ioctl(). 
27823 * 27824 * Return Code: the code returned by sd_send_scsi_cmd() 27825 * EFAULT if ddi_copyxxx() fails 27826 * ENXIO if fail ddi_get_soft_state 27827 * EINVAL if data pointer is NULL 27828 */ 27829 27830 static int 27831 sr_play_msf(dev_t dev, caddr_t data, int flag) 27832 { 27833 struct sd_lun *un; 27834 struct uscsi_cmd *com; 27835 struct cdrom_msf msf_struct; 27836 struct cdrom_msf *msf = &msf_struct; 27837 char cdb[CDB_GROUP1]; 27838 int rval; 27839 27840 if (data == NULL) { 27841 return (EINVAL); 27842 } 27843 27844 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27845 return (ENXIO); 27846 } 27847 27848 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27849 return (EFAULT); 27850 } 27851 27852 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27853 bzero(cdb, CDB_GROUP1); 27854 cdb[0] = SCMD_PLAYAUDIO_MSF; 27855 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27856 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27857 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27858 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27859 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27860 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27861 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27862 } else { 27863 cdb[3] = msf->cdmsf_min0; 27864 cdb[4] = msf->cdmsf_sec0; 27865 cdb[5] = msf->cdmsf_frame0; 27866 cdb[6] = msf->cdmsf_min1; 27867 cdb[7] = msf->cdmsf_sec1; 27868 cdb[8] = msf->cdmsf_frame1; 27869 } 27870 com->uscsi_cdb = cdb; 27871 com->uscsi_cdblen = CDB_GROUP1; 27872 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27873 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27874 SD_PATH_STANDARD); 27875 kmem_free(com, sizeof (*com)); 27876 return (rval); 27877 } 27878 27879 27880 /* 27881 * Function: sr_play_trkind() 27882 * 27883 * Description: This routine is the driver entry point for handling CD-ROM 27884 * ioctl requests to output the audio signals at the specified 27885 * starting address and continue the audio play until the specified 27886 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27887 * format. 27888 * 27889 * Arguments: dev - the device 'dev_t' 27890 * data - pointer to user provided audio track/index structure, 27891 * specifying start/end addresses. 27892 * flag - this argument is a pass through to ddi_copyxxx() 27893 * directly from the mode argument of ioctl(). 
27894 * 27895 * Return Code: the code returned by sd_send_scsi_cmd() 27896 * EFAULT if ddi_copyxxx() fails 27897 * ENXIO if fail ddi_get_soft_state 27898 * EINVAL if data pointer is NULL 27899 */ 27900 27901 static int 27902 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27903 { 27904 struct cdrom_ti ti_struct; 27905 struct cdrom_ti *ti = &ti_struct; 27906 struct uscsi_cmd *com = NULL; 27907 char cdb[CDB_GROUP1]; 27908 int rval; 27909 27910 if (data == NULL) { 27911 return (EINVAL); 27912 } 27913 27914 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27915 return (EFAULT); 27916 } 27917 27918 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27919 bzero(cdb, CDB_GROUP1); 27920 cdb[0] = SCMD_PLAYAUDIO_TI; 27921 cdb[4] = ti->cdti_trk0; 27922 cdb[5] = ti->cdti_ind0; 27923 cdb[7] = ti->cdti_trk1; 27924 cdb[8] = ti->cdti_ind1; 27925 com->uscsi_cdb = cdb; 27926 com->uscsi_cdblen = CDB_GROUP1; 27927 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27928 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27929 SD_PATH_STANDARD); 27930 kmem_free(com, sizeof (*com)); 27931 return (rval); 27932 } 27933 27934 27935 /* 27936 * Function: sr_read_all_subcodes() 27937 * 27938 * Description: This routine is the driver entry point for handling CD-ROM 27939 * ioctl requests to return raw subcode data while the target is 27940 * playing audio (CDROMSUBCODE). 27941 * 27942 * Arguments: dev - the device 'dev_t' 27943 * data - pointer to user provided cdrom subcode structure, 27944 * specifying the transfer length and address. 27945 * flag - this argument is a pass through to ddi_copyxxx() 27946 * directly from the mode argument of ioctl(). 27947 * 27948 * Return Code: the code returned by sd_send_scsi_cmd() 27949 * EFAULT if ddi_copyxxx() fails 27950 * ENXIO if fail ddi_get_soft_state 27951 * EINVAL if data pointer is NULL 27952 */ 27953 27954 static int 27955 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27956 { 27957 struct sd_lun *un = NULL; 27958 struct uscsi_cmd *com = NULL; 27959 struct cdrom_subcode *subcode = NULL; 27960 int rval; 27961 size_t buflen; 27962 char cdb[CDB_GROUP5]; 27963 27964 #ifdef _MULTI_DATAMODEL 27965 /* To support ILP32 applications in an LP64 world */ 27966 struct cdrom_subcode32 cdrom_subcode32; 27967 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27968 #endif 27969 if (data == NULL) { 27970 return (EINVAL); 27971 } 27972 27973 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27974 return (ENXIO); 27975 } 27976 27977 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27978 27979 #ifdef _MULTI_DATAMODEL 27980 switch (ddi_model_convert_from(flag & FMODELS)) { 27981 case DDI_MODEL_ILP32: 27982 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27983 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27984 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27985 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27986 return (EFAULT); 27987 } 27988 /* Convert the ILP32 uscsi data from the application to LP64 */ 27989 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27990 break; 27991 case DDI_MODEL_NONE: 27992 if (ddi_copyin(data, subcode, 27993 sizeof (struct cdrom_subcode), flag)) { 27994 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27995 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27996 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27997 return (EFAULT); 27998 } 27999 break; 28000 } 28001 #else /* ! 
_MULTI_DATAMODEL */ 28002 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 28003 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28004 "sr_read_all_subcodes: ddi_copyin Failed\n"); 28005 kmem_free(subcode, sizeof (struct cdrom_subcode)); 28006 return (EFAULT); 28007 } 28008 #endif /* _MULTI_DATAMODEL */ 28009 28010 /* 28011 * Since MMC-2 expects max 3 bytes for length, check if the 28012 * length input is greater than 3 bytes 28013 */ 28014 if ((subcode->cdsc_length & 0xFF000000) != 0) { 28015 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28016 "sr_read_all_subcodes: " 28017 "cdrom transfer length too large: %d (limit %d)\n", 28018 subcode->cdsc_length, 0xFFFFFF); 28019 kmem_free(subcode, sizeof (struct cdrom_subcode)); 28020 return (EINVAL); 28021 } 28022 28023 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 28024 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28025 bzero(cdb, CDB_GROUP5); 28026 28027 if (un->un_f_mmc_cap == TRUE) { 28028 cdb[0] = (char)SCMD_READ_CD; 28029 cdb[2] = (char)0xff; 28030 cdb[3] = (char)0xff; 28031 cdb[4] = (char)0xff; 28032 cdb[5] = (char)0xff; 28033 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 28034 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 28035 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 28036 cdb[10] = 1; 28037 } else { 28038 /* 28039 * Note: A vendor specific command (0xDF) is being used here to 28040 * request a read of all subcodes. 28041 */ 28042 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 28043 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 28044 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 28045 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 28046 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 28047 } 28048 com->uscsi_cdb = cdb; 28049 com->uscsi_cdblen = CDB_GROUP5; 28050 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 28051 com->uscsi_buflen = buflen; 28052 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28053 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28054 SD_PATH_STANDARD); 28055 kmem_free(subcode, sizeof (struct cdrom_subcode)); 28056 kmem_free(com, sizeof (*com)); 28057 return (rval); 28058 } 28059 28060 28061 /* 28062 * Function: sr_read_subchannel() 28063 * 28064 * Description: This routine is the driver entry point for handling CD-ROM 28065 * ioctl requests to return the Q sub-channel data of the CD 28066 * current position block. (CDROMSUBCHNL) The data includes the 28067 * track number, index number, absolute CD-ROM address (LBA or MSF 28068 * format per the user) , track relative CD-ROM address (LBA or MSF 28069 * format per the user), control data and audio status. 28070 * 28071 * Arguments: dev - the device 'dev_t' 28072 * data - pointer to user provided cdrom sub-channel structure 28073 * flag - this argument is a pass through to ddi_copyxxx() 28074 * directly from the mode argument of ioctl(). 
28075 * 28076 * Return Code: the code returned by sd_send_scsi_cmd() 28077 * EFAULT if ddi_copyxxx() fails 28078 * ENXIO if fail ddi_get_soft_state 28079 * EINVAL if data pointer is NULL 28080 */ 28081 28082 static int 28083 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 28084 { 28085 struct sd_lun *un; 28086 struct uscsi_cmd *com; 28087 struct cdrom_subchnl subchanel; 28088 struct cdrom_subchnl *subchnl = &subchanel; 28089 char cdb[CDB_GROUP1]; 28090 caddr_t buffer; 28091 int rval; 28092 28093 if (data == NULL) { 28094 return (EINVAL); 28095 } 28096 28097 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28098 (un->un_state == SD_STATE_OFFLINE)) { 28099 return (ENXIO); 28100 } 28101 28102 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 28103 return (EFAULT); 28104 } 28105 28106 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 28107 bzero(cdb, CDB_GROUP1); 28108 cdb[0] = SCMD_READ_SUBCHANNEL; 28109 /* Set the MSF bit based on the user requested address format */ 28110 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 28111 /* 28112 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 28113 * returned 28114 */ 28115 cdb[2] = 0x40; 28116 /* 28117 * Set byte 3 to specify the return data format. A value of 0x01 28118 * indicates that the CD-ROM current position should be returned. 28119 */ 28120 cdb[3] = 0x01; 28121 cdb[8] = 0x10; 28122 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28123 com->uscsi_cdb = cdb; 28124 com->uscsi_cdblen = CDB_GROUP1; 28125 com->uscsi_bufaddr = buffer; 28126 com->uscsi_buflen = 16; 28127 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28128 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28129 SD_PATH_STANDARD); 28130 if (rval != 0) { 28131 kmem_free(buffer, 16); 28132 kmem_free(com, sizeof (*com)); 28133 return (rval); 28134 } 28135 28136 /* Process the returned Q sub-channel data */ 28137 subchnl->cdsc_audiostatus = buffer[1]; 28138 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4; 28139 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 28140 subchnl->cdsc_trk = buffer[6]; 28141 subchnl->cdsc_ind = buffer[7]; 28142 if (subchnl->cdsc_format & CDROM_LBA) { 28143 subchnl->cdsc_absaddr.lba = 28144 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28145 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28146 subchnl->cdsc_reladdr.lba = 28147 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 28148 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 28149 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 28150 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 28151 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 28152 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 28153 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 28154 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 28155 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 28156 } else { 28157 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 28158 subchnl->cdsc_absaddr.msf.second = buffer[10]; 28159 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 28160 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 28161 subchnl->cdsc_reladdr.msf.second = buffer[14]; 28162 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 28163 } 28164 kmem_free(buffer, 16); 28165 kmem_free(com, sizeof (*com)); 28166 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 28167 != 0) { 28168 return (EFAULT); 28169 } 28170 return (rval); 28171 } 28172 28173 28174 /* 28175 * Function: sr_read_tocentry() 
28176 * 28177 * Description: This routine is the driver entry point for handling CD-ROM 28178 * ioctl requests to read from the Table of Contents (TOC) 28179 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 28180 * fields, the starting address (LBA or MSF format per the user) 28181 * and the data mode if the user specified track is a data track. 28182 * 28183 * Note: The READ HEADER (0x44) command used in this routine is 28184 * obsolete per the SCSI MMC spec but still supported in the 28185 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 28186 * therefore the command is still implemented in this routine. 28187 * 28188 * Arguments: dev - the device 'dev_t' 28189 * data - pointer to user provided toc entry structure, 28190 * specifying the track # and the address format 28191 * (LBA or MSF). 28192 * flag - this argument is a pass through to ddi_copyxxx() 28193 * directly from the mode argument of ioctl(). 28194 * 28195 * Return Code: the code returned by sd_send_scsi_cmd() 28196 * EFAULT if ddi_copyxxx() fails 28197 * ENXIO if fail ddi_get_soft_state 28198 * EINVAL if data pointer is NULL 28199 */ 28200 28201 static int 28202 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 28203 { 28204 struct sd_lun *un = NULL; 28205 struct uscsi_cmd *com; 28206 struct cdrom_tocentry toc_entry; 28207 struct cdrom_tocentry *entry = &toc_entry; 28208 caddr_t buffer; 28209 int rval; 28210 char cdb[CDB_GROUP1]; 28211 28212 if (data == NULL) { 28213 return (EINVAL); 28214 } 28215 28216 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28217 (un->un_state == SD_STATE_OFFLINE)) { 28218 return (ENXIO); 28219 } 28220 28221 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 28222 return (EFAULT); 28223 } 28224 28225 /* Validate the requested track and address format */ 28226 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 28227 return (EINVAL); 28228 } 28229 28230 if (entry->cdte_track == 0) { 28231 return (EINVAL); 28232 } 28233 28234 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 28235 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28236 bzero(cdb, CDB_GROUP1); 28237 28238 cdb[0] = SCMD_READ_TOC; 28239 /* Set the MSF bit based on the user requested address format */ 28240 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 28241 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 28242 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 28243 } else { 28244 cdb[6] = entry->cdte_track; 28245 } 28246 28247 /* 28248 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
28249 * (4 byte TOC response header + 8 byte track descriptor) 28250 */ 28251 cdb[8] = 12; 28252 com->uscsi_cdb = cdb; 28253 com->uscsi_cdblen = CDB_GROUP1; 28254 com->uscsi_bufaddr = buffer; 28255 com->uscsi_buflen = 0x0C; 28256 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 28257 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28258 SD_PATH_STANDARD); 28259 if (rval != 0) { 28260 kmem_free(buffer, 12); 28261 kmem_free(com, sizeof (*com)); 28262 return (rval); 28263 } 28264 28265 /* Process the toc entry */ 28266 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 28267 entry->cdte_ctrl = (buffer[5] & 0x0F); 28268 if (entry->cdte_format & CDROM_LBA) { 28269 entry->cdte_addr.lba = 28270 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28271 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28272 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 28273 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 28274 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 28275 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 28276 /* 28277 * Send a READ TOC command using the LBA address format to get 28278 * the LBA for the track requested so it can be used in the 28279 * READ HEADER request 28280 * 28281 * Note: The MSF bit of the READ HEADER command specifies the 28282 * output format. The block address specified in that command 28283 * must be in LBA format. 28284 */ 28285 cdb[1] = 0; 28286 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28287 SD_PATH_STANDARD); 28288 if (rval != 0) { 28289 kmem_free(buffer, 12); 28290 kmem_free(com, sizeof (*com)); 28291 return (rval); 28292 } 28293 } else { 28294 entry->cdte_addr.msf.minute = buffer[9]; 28295 entry->cdte_addr.msf.second = buffer[10]; 28296 entry->cdte_addr.msf.frame = buffer[11]; 28297 /* 28298 * Send a READ TOC command using the LBA address format to get 28299 * the LBA for the track requested so it can be used in the 28300 * READ HEADER request 28301 * 28302 * Note: The MSF bit of the READ HEADER command specifies the 28303 * output format. The block address specified in that command 28304 * must be in LBA format. 28305 */ 28306 cdb[1] = 0; 28307 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28308 SD_PATH_STANDARD); 28309 if (rval != 0) { 28310 kmem_free(buffer, 12); 28311 kmem_free(com, sizeof (*com)); 28312 return (rval); 28313 } 28314 } 28315 28316 /* 28317 * Build and send the READ HEADER command to determine the data mode of 28318 * the user specified track. 28319 */ 28320 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 28321 (entry->cdte_track != CDROM_LEADOUT)) { 28322 bzero(cdb, CDB_GROUP1); 28323 cdb[0] = SCMD_READ_HEADER; 28324 cdb[2] = buffer[8]; 28325 cdb[3] = buffer[9]; 28326 cdb[4] = buffer[10]; 28327 cdb[5] = buffer[11]; 28328 cdb[8] = 0x08; 28329 com->uscsi_buflen = 0x08; 28330 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28331 SD_PATH_STANDARD); 28332 if (rval == 0) { 28333 entry->cdte_datamode = buffer[0]; 28334 } else { 28335 /* 28336 * READ HEADER command failed, since this is 28337 * obsoleted in one spec, its better to return 28338 * -1 for an invlid track so that we can still 28339 * receive the rest of the TOC data. 
28340 */ 28341 entry->cdte_datamode = (uchar_t)-1; 28342 } 28343 } else { 28344 entry->cdte_datamode = (uchar_t)-1; 28345 } 28346 28347 kmem_free(buffer, 12); 28348 kmem_free(com, sizeof (*com)); 28349 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 28350 return (EFAULT); 28351 28352 return (rval); 28353 } 28354 28355 28356 /* 28357 * Function: sr_read_tochdr() 28358 * 28359 * Description: This routine is the driver entry point for handling CD-ROM 28360 * ioctl requests to read the Table of Contents (TOC) header 28361 * (CDROMREADTOHDR). The TOC header consists of the disk starting 28362 * and ending track numbers 28363 * 28364 * Arguments: dev - the device 'dev_t' 28365 * data - pointer to user provided toc header structure, 28366 * specifying the starting and ending track numbers. 28367 * flag - this argument is a pass through to ddi_copyxxx() 28368 * directly from the mode argument of ioctl(). 28369 * 28370 * Return Code: the code returned by sd_send_scsi_cmd() 28371 * EFAULT if ddi_copyxxx() fails 28372 * ENXIO if fail ddi_get_soft_state 28373 * EINVAL if data pointer is NULL 28374 */ 28375 28376 static int 28377 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 28378 { 28379 struct sd_lun *un; 28380 struct uscsi_cmd *com; 28381 struct cdrom_tochdr toc_header; 28382 struct cdrom_tochdr *hdr = &toc_header; 28383 char cdb[CDB_GROUP1]; 28384 int rval; 28385 caddr_t buffer; 28386 28387 if (data == NULL) { 28388 return (EINVAL); 28389 } 28390 28391 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28392 (un->un_state == SD_STATE_OFFLINE)) { 28393 return (ENXIO); 28394 } 28395 28396 buffer = kmem_zalloc(4, KM_SLEEP); 28397 bzero(cdb, CDB_GROUP1); 28398 cdb[0] = SCMD_READ_TOC; 28399 /* 28400 * Specifying a track number of 0x00 in the READ TOC command indicates 28401 * that the TOC header should be returned 28402 */ 28403 cdb[6] = 0x00; 28404 /* 28405 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 28406 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 28407 */ 28408 cdb[8] = 0x04; 28409 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28410 com->uscsi_cdb = cdb; 28411 com->uscsi_cdblen = CDB_GROUP1; 28412 com->uscsi_bufaddr = buffer; 28413 com->uscsi_buflen = 0x04; 28414 com->uscsi_timeout = 300; 28415 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28416 28417 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28418 SD_PATH_STANDARD); 28419 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 28420 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 28421 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 28422 } else { 28423 hdr->cdth_trk0 = buffer[2]; 28424 hdr->cdth_trk1 = buffer[3]; 28425 } 28426 kmem_free(buffer, 4); 28427 kmem_free(com, sizeof (*com)); 28428 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 28429 return (EFAULT); 28430 } 28431 return (rval); 28432 } 28433 28434 28435 /* 28436 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 28437 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 28438 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 28439 * digital audio and extended architecture digital audio. These modes are 28440 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 28441 * MMC specs. 
28442 * 28443 * In addition to support for the various data formats these routines also 28444 * include support for devices that implement only the direct access READ 28445 * commands (0x08, 0x28), devices that implement the READ_CD commands 28446 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 28447 * READ CDXA commands (0xD8, 0xDB) 28448 */ 28449 28450 /* 28451 * Function: sr_read_mode1() 28452 * 28453 * Description: This routine is the driver entry point for handling CD-ROM 28454 * ioctl read mode1 requests (CDROMREADMODE1). 28455 * 28456 * Arguments: dev - the device 'dev_t' 28457 * data - pointer to user provided cd read structure specifying 28458 * the lba buffer address and length. 28459 * flag - this argument is a pass through to ddi_copyxxx() 28460 * directly from the mode argument of ioctl(). 28461 * 28462 * Return Code: the code returned by sd_send_scsi_cmd() 28463 * EFAULT if ddi_copyxxx() fails 28464 * ENXIO if fail ddi_get_soft_state 28465 * EINVAL if data pointer is NULL 28466 */ 28467 28468 static int 28469 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28470 { 28471 struct sd_lun *un; 28472 struct cdrom_read mode1_struct; 28473 struct cdrom_read *mode1 = &mode1_struct; 28474 int rval; 28475 sd_ssc_t *ssc; 28476 28477 #ifdef _MULTI_DATAMODEL 28478 /* To support ILP32 applications in an LP64 world */ 28479 struct cdrom_read32 cdrom_read32; 28480 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28481 #endif /* _MULTI_DATAMODEL */ 28482 28483 if (data == NULL) { 28484 return (EINVAL); 28485 } 28486 28487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28488 (un->un_state == SD_STATE_OFFLINE)) { 28489 return (ENXIO); 28490 } 28491 28492 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28493 "sd_read_mode1: entry: un:0x%p\n", un); 28494 28495 #ifdef _MULTI_DATAMODEL 28496 switch (ddi_model_convert_from(flag & FMODELS)) { 28497 case DDI_MODEL_ILP32: 28498 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28499 return (EFAULT); 28500 } 28501 /* Convert the ILP32 uscsi data from the application to LP64 */ 28502 cdrom_read32tocdrom_read(cdrd32, mode1); 28503 break; 28504 case DDI_MODEL_NONE: 28505 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28506 return (EFAULT); 28507 } 28508 } 28509 #else /* ! _MULTI_DATAMODEL */ 28510 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28511 return (EFAULT); 28512 } 28513 #endif /* _MULTI_DATAMODEL */ 28514 28515 ssc = sd_ssc_init(un); 28516 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 28517 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28518 sd_ssc_fini(ssc); 28519 28520 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28521 "sd_read_mode1: exit: un:0x%p\n", un); 28522 28523 return (rval); 28524 } 28525 28526 28527 /* 28528 * Function: sr_read_cd_mode2() 28529 * 28530 * Description: This routine is the driver entry point for handling CD-ROM 28531 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28532 * support the READ CD (0xBE) command or the 1st generation 28533 * READ CD (0xD4) command. 28534 * 28535 * Arguments: dev - the device 'dev_t' 28536 * data - pointer to user provided cd read structure specifying 28537 * the lba buffer address and length. 28538 * flag - this argument is a pass through to ddi_copyxxx() 28539 * directly from the mode argument of ioctl(). 
28540 * 28541 * Return Code: the code returned by sd_send_scsi_cmd() 28542 * EFAULT if ddi_copyxxx() fails 28543 * ENXIO if fail ddi_get_soft_state 28544 * EINVAL if data pointer is NULL 28545 */ 28546 28547 static int 28548 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 28549 { 28550 struct sd_lun *un; 28551 struct uscsi_cmd *com; 28552 struct cdrom_read mode2_struct; 28553 struct cdrom_read *mode2 = &mode2_struct; 28554 uchar_t cdb[CDB_GROUP5]; 28555 int nblocks; 28556 int rval; 28557 #ifdef _MULTI_DATAMODEL 28558 /* To support ILP32 applications in an LP64 world */ 28559 struct cdrom_read32 cdrom_read32; 28560 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28561 #endif /* _MULTI_DATAMODEL */ 28562 28563 if (data == NULL) { 28564 return (EINVAL); 28565 } 28566 28567 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28568 (un->un_state == SD_STATE_OFFLINE)) { 28569 return (ENXIO); 28570 } 28571 28572 #ifdef _MULTI_DATAMODEL 28573 switch (ddi_model_convert_from(flag & FMODELS)) { 28574 case DDI_MODEL_ILP32: 28575 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28576 return (EFAULT); 28577 } 28578 /* Convert the ILP32 uscsi data from the application to LP64 */ 28579 cdrom_read32tocdrom_read(cdrd32, mode2); 28580 break; 28581 case DDI_MODEL_NONE: 28582 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28583 return (EFAULT); 28584 } 28585 break; 28586 } 28587 28588 #else /* ! _MULTI_DATAMODEL */ 28589 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28590 return (EFAULT); 28591 } 28592 #endif /* _MULTI_DATAMODEL */ 28593 28594 bzero(cdb, sizeof (cdb)); 28595 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 28596 /* Read command supported by 1st generation atapi drives */ 28597 cdb[0] = SCMD_READ_CDD4; 28598 } else { 28599 /* Universal CD Access Command */ 28600 cdb[0] = SCMD_READ_CD; 28601 } 28602 28603 /* 28604 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 28605 */ 28606 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 28607 28608 /* set the start address */ 28609 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 28610 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 28611 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28612 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 28613 28614 /* set the transfer length */ 28615 nblocks = mode2->cdread_buflen / 2336; 28616 cdb[6] = (uchar_t)(nblocks >> 16); 28617 cdb[7] = (uchar_t)(nblocks >> 8); 28618 cdb[8] = (uchar_t)nblocks; 28619 28620 /* set the filter bits */ 28621 cdb[9] = CDROM_READ_CD_USERDATA; 28622 28623 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28624 com->uscsi_cdb = (caddr_t)cdb; 28625 com->uscsi_cdblen = sizeof (cdb); 28626 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28627 com->uscsi_buflen = mode2->cdread_buflen; 28628 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28629 28630 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28631 SD_PATH_STANDARD); 28632 kmem_free(com, sizeof (*com)); 28633 return (rval); 28634 } 28635 28636 28637 /* 28638 * Function: sr_read_mode2() 28639 * 28640 * Description: This routine is the driver entry point for handling CD-ROM 28641 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28642 * do not support the READ CD (0xBE) command. 28643 * 28644 * Arguments: dev - the device 'dev_t' 28645 * data - pointer to user provided cd read structure specifying 28646 * the lba buffer address and length. 28647 * flag - this argument is a pass through to ddi_copyxxx() 28648 * directly from the mode argument of ioctl(). 
28649 * 28650 * Return Code: the code returned by sd_send_scsi_cmd() 28651 * EFAULT if ddi_copyxxx() fails 28652 * ENXIO if fail ddi_get_soft_state 28653 * EINVAL if data pointer is NULL 28654 * EIO if fail to reset block size 28655 * EAGAIN if commands are in progress in the driver 28656 */ 28657 28658 static int 28659 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28660 { 28661 struct sd_lun *un; 28662 struct cdrom_read mode2_struct; 28663 struct cdrom_read *mode2 = &mode2_struct; 28664 int rval; 28665 uint32_t restore_blksize; 28666 struct uscsi_cmd *com; 28667 uchar_t cdb[CDB_GROUP0]; 28668 int nblocks; 28669 28670 #ifdef _MULTI_DATAMODEL 28671 /* To support ILP32 applications in an LP64 world */ 28672 struct cdrom_read32 cdrom_read32; 28673 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28674 #endif /* _MULTI_DATAMODEL */ 28675 28676 if (data == NULL) { 28677 return (EINVAL); 28678 } 28679 28680 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28681 (un->un_state == SD_STATE_OFFLINE)) { 28682 return (ENXIO); 28683 } 28684 28685 /* 28686 * Because this routine will update the device and driver block size 28687 * being used we want to make sure there are no commands in progress. 28688 * If commands are in progress the user will have to try again. 28689 * 28690 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28691 * in sdioctl to protect commands from sdioctl through to the top of 28692 * sd_uscsi_strategy. See sdioctl for details. 28693 */ 28694 mutex_enter(SD_MUTEX(un)); 28695 if (un->un_ncmds_in_driver != 1) { 28696 mutex_exit(SD_MUTEX(un)); 28697 return (EAGAIN); 28698 } 28699 mutex_exit(SD_MUTEX(un)); 28700 28701 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28702 "sd_read_mode2: entry: un:0x%p\n", un); 28703 28704 #ifdef _MULTI_DATAMODEL 28705 switch (ddi_model_convert_from(flag & FMODELS)) { 28706 case DDI_MODEL_ILP32: 28707 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28708 return (EFAULT); 28709 } 28710 /* Convert the ILP32 uscsi data from the application to LP64 */ 28711 cdrom_read32tocdrom_read(cdrd32, mode2); 28712 break; 28713 case DDI_MODEL_NONE: 28714 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28715 return (EFAULT); 28716 } 28717 break; 28718 } 28719 #else /* ! 
_MULTI_DATAMODEL */ 28720 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 28721 return (EFAULT); 28722 } 28723 #endif /* _MULTI_DATAMODEL */ 28724 28725 /* Store the current target block size for restoration later */ 28726 restore_blksize = un->un_tgt_blocksize; 28727 28728 /* Change the device and soft state target block size to 2336 */ 28729 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28730 rval = EIO; 28731 goto done; 28732 } 28733 28734 28735 bzero(cdb, sizeof (cdb)); 28736 28737 /* set READ operation */ 28738 cdb[0] = SCMD_READ; 28739 28740 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28741 mode2->cdread_lba >>= 2; 28742 28743 /* set the start address */ 28744 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 28745 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28746 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28747 28748 /* set the transfer length */ 28749 nblocks = mode2->cdread_buflen / 2336; 28750 cdb[4] = (uchar_t)nblocks & 0xFF; 28751 28752 /* build command */ 28753 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28754 com->uscsi_cdb = (caddr_t)cdb; 28755 com->uscsi_cdblen = sizeof (cdb); 28756 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28757 com->uscsi_buflen = mode2->cdread_buflen; 28758 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28759 28760 /* 28761 * Issue SCSI command with user space address for read buffer. 28762 * 28763 * This sends the command through main channel in the driver. 28764 * 28765 * Since this is accessed via an IOCTL call, we go through the 28766 * standard path, so that if the device was powered down, then 28767 * it would be 'awakened' to handle the command. 28768 */ 28769 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28770 SD_PATH_STANDARD); 28771 28772 kmem_free(com, sizeof (*com)); 28773 28774 /* Restore the device and soft state target block size */ 28775 if (sr_sector_mode(dev, restore_blksize) != 0) { 28776 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28777 "can't do switch back to mode 1\n"); 28778 /* 28779 * If sd_send_scsi_READ succeeded we still need to report 28780 * an error because we failed to reset the block size 28781 */ 28782 if (rval == 0) { 28783 rval = EIO; 28784 } 28785 } 28786 28787 done: 28788 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28789 "sd_read_mode2: exit: un:0x%p\n", un); 28790 28791 return (rval); 28792 } 28793 28794 28795 /* 28796 * Function: sr_sector_mode() 28797 * 28798 * Description: This utility function is used by sr_read_mode2 to set the target 28799 * block size based on the user specified size. This is a legacy 28800 * implementation based upon a vendor specific mode page 28801 * 28802 * Arguments: dev - the device 'dev_t' 28803 * data - flag indicating if block size is being set to 2336 or 28804 * 512. 
28805 * 28806 * Return Code: the code returned by sd_send_scsi_cmd() 28807 * EFAULT if ddi_copyxxx() fails 28808 * ENXIO if fail ddi_get_soft_state 28809 * EINVAL if data pointer is NULL 28810 */ 28811 28812 static int 28813 sr_sector_mode(dev_t dev, uint32_t blksize) 28814 { 28815 struct sd_lun *un; 28816 uchar_t *sense; 28817 uchar_t *select; 28818 int rval; 28819 sd_ssc_t *ssc; 28820 28821 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28822 (un->un_state == SD_STATE_OFFLINE)) { 28823 return (ENXIO); 28824 } 28825 28826 sense = kmem_zalloc(20, KM_SLEEP); 28827 28828 /* Note: This is a vendor specific mode page (0x81) */ 28829 ssc = sd_ssc_init(un); 28830 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 28831 SD_PATH_STANDARD); 28832 sd_ssc_fini(ssc); 28833 if (rval != 0) { 28834 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28835 "sr_sector_mode: Mode Sense failed\n"); 28836 kmem_free(sense, 20); 28837 return (rval); 28838 } 28839 select = kmem_zalloc(20, KM_SLEEP); 28840 select[3] = 0x08; 28841 select[10] = ((blksize >> 8) & 0xff); 28842 select[11] = (blksize & 0xff); 28843 select[12] = 0x01; 28844 select[13] = 0x06; 28845 select[14] = sense[14]; 28846 select[15] = sense[15]; 28847 if (blksize == SD_MODE2_BLKSIZE) { 28848 select[14] |= 0x01; 28849 } 28850 28851 ssc = sd_ssc_init(un); 28852 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 28853 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28854 sd_ssc_fini(ssc); 28855 if (rval != 0) { 28856 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28857 "sr_sector_mode: Mode Select failed\n"); 28858 } else { 28859 /* 28860 * Only update the softstate block size if we successfully 28861 * changed the device block mode. 28862 */ 28863 mutex_enter(SD_MUTEX(un)); 28864 sd_update_block_info(un, blksize, 0); 28865 mutex_exit(SD_MUTEX(un)); 28866 } 28867 kmem_free(sense, 20); 28868 kmem_free(select, 20); 28869 return (rval); 28870 } 28871 28872 28873 /* 28874 * Function: sr_read_cdda() 28875 * 28876 * Description: This routine is the driver entry point for handling CD-ROM 28877 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 28878 * the target supports CDDA these requests are handled via a vendor 28879 * specific command (0xD8) If the target does not support CDDA 28880 * these requests are handled via the READ CD command (0xBE). 28881 * 28882 * Arguments: dev - the device 'dev_t' 28883 * data - pointer to user provided CD-DA structure specifying 28884 * the track starting address, transfer length, and 28885 * subcode options. 28886 * flag - this argument is a pass through to ddi_copyxxx() 28887 * directly from the mode argument of ioctl(). 
28888 * 28889 * Return Code: the code returned by sd_send_scsi_cmd() 28890 * EFAULT if ddi_copyxxx() fails 28891 * ENXIO if fail ddi_get_soft_state 28892 * EINVAL if invalid arguments are provided 28893 * ENOTTY 28894 */ 28895 28896 static int 28897 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28898 { 28899 struct sd_lun *un; 28900 struct uscsi_cmd *com; 28901 struct cdrom_cdda *cdda; 28902 int rval; 28903 size_t buflen; 28904 char cdb[CDB_GROUP5]; 28905 28906 #ifdef _MULTI_DATAMODEL 28907 /* To support ILP32 applications in an LP64 world */ 28908 struct cdrom_cdda32 cdrom_cdda32; 28909 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28910 #endif /* _MULTI_DATAMODEL */ 28911 28912 if (data == NULL) { 28913 return (EINVAL); 28914 } 28915 28916 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28917 return (ENXIO); 28918 } 28919 28920 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28921 28922 #ifdef _MULTI_DATAMODEL 28923 switch (ddi_model_convert_from(flag & FMODELS)) { 28924 case DDI_MODEL_ILP32: 28925 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28926 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28927 "sr_read_cdda: ddi_copyin Failed\n"); 28928 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28929 return (EFAULT); 28930 } 28931 /* Convert the ILP32 uscsi data from the application to LP64 */ 28932 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28933 break; 28934 case DDI_MODEL_NONE: 28935 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28936 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28937 "sr_read_cdda: ddi_copyin Failed\n"); 28938 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28939 return (EFAULT); 28940 } 28941 break; 28942 } 28943 #else /* ! _MULTI_DATAMODEL */ 28944 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28945 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28946 "sr_read_cdda: ddi_copyin Failed\n"); 28947 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28948 return (EFAULT); 28949 } 28950 #endif /* _MULTI_DATAMODEL */ 28951 28952 /* 28953 * Since MMC-2 expects max 3 bytes for length, check if the 28954 * length input is greater than 3 bytes 28955 */ 28956 if ((cdda->cdda_length & 0xFF000000) != 0) { 28957 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28958 "cdrom transfer length too large: %d (limit %d)\n", 28959 cdda->cdda_length, 0xFFFFFF); 28960 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28961 return (EINVAL); 28962 } 28963 28964 switch (cdda->cdda_subcode) { 28965 case CDROM_DA_NO_SUBCODE: 28966 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28967 break; 28968 case CDROM_DA_SUBQ: 28969 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28970 break; 28971 case CDROM_DA_ALL_SUBCODE: 28972 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28973 break; 28974 case CDROM_DA_SUBCODE_ONLY: 28975 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28976 break; 28977 default: 28978 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28979 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28980 cdda->cdda_subcode); 28981 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28982 return (EINVAL); 28983 } 28984 28985 /* Build and send the command */ 28986 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28987 bzero(cdb, CDB_GROUP5); 28988 28989 if (un->un_f_cfg_cdda == TRUE) { 28990 cdb[0] = (char)SCMD_READ_CD; 28991 cdb[1] = 0x04; 28992 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28993 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28994 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28995 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28996 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28997 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28998 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28999 cdb[9] = 0x10; 29000 switch (cdda->cdda_subcode) { 29001 case CDROM_DA_NO_SUBCODE : 29002 cdb[10] = 0x0; 29003 break; 29004 case CDROM_DA_SUBQ : 29005 cdb[10] = 0x2; 29006 break; 29007 case CDROM_DA_ALL_SUBCODE : 29008 cdb[10] = 0x1; 29009 break; 29010 case CDROM_DA_SUBCODE_ONLY : 29011 /* FALLTHROUGH */ 29012 default : 29013 kmem_free(cdda, sizeof (struct cdrom_cdda)); 29014 kmem_free(com, sizeof (*com)); 29015 return (ENOTTY); 29016 } 29017 } else { 29018 cdb[0] = (char)SCMD_READ_CDDA; 29019 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 29020 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 29021 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 29022 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 29023 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 29024 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 29025 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 29026 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 29027 cdb[10] = cdda->cdda_subcode; 29028 } 29029 29030 com->uscsi_cdb = cdb; 29031 com->uscsi_cdblen = CDB_GROUP5; 29032 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 29033 com->uscsi_buflen = buflen; 29034 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 29035 29036 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 29037 SD_PATH_STANDARD); 29038 29039 kmem_free(cdda, sizeof (struct cdrom_cdda)); 29040 kmem_free(com, sizeof (*com)); 29041 return (rval); 29042 } 29043 29044 29045 /* 29046 * Function: sr_read_cdxa() 29047 * 29048 * Description: This routine is the driver entry point for handling CD-ROM 29049 * ioctl requests to return CD-XA (Extended Architecture) data. 29050 * (CDROMCDXA). 29051 * 29052 * Arguments: dev - the device 'dev_t' 29053 * data - pointer to user provided CD-XA structure specifying 29054 * the data starting address, transfer length, and format 29055 * flag - this argument is a pass through to ddi_copyxxx() 29056 * directly from the mode argument of ioctl(). 29057 * 29058 * Return Code: the code returned by sd_send_scsi_cmd() 29059 * EFAULT if ddi_copyxxx() fails 29060 * ENXIO if fail ddi_get_soft_state 29061 * EINVAL if data pointer is NULL 29062 */ 29063 29064 static int 29065 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 29066 { 29067 struct sd_lun *un; 29068 struct uscsi_cmd *com; 29069 struct cdrom_cdxa *cdxa; 29070 int rval; 29071 size_t buflen; 29072 char cdb[CDB_GROUP5]; 29073 uchar_t read_flags; 29074 29075 #ifdef _MULTI_DATAMODEL 29076 /* To support ILP32 applications in an LP64 world */ 29077 struct cdrom_cdxa32 cdrom_cdxa32; 29078 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 29079 #endif /* _MULTI_DATAMODEL */ 29080 29081 if (data == NULL) { 29082 return (EINVAL); 29083 } 29084 29085 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 29086 return (ENXIO); 29087 } 29088 29089 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 29090 29091 #ifdef _MULTI_DATAMODEL 29092 switch (ddi_model_convert_from(flag & FMODELS)) { 29093 case DDI_MODEL_ILP32: 29094 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 29095 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29096 return (EFAULT); 29097 } 29098 /* 29099 * Convert the ILP32 uscsi data from the 29100 * application to LP64 for internal use. 
29101 */ 29102 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 29103 break; 29104 case DDI_MODEL_NONE: 29105 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 29106 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29107 return (EFAULT); 29108 } 29109 break; 29110 } 29111 #else /* ! _MULTI_DATAMODEL */ 29112 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 29113 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29114 return (EFAULT); 29115 } 29116 #endif /* _MULTI_DATAMODEL */ 29117 29118 /* 29119 * Since MMC-2 expects max 3 bytes for length, check if the 29120 * length input is greater than 3 bytes 29121 */ 29122 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 29123 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 29124 "cdrom transfer length too large: %d (limit %d)\n", 29125 cdxa->cdxa_length, 0xFFFFFF); 29126 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29127 return (EINVAL); 29128 } 29129 29130 switch (cdxa->cdxa_format) { 29131 case CDROM_XA_DATA: 29132 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 29133 read_flags = 0x10; 29134 break; 29135 case CDROM_XA_SECTOR_DATA: 29136 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 29137 read_flags = 0xf8; 29138 break; 29139 case CDROM_XA_DATA_W_ERROR: 29140 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 29141 read_flags = 0xfc; 29142 break; 29143 default: 29144 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29145 "sr_read_cdxa: Format '0x%x' Not Supported\n", 29146 cdxa->cdxa_format); 29147 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29148 return (EINVAL); 29149 } 29150 29151 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 29152 bzero(cdb, CDB_GROUP5); 29153 if (un->un_f_mmc_cap == TRUE) { 29154 cdb[0] = (char)SCMD_READ_CD; 29155 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 29156 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 29157 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 29158 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 29159 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 29160 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 29161 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 29162 cdb[9] = (char)read_flags; 29163 } else { 29164 /* 29165 * Note: A vendor specific command (0xDB) is being used her to 29166 * request a read of all subcodes. 
29167 */ 29168 cdb[0] = (char)SCMD_READ_CDXA; 29169 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 29170 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 29171 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 29172 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 29173 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 29174 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 29175 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 29176 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 29177 cdb[10] = cdxa->cdxa_format; 29178 } 29179 com->uscsi_cdb = cdb; 29180 com->uscsi_cdblen = CDB_GROUP5; 29181 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 29182 com->uscsi_buflen = buflen; 29183 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 29184 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 29185 SD_PATH_STANDARD); 29186 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 29187 kmem_free(com, sizeof (*com)); 29188 return (rval); 29189 } 29190 29191 29192 /* 29193 * Function: sr_eject() 29194 * 29195 * Description: This routine is the driver entry point for handling CD-ROM 29196 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 29197 * 29198 * Arguments: dev - the device 'dev_t' 29199 * 29200 * Return Code: the code returned by sd_send_scsi_cmd() 29201 */ 29202 29203 static int 29204 sr_eject(dev_t dev) 29205 { 29206 struct sd_lun *un; 29207 int rval; 29208 sd_ssc_t *ssc; 29209 29210 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29211 (un->un_state == SD_STATE_OFFLINE)) { 29212 return (ENXIO); 29213 } 29214 29215 /* 29216 * To prevent race conditions with the eject 29217 * command, keep track of an eject command as 29218 * it progresses. If we are already handling 29219 * an eject command in the driver for the given 29220 * unit and another request to eject is received 29221 * immediately return EAGAIN so we don't lose 29222 * the command if the current eject command fails. 29223 */ 29224 mutex_enter(SD_MUTEX(un)); 29225 if (un->un_f_ejecting == TRUE) { 29226 mutex_exit(SD_MUTEX(un)); 29227 return (EAGAIN); 29228 } 29229 un->un_f_ejecting = TRUE; 29230 mutex_exit(SD_MUTEX(un)); 29231 29232 ssc = sd_ssc_init(un); 29233 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 29234 SD_PATH_STANDARD); 29235 sd_ssc_fini(ssc); 29236 29237 if (rval != 0) { 29238 mutex_enter(SD_MUTEX(un)); 29239 un->un_f_ejecting = FALSE; 29240 mutex_exit(SD_MUTEX(un)); 29241 return (rval); 29242 } 29243 29244 ssc = sd_ssc_init(un); 29245 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 29246 SD_TARGET_EJECT, SD_PATH_STANDARD); 29247 sd_ssc_fini(ssc); 29248 29249 if (rval == 0) { 29250 mutex_enter(SD_MUTEX(un)); 29251 sr_ejected(un); 29252 un->un_mediastate = DKIO_EJECTED; 29253 un->un_f_ejecting = FALSE; 29254 cv_broadcast(&un->un_state_cv); 29255 mutex_exit(SD_MUTEX(un)); 29256 } else { 29257 mutex_enter(SD_MUTEX(un)); 29258 un->un_f_ejecting = FALSE; 29259 mutex_exit(SD_MUTEX(un)); 29260 } 29261 return (rval); 29262 } 29263 29264 29265 /* 29266 * Function: sr_ejected() 29267 * 29268 * Description: This routine updates the soft state structure to invalidate the 29269 * geometry information after the media has been ejected or a 29270 * media eject has been detected. 
*
29272 * Arguments: un - driver soft state (unit) structure
29273 */
29274
29275 static void
29276 sr_ejected(struct sd_lun *un)
29277 {
29278 struct sd_errstats *stp;
29279
29280 ASSERT(un != NULL);
29281 ASSERT(mutex_owned(SD_MUTEX(un)));
29282
29283 un->un_f_blockcount_is_valid = FALSE;
29284 un->un_f_tgt_blocksize_is_valid = FALSE;
29285 mutex_exit(SD_MUTEX(un));
29286 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
29287 mutex_enter(SD_MUTEX(un));
29288
29289 if (un->un_errstats != NULL) {
29290 stp = (struct sd_errstats *)un->un_errstats->ks_data;
29291 stp->sd_capacity.value.ui64 = 0;
29292 }
29293 }
29294
29295
29296 /*
29297 * Function: sr_check_wp()
29298 *
29299 * Description: This routine checks the write protection of removable media
29300 * and hotpluggable devices via the write protect bit of
29301 * the mode page header device specific field. Some devices
29302 * choke on an unsupported mode page. To work around this issue,
29303 * this routine requests mode page 0x3f (return all pages)
29304 * for all device types.
29305 *
29306 * Arguments: dev - the device 'dev_t'
29307 *
29308 * Return Code: int indicating if the device is write protected (1) or not (0)
29309 *
29310 * Context: Kernel thread.
29311 *
29312 */
29313
29314 static int
29315 sr_check_wp(dev_t dev)
29316 {
29317 struct sd_lun *un;
29318 uchar_t device_specific;
29319 uchar_t *sense;
29320 int hdrlen;
29321 int rval = FALSE;
29322 int status;
29323 sd_ssc_t *ssc;
29324
29325 /*
29326 * Note: The return codes for this routine should be reworked to
29327 * properly handle the case of a NULL softstate.
29328 */
29329 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
29330 return (FALSE);
29331 }
29332
29333 if (un->un_f_cfg_is_atapi == TRUE) {
29334 /*
29335 * The mode page contents are not required; set the allocation
29336 * length for the mode page header only
29337 */
29338 hdrlen = MODE_HEADER_LENGTH_GRP2;
29339 sense = kmem_zalloc(hdrlen, KM_SLEEP);
29340 ssc = sd_ssc_init(un);
29341 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
29342 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
29343 sd_ssc_fini(ssc);
29344 if (status != 0)
29345 goto err_exit;
29346 device_specific =
29347 ((struct mode_header_grp2 *)sense)->device_specific;
29348 } else {
29349 hdrlen = MODE_HEADER_LENGTH;
29350 sense = kmem_zalloc(hdrlen, KM_SLEEP);
29351 ssc = sd_ssc_init(un);
29352 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
29353 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
29354 sd_ssc_fini(ssc);
29355 if (status != 0)
29356 goto err_exit;
29357 device_specific =
29358 ((struct mode_header *)sense)->device_specific;
29359 }
29360
29361
29362 /*
29363 * If the mode sense above failed, we branched to err_exit and return
29364 * FALSE: not all disks understand this query, and such devices are
29365 * assumed not writable. Otherwise report the write protect bit.
29366 */
29367 if (device_specific & WRITE_PROTECT) {
29368 rval = TRUE;
29369 }
29370
29371 err_exit:
29372 kmem_free(sense, hdrlen);
29373 return (rval);
29374 }
29375
29376 /*
29377 * Function: sr_volume_ctrl()
29378 *
29379 * Description: This routine is the driver entry point for handling CD-ROM
29380 * audio output volume ioctl requests. (CDROMVOLCTRL)
29381 *
29382 * Arguments: dev - the device 'dev_t'
29383 * data - pointer to user audio volume control structure
29384 * flag - this argument is a pass through to ddi_copyxxx()
29385 * directly from the mode argument of ioctl().
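 *
 * A userland caller reaches this entry point roughly as follows
 * (a sketch; error handling omitted, fd is an open CD-ROM device):
 *
 *	struct cdrom_volctrl v;
 *	v.channel0 = v.channel1 = 128;
 *	(void) ioctl(fd, CDROMVOLCTRL, &v);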
29386 * 29387 * Return Code: the code returned by sd_send_scsi_cmd() 29388 * EFAULT if ddi_copyxxx() fails 29389 * ENXIO if fail ddi_get_soft_state 29390 * EINVAL if data pointer is NULL 29391 * 29392 */ 29393 29394 static int 29395 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 29396 { 29397 struct sd_lun *un; 29398 struct cdrom_volctrl volume; 29399 struct cdrom_volctrl *vol = &volume; 29400 uchar_t *sense_page; 29401 uchar_t *select_page; 29402 uchar_t *sense; 29403 uchar_t *select; 29404 int sense_buflen; 29405 int select_buflen; 29406 int rval; 29407 sd_ssc_t *ssc; 29408 29409 if (data == NULL) { 29410 return (EINVAL); 29411 } 29412 29413 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29414 (un->un_state == SD_STATE_OFFLINE)) { 29415 return (ENXIO); 29416 } 29417 29418 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 29419 return (EFAULT); 29420 } 29421 29422 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29423 struct mode_header_grp2 *sense_mhp; 29424 struct mode_header_grp2 *select_mhp; 29425 int bd_len; 29426 29427 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 29428 select_buflen = MODE_HEADER_LENGTH_GRP2 + 29429 MODEPAGE_AUDIO_CTRL_LEN; 29430 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29431 select = kmem_zalloc(select_buflen, KM_SLEEP); 29432 ssc = sd_ssc_init(un); 29433 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 29434 sense_buflen, MODEPAGE_AUDIO_CTRL, 29435 SD_PATH_STANDARD); 29436 sd_ssc_fini(ssc); 29437 29438 if (rval != 0) { 29439 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 29440 "sr_volume_ctrl: Mode Sense Failed\n"); 29441 kmem_free(sense, sense_buflen); 29442 kmem_free(select, select_buflen); 29443 return (rval); 29444 } 29445 sense_mhp = (struct mode_header_grp2 *)sense; 29446 select_mhp = (struct mode_header_grp2 *)select; 29447 bd_len = (sense_mhp->bdesc_length_hi << 8) | 29448 sense_mhp->bdesc_length_lo; 29449 if (bd_len > MODE_BLK_DESC_LENGTH) { 29450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29451 "sr_volume_ctrl: Mode Sense returned invalid " 29452 "block descriptor length\n"); 29453 kmem_free(sense, sense_buflen); 29454 kmem_free(select, select_buflen); 29455 return (EIO); 29456 } 29457 sense_page = (uchar_t *) 29458 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 29459 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 29460 select_mhp->length_msb = 0; 29461 select_mhp->length_lsb = 0; 29462 select_mhp->bdesc_length_hi = 0; 29463 select_mhp->bdesc_length_lo = 0; 29464 } else { 29465 struct mode_header *sense_mhp, *select_mhp; 29466 29467 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29468 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29469 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29470 select = kmem_zalloc(select_buflen, KM_SLEEP); 29471 ssc = sd_ssc_init(un); 29472 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 29473 sense_buflen, MODEPAGE_AUDIO_CTRL, 29474 SD_PATH_STANDARD); 29475 sd_ssc_fini(ssc); 29476 29477 if (rval != 0) { 29478 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29479 "sr_volume_ctrl: Mode Sense Failed\n"); 29480 kmem_free(sense, sense_buflen); 29481 kmem_free(select, select_buflen); 29482 return (rval); 29483 } 29484 sense_mhp = (struct mode_header *)sense; 29485 select_mhp = (struct mode_header *)select; 29486 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29488 "sr_volume_ctrl: Mode Sense returned invalid " 29489 "block descriptor length\n"); 29490 
kmem_free(sense, sense_buflen);
29491 kmem_free(select, select_buflen);
29492 return (EIO);
29493 }
29494 sense_page = (uchar_t *)
29495 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29496 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29497 select_mhp->length = 0;
29498 select_mhp->bdesc_length = 0;
29499 }
29500 /*
29501 * Note: An audio control data structure could be created and overlaid
29502 * on the following, in place of the array indexing method used here.
29503 */
29504
29505 /* Build the select data for the user volume data */
29506 select_page[0] = MODEPAGE_AUDIO_CTRL;
29507 select_page[1] = 0xE;
29508 /* Set the immediate bit */
29509 select_page[2] = 0x04;
29510 /* Zero out reserved fields */
29511 select_page[3] = 0x00;
29512 select_page[4] = 0x00;
29513 /* Return sense data for fields not to be modified */
29514 select_page[5] = sense_page[5];
29515 select_page[6] = sense_page[6];
29516 select_page[7] = sense_page[7];
29517 /* Set the user specified volume levels for channels 0 and 1 */
29518 select_page[8] = 0x01;
29519 select_page[9] = vol->channel0;
29520 select_page[10] = 0x02;
29521 select_page[11] = vol->channel1;
29522 /* Channels 2 and 3 are currently unsupported, so return the sense data */
29523 select_page[12] = sense_page[12];
29524 select_page[13] = sense_page[13];
29525 select_page[14] = sense_page[14];
29526 select_page[15] = sense_page[15];
29527
29528 ssc = sd_ssc_init(un);
29529 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29530 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29531 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29532 } else {
29533 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29534 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29535 }
29536 sd_ssc_fini(ssc);
29537
29538 kmem_free(sense, sense_buflen);
29539 kmem_free(select, select_buflen);
29540 return (rval);
29541 }
29542
29543
29544 /*
29545 * Function: sr_read_sony_session_offset()
29546 *
29547 * Description: This routine is the driver entry point for handling CD-ROM
29548 * ioctl requests for session offset information. (CDROMREADOFFSET)
29549 * The address of the first track in the last session of a
29550 * multi-session CD-ROM is returned.
29551 *
29552 * Note: This routine uses a vendor specific key value in the
29553 * command control field without implementing any vendor check here
29554 * or in the ioctl routine.
29555 *
29556 * Arguments: dev - the device 'dev_t'
29557 * data - pointer to an int to hold the requested address
29558 * flag - this argument is a pass through to ddi_copyxxx()
29559 * directly from the mode argument of ioctl().
29560 *
29561 * Return Code: the code returned by sd_send_scsi_cmd()
29562 * EFAULT if ddi_copyxxx() fails
29563 * ENXIO if fail ddi_get_soft_state
29564 * EINVAL if data pointer is NULL
29565 */
29566
29567 static int
29568 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29569 {
29570 struct sd_lun *un;
29571 struct uscsi_cmd *com;
29572 caddr_t buffer;
29573 char cdb[CDB_GROUP1];
29574 int session_offset = 0;
29575 int rval;
29576
29577 if (data == NULL) {
29578 return (EINVAL);
29579 }
29580
29581 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29582 (un->un_state == SD_STATE_OFFLINE)) {
29583 return (ENXIO);
29584 }
29585
29586 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29587 bzero(cdb, CDB_GROUP1);
29588 cdb[0] = SCMD_READ_TOC;
29589 /*
29590 * Bytes 7 & 8 hold the 12-byte allocation length for a single entry.
* (4 byte TOC response header + 8 byte response data)
29592 */
29593 cdb[8] = SONY_SESSION_OFFSET_LEN;
29594 /* Byte 9 is the control byte. A vendor specific value is used */
29595 cdb[9] = SONY_SESSION_OFFSET_KEY;
29596 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29597 com->uscsi_cdb = cdb;
29598 com->uscsi_cdblen = CDB_GROUP1;
29599 com->uscsi_bufaddr = buffer;
29600 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29601 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
29602
29603 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29604 SD_PATH_STANDARD);
29605 if (rval != 0) {
29606 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29607 kmem_free(com, sizeof (*com));
29608 return (rval);
29609 }
29610 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29611 session_offset =
29612 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29613 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29614 /*
29615 * The offset is returned in units of the current lbasize blocks.
29616 * Convert it to 2K blocks before returning it to the user.
29617 */
29618 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29619 session_offset >>= 2;
29620 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29621 session_offset >>= 1;
29622 }
29623 }
29624
29625 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29626 rval = EFAULT;
29627 }
29628
29629 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29630 kmem_free(com, sizeof (*com));
29631 return (rval);
29632 }
29633
29634
29635 /*
29636 * Function: sd_wm_cache_constructor()
29637 *
29638 * Description: Cache constructor for the wmap cache for the read/modify/write
29639 * devices.
29640 *
29641 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29642 * un - sd_lun structure for the device.
29643 * flags - the km flags passed to the constructor
29644 *
29645 * Return Code: 0 on success.
29646 * -1 on failure.
29647 */
29648
29649 /*ARGSUSED*/
29650 static int
29651 sd_wm_cache_constructor(void *wm, void *un, int flags)
29652 {
29653 bzero(wm, sizeof (struct sd_w_map));
29654 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29655 return (0);
29656 }
29657
29658
29659 /*
29660 * Function: sd_wm_cache_destructor()
29661 *
29662 * Description: Cache destructor for the wmap cache for the read/modify/write
29663 * devices.
29664 *
29665 * Arguments: wm - A pointer to the sd_w_map being destroyed.
29666 * un - sd_lun structure for the device.
29667 */
29668 /*ARGSUSED*/
29669 static void
29670 sd_wm_cache_destructor(void *wm, void *un)
29671 {
29672 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29673 }
29674
29675
29676 /*
29677 * Function: sd_range_lock()
29678 *
29679 * Description: Lock the specified range of blocks to ensure that a read,
29680 * modify, write cycle is atomic and no other i/o writes
29681 * to the same location. The range is specified in terms
29682 * of start and end blocks. Block numbers are the actual
29683 * media block numbers and not system block numbers.
29684 *
29685 * Arguments: un - sd_lun structure for the device.
29686 * startb - The starting block number
29687 * endb - The end block number
29688 * typ - type of i/o - simple/read_modify_write
29689 *
29690 * Return Code: wm - pointer to the wmap structure.
29691 *
29692 * Context: This routine can sleep.
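 *
 * A typical caller brackets its read-modify-write with the
 * lock/unlock pair (a sketch):
 *
 *	struct sd_w_map *wmp =
 *	    sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *	(read the blocks, modify the data, issue the write)
 *	sd_range_unlock(un, wmp);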
29693 */ 29694 29695 static struct sd_w_map * 29696 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 29697 { 29698 struct sd_w_map *wmp = NULL; 29699 struct sd_w_map *sl_wmp = NULL; 29700 struct sd_w_map *tmp_wmp; 29701 wm_state state = SD_WM_CHK_LIST; 29702 29703 29704 ASSERT(un != NULL); 29705 ASSERT(!mutex_owned(SD_MUTEX(un))); 29706 29707 mutex_enter(SD_MUTEX(un)); 29708 29709 while (state != SD_WM_DONE) { 29710 29711 switch (state) { 29712 case SD_WM_CHK_LIST: 29713 /* 29714 * This is the starting state. Check the wmap list 29715 * to see if the range is currently available. 29716 */ 29717 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 29718 /* 29719 * If this is a simple write and no rmw 29720 * i/o is pending then try to lock the 29721 * range as the range should be available. 29722 */ 29723 state = SD_WM_LOCK_RANGE; 29724 } else { 29725 tmp_wmp = sd_get_range(un, startb, endb); 29726 if (tmp_wmp != NULL) { 29727 if ((wmp != NULL) && ONLIST(un, wmp)) { 29728 /* 29729 * Should not keep onlist wmps 29730 * while waiting this macro 29731 * will also do wmp = NULL; 29732 */ 29733 FREE_ONLIST_WMAP(un, wmp); 29734 } 29735 /* 29736 * sl_wmp is the wmap on which wait 29737 * is done, since the tmp_wmp points 29738 * to the inuse wmap, set sl_wmp to 29739 * tmp_wmp and change the state to sleep 29740 */ 29741 sl_wmp = tmp_wmp; 29742 state = SD_WM_WAIT_MAP; 29743 } else { 29744 state = SD_WM_LOCK_RANGE; 29745 } 29746 29747 } 29748 break; 29749 29750 case SD_WM_LOCK_RANGE: 29751 ASSERT(un->un_wm_cache); 29752 /* 29753 * The range need to be locked, try to get a wmap. 29754 * First attempt it with NO_SLEEP, want to avoid a sleep 29755 * if possible as we will have to release the sd mutex 29756 * if we have to sleep. 29757 */ 29758 if (wmp == NULL) 29759 wmp = kmem_cache_alloc(un->un_wm_cache, 29760 KM_NOSLEEP); 29761 if (wmp == NULL) { 29762 mutex_exit(SD_MUTEX(un)); 29763 _NOTE(DATA_READABLE_WITHOUT_LOCK 29764 (sd_lun::un_wm_cache)) 29765 wmp = kmem_cache_alloc(un->un_wm_cache, 29766 KM_SLEEP); 29767 mutex_enter(SD_MUTEX(un)); 29768 /* 29769 * we released the mutex so recheck and go to 29770 * check list state. 29771 */ 29772 state = SD_WM_CHK_LIST; 29773 } else { 29774 /* 29775 * We exit out of state machine since we 29776 * have the wmap. Do the housekeeping first. 29777 * place the wmap on the wmap list if it is not 29778 * on it already and then set the state to done. 29779 */ 29780 wmp->wm_start = startb; 29781 wmp->wm_end = endb; 29782 wmp->wm_flags = typ | SD_WM_BUSY; 29783 if (typ & SD_WTYPE_RMW) { 29784 un->un_rmw_count++; 29785 } 29786 /* 29787 * If not already on the list then link 29788 */ 29789 if (!ONLIST(un, wmp)) { 29790 wmp->wm_next = un->un_wm; 29791 wmp->wm_prev = NULL; 29792 if (wmp->wm_next) 29793 wmp->wm_next->wm_prev = wmp; 29794 un->un_wm = wmp; 29795 } 29796 state = SD_WM_DONE; 29797 } 29798 break; 29799 29800 case SD_WM_WAIT_MAP: 29801 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 29802 /* 29803 * Wait is done on sl_wmp, which is set in the 29804 * check_list state. 29805 */ 29806 sl_wmp->wm_wanted_count++; 29807 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 29808 sl_wmp->wm_wanted_count--; 29809 /* 29810 * We can reuse the memory from the completed sl_wmp 29811 * lock range for our new lock, but only if noone is 29812 * waiting for it. 
29813 */ 29814 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 29815 if (sl_wmp->wm_wanted_count == 0) { 29816 if (wmp != NULL) { 29817 CHK_N_FREEWMP(un, wmp); 29818 } 29819 wmp = sl_wmp; 29820 } 29821 sl_wmp = NULL; 29822 /* 29823 * After waking up, need to recheck for availability of 29824 * range. 29825 */ 29826 state = SD_WM_CHK_LIST; 29827 break; 29828 29829 default: 29830 panic("sd_range_lock: " 29831 "Unknown state %d in sd_range_lock", state); 29832 /*NOTREACHED*/ 29833 } /* switch(state) */ 29834 29835 } /* while(state != SD_WM_DONE) */ 29836 29837 mutex_exit(SD_MUTEX(un)); 29838 29839 ASSERT(wmp != NULL); 29840 29841 return (wmp); 29842 } 29843 29844 29845 /* 29846 * Function: sd_get_range() 29847 * 29848 * Description: Find if there any overlapping I/O to this one 29849 * Returns the write-map of 1st such I/O, NULL otherwise. 29850 * 29851 * Arguments: un - sd_lun structure for the device. 29852 * startb - The starting block number 29853 * endb - The end block number 29854 * 29855 * Return Code: wm - pointer to the wmap structure. 29856 */ 29857 29858 static struct sd_w_map * 29859 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 29860 { 29861 struct sd_w_map *wmp; 29862 29863 ASSERT(un != NULL); 29864 29865 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 29866 if (!(wmp->wm_flags & SD_WM_BUSY)) { 29867 continue; 29868 } 29869 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 29870 break; 29871 } 29872 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 29873 break; 29874 } 29875 } 29876 29877 return (wmp); 29878 } 29879 29880 29881 /* 29882 * Function: sd_free_inlist_wmap() 29883 * 29884 * Description: Unlink and free a write map struct. 29885 * 29886 * Arguments: un - sd_lun structure for the device. 29887 * wmp - sd_w_map which needs to be unlinked. 29888 */ 29889 29890 static void 29891 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29892 { 29893 ASSERT(un != NULL); 29894 29895 if (un->un_wm == wmp) { 29896 un->un_wm = wmp->wm_next; 29897 } else { 29898 wmp->wm_prev->wm_next = wmp->wm_next; 29899 } 29900 29901 if (wmp->wm_next) { 29902 wmp->wm_next->wm_prev = wmp->wm_prev; 29903 } 29904 29905 wmp->wm_next = wmp->wm_prev = NULL; 29906 29907 kmem_cache_free(un->un_wm_cache, wmp); 29908 } 29909 29910 29911 /* 29912 * Function: sd_range_unlock() 29913 * 29914 * Description: Unlock the range locked by wm. 29915 * Free write map if nobody else is waiting on it. 29916 * 29917 * Arguments: un - sd_lun structure for the device. 29918 * wmp - sd_w_map which needs to be unlinked. 29919 */ 29920 29921 static void 29922 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29923 { 29924 ASSERT(un != NULL); 29925 ASSERT(wm != NULL); 29926 ASSERT(!mutex_owned(SD_MUTEX(un))); 29927 29928 mutex_enter(SD_MUTEX(un)); 29929 29930 if (wm->wm_flags & SD_WTYPE_RMW) { 29931 un->un_rmw_count--; 29932 } 29933 29934 if (wm->wm_wanted_count) { 29935 wm->wm_flags = 0; 29936 /* 29937 * Broadcast that the wmap is available now. 29938 */ 29939 cv_broadcast(&wm->wm_avail); 29940 } else { 29941 /* 29942 * If no one is waiting on the map, it should be free'ed. 29943 */ 29944 sd_free_inlist_wmap(un, wm); 29945 } 29946 29947 mutex_exit(SD_MUTEX(un)); 29948 } 29949 29950 29951 /* 29952 * Function: sd_read_modify_write_task 29953 * 29954 * Description: Called from a taskq thread to initiate the write phase of 29955 * a read-modify-write request. This is used for targets where 29956 * un->un_sys_blocksize != un->un_tgt_blocksize. 
*
29958 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29959 *
29960 * Context: Called under taskq thread context.
29961 */
29962
29963 static void
29964 sd_read_modify_write_task(void *arg)
29965 {
29966 struct sd_mapblocksize_info *bsp;
29967 struct buf *bp;
29968 struct sd_xbuf *xp;
29969 struct sd_lun *un;
29970
29971 bp = arg; /* The bp is given in arg */
29972 ASSERT(bp != NULL);
29973
29974 /* Get the pointer to the layer-private data struct */
29975 xp = SD_GET_XBUF(bp);
29976 ASSERT(xp != NULL);
29977 bsp = xp->xb_private;
29978 ASSERT(bsp != NULL);
29979
29980 un = SD_GET_UN(bp);
29981 ASSERT(un != NULL);
29982 ASSERT(!mutex_owned(SD_MUTEX(un)));
29983
29984 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29985 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29986
29987 /*
29988 * This is the write phase of a read-modify-write request, called
29989 * under the context of a taskq thread in response to the read
29990 * portion of the rmw request completing under interrupt
29991 * context. The write request must be sent from here down the iostart
29992 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29993 * we use the layer index saved in the layer-private data area.
29994 */
29995 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29996
29997 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29998 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29999 }
30000
30001
30002 /*
30003 * Function: sddump_do_read_of_rmw()
30004 *
30005 * Description: This routine will be called from sddump. If sddump is called
30006 * with an I/O which is not aligned on a device blocksize boundary,
30007 * then the write has to be converted to a read-modify-write.
30008 * Do the read part here in order to keep sddump simple.
30009 * Note that the sd_mutex is held across the call to this
30010 * routine.
30011 *
30012 * Arguments: un - sd_lun
30013 * blkno - block number in terms of media block size.
30014 * nblk - number of blocks.
30015 * bpp - pointer to pointer to the buf structure. On return
30016 * from this function, *bpp points to the valid buffer
30017 * to which the write has to be done.
30018 *
30019 * Return Code: 0 for success or errno-type return code
30020 */
30021
30022 static int
30023 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
30024 struct buf **bpp)
30025 {
30026 int err;
30027 int i;
30028 int rval;
30029 struct buf *bp;
30030 struct scsi_pkt *pkt = NULL;
30031 uint32_t target_blocksize;
30032
30033 ASSERT(un != NULL);
30034 ASSERT(mutex_owned(SD_MUTEX(un)));
30035
30036 target_blocksize = un->un_tgt_blocksize;
30037
30038 mutex_exit(SD_MUTEX(un));
30039
30040 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
30041 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
30042 if (bp == NULL) {
30043 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30044 "no resources for dumping; giving up");
30045 err = ENOMEM;
30046 goto done;
30047 }
30048
30049 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
30050 blkno, nblk);
30051 if (rval != 0) {
30052 scsi_free_consistent_buf(bp);
30053 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30054 "no resources for dumping; giving up");
30055 err = ENOMEM;
30056 goto done;
30057 }
30058
30059 pkt->pkt_flags |= FLAG_NOINTR;
30060
30061 err = EIO;
30062 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
30063
30064 /*
30065 * Scsi_poll returns 0 (success) if the command completes and
30066 * the status block is STATUS_GOOD.
We should only check 30067 * errors if this condition is not true. Even then we should 30068 * send our own request sense packet only if we have a check 30069 * condition and auto request sense has not been performed by 30070 * the hba. 30071 */ 30072 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 30073 30074 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 30075 err = 0; 30076 break; 30077 } 30078 30079 /* 30080 * Check CMD_DEV_GONE 1st, give up if device is gone, 30081 * no need to read RQS data. 30082 */ 30083 if (pkt->pkt_reason == CMD_DEV_GONE) { 30084 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30085 "Error while dumping state with rmw..." 30086 "Device is gone\n"); 30087 break; 30088 } 30089 30090 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 30091 SD_INFO(SD_LOG_DUMP, un, 30092 "sddump: read failed with CHECK, try # %d\n", i); 30093 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 30094 (void) sd_send_polled_RQS(un); 30095 } 30096 30097 continue; 30098 } 30099 30100 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 30101 int reset_retval = 0; 30102 30103 SD_INFO(SD_LOG_DUMP, un, 30104 "sddump: read failed with BUSY, try # %d\n", i); 30105 30106 if (un->un_f_lun_reset_enabled == TRUE) { 30107 reset_retval = scsi_reset(SD_ADDRESS(un), 30108 RESET_LUN); 30109 } 30110 if (reset_retval == 0) { 30111 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 30112 } 30113 (void) sd_send_polled_RQS(un); 30114 30115 } else { 30116 SD_INFO(SD_LOG_DUMP, un, 30117 "sddump: read failed with 0x%x, try # %d\n", 30118 SD_GET_PKT_STATUS(pkt), i); 30119 mutex_enter(SD_MUTEX(un)); 30120 sd_reset_target(un, pkt); 30121 mutex_exit(SD_MUTEX(un)); 30122 } 30123 30124 /* 30125 * If we are not getting anywhere with lun/target resets, 30126 * let's reset the bus. 30127 */ 30128 if (i > SD_NDUMP_RETRIES / 2) { 30129 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 30130 (void) sd_send_polled_RQS(un); 30131 } 30132 30133 } 30134 scsi_destroy_pkt(pkt); 30135 30136 if (err != 0) { 30137 scsi_free_consistent_buf(bp); 30138 *bpp = NULL; 30139 } else { 30140 *bpp = bp; 30141 } 30142 30143 done: 30144 mutex_enter(SD_MUTEX(un)); 30145 return (err); 30146 } 30147 30148 30149 /* 30150 * Function: sd_failfast_flushq 30151 * 30152 * Description: Take all bp's on the wait queue that have B_FAILFAST set 30153 * in b_flags and move them onto the failfast queue, then kick 30154 * off a thread to return all bp's on the failfast queue to 30155 * their owners with an error set. 30156 * 30157 * Arguments: un - pointer to the soft state struct for the instance. 30158 * 30159 * Context: may execute in interrupt context. 30160 */ 30161 30162 static void 30163 sd_failfast_flushq(struct sd_lun *un) 30164 { 30165 struct buf *bp; 30166 struct buf *next_waitq_bp; 30167 struct buf *prev_waitq_bp = NULL; 30168 30169 ASSERT(un != NULL); 30170 ASSERT(mutex_owned(SD_MUTEX(un))); 30171 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 30172 ASSERT(un->un_failfast_bp == NULL); 30173 30174 SD_TRACE(SD_LOG_IO_FAILFAST, un, 30175 "sd_failfast_flushq: entry: un:0x%p\n", un); 30176 30177 /* 30178 * Check if we should flush all bufs when entering failfast state, or 30179 * just those with B_FAILFAST set. 30180 */ 30181 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 30182 /* 30183 * Move *all* bp's on the wait queue to the failfast flush 30184 * queue, including those that do NOT have B_FAILFAST set. 
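 * The failfast queue is singly linked through av_forw, which is
 * why the whole waitq can be spliced onto it in one step below.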
30185 */ 30186 if (un->un_failfast_headp == NULL) { 30187 ASSERT(un->un_failfast_tailp == NULL); 30188 un->un_failfast_headp = un->un_waitq_headp; 30189 } else { 30190 ASSERT(un->un_failfast_tailp != NULL); 30191 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 30192 } 30193 30194 un->un_failfast_tailp = un->un_waitq_tailp; 30195 30196 /* update kstat for each bp moved out of the waitq */ 30197 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 30198 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 30199 } 30200 30201 /* empty the waitq */ 30202 un->un_waitq_headp = un->un_waitq_tailp = NULL; 30203 30204 } else { 30205 /* 30206 * Go thru the wait queue, pick off all entries with 30207 * B_FAILFAST set, and move these onto the failfast queue. 30208 */ 30209 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 30210 /* 30211 * Save the pointer to the next bp on the wait queue, 30212 * so we get to it on the next iteration of this loop. 30213 */ 30214 next_waitq_bp = bp->av_forw; 30215 30216 /* 30217 * If this bp from the wait queue does NOT have 30218 * B_FAILFAST set, just move on to the next element 30219 * in the wait queue. Note, this is the only place 30220 * where it is correct to set prev_waitq_bp. 30221 */ 30222 if ((bp->b_flags & B_FAILFAST) == 0) { 30223 prev_waitq_bp = bp; 30224 continue; 30225 } 30226 30227 /* 30228 * Remove the bp from the wait queue. 30229 */ 30230 if (bp == un->un_waitq_headp) { 30231 /* The bp is the first element of the waitq. */ 30232 un->un_waitq_headp = next_waitq_bp; 30233 if (un->un_waitq_headp == NULL) { 30234 /* The wait queue is now empty */ 30235 un->un_waitq_tailp = NULL; 30236 } 30237 } else { 30238 /* 30239 * The bp is either somewhere in the middle 30240 * or at the end of the wait queue. 30241 */ 30242 ASSERT(un->un_waitq_headp != NULL); 30243 ASSERT(prev_waitq_bp != NULL); 30244 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 30245 == 0); 30246 if (bp == un->un_waitq_tailp) { 30247 /* bp is the last entry on the waitq. */ 30248 ASSERT(next_waitq_bp == NULL); 30249 un->un_waitq_tailp = prev_waitq_bp; 30250 } 30251 prev_waitq_bp->av_forw = next_waitq_bp; 30252 } 30253 bp->av_forw = NULL; 30254 30255 /* 30256 * update kstat since the bp is moved out of 30257 * the waitq 30258 */ 30259 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 30260 30261 /* 30262 * Now put the bp onto the failfast queue. 30263 */ 30264 if (un->un_failfast_headp == NULL) { 30265 /* failfast queue is currently empty */ 30266 ASSERT(un->un_failfast_tailp == NULL); 30267 un->un_failfast_headp = 30268 un->un_failfast_tailp = bp; 30269 } else { 30270 /* Add the bp to the end of the failfast q */ 30271 ASSERT(un->un_failfast_tailp != NULL); 30272 ASSERT(un->un_failfast_tailp->b_flags & 30273 B_FAILFAST); 30274 un->un_failfast_tailp->av_forw = bp; 30275 un->un_failfast_tailp = bp; 30276 } 30277 } 30278 } 30279 30280 /* 30281 * Now return all bp's on the failfast queue to their owners. 30282 */ 30283 while ((bp = un->un_failfast_headp) != NULL) { 30284 30285 un->un_failfast_headp = bp->av_forw; 30286 if (un->un_failfast_headp == NULL) { 30287 un->un_failfast_tailp = NULL; 30288 } 30289 30290 /* 30291 * We want to return the bp with a failure error code, but 30292 * we do not want a call to sd_start_cmds() to occur here, 30293 * so use sd_return_failed_command_no_restart() instead of 30294 * sd_return_failed_command(). 30295 */ 30296 sd_return_failed_command_no_restart(un, bp, EIO); 30297 } 30298 30299 /* Flush the xbuf queues if required. 
*/
30300 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
30301 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
30302 }
30303
30304 SD_TRACE(SD_LOG_IO_FAILFAST, un,
30305 "sd_failfast_flushq: exit: un:0x%p\n", un);
30306 }
30307
30308
30309 /*
30310 * Function: sd_failfast_flushq_callback
30311 *
30312 * Description: Return TRUE if the given bp meets the criteria for failfast
30313 * flushing. Used with ddi_xbuf_flushq(9F).
30314 *
30315 * Arguments: bp - ptr to buf struct to be examined.
30316 *
30317 * Context: Any
30318 */
30319
30320 static int
30321 sd_failfast_flushq_callback(struct buf *bp)
30322 {
30323 /*
30324 * Return TRUE if (1) we want to flush ALL bufs when the failfast
30325 * state is entered; OR (2) the given bp has B_FAILFAST set.
30326 */
30327 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
30328 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
30329 }
30330
30331
30332
30333 /*
30334 * Function: sd_setup_next_xfer
30335 *
30336 * Description: Prepare next I/O operation using DMA_PARTIAL
30337 *
30338 */
30339
30340 static int
30341 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
30342 struct scsi_pkt *pkt, struct sd_xbuf *xp)
30343 {
30344 ssize_t num_blks_not_xfered;
30345 daddr_t strt_blk_num;
30346 ssize_t bytes_not_xfered;
30347 int rval;
30348
30349 ASSERT(pkt->pkt_resid == 0);
30350
30351 /*
30352 * Calculate next block number and amount to be transferred.
30353 *
30354 * How much data NOT transferred to the HBA yet.
30355 */
30356 bytes_not_xfered = xp->xb_dma_resid;
30357
30358 /*
30359 * Figure how many blocks NOT transferred to the HBA yet.
30360 */
30361 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
30362
30363 /*
30364 * Set the starting block number to the end of what WAS transferred.
30365 */
30366 strt_blk_num = xp->xb_blkno +
30367 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
30368
30369 /*
30370 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
30371 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
30372 * the disk mutex here.
30373 */
30374 rval = sd_setup_next_rw_pkt(un, pkt, bp,
30375 strt_blk_num, num_blks_not_xfered);
30376
30377 if (rval == 0) {
30378
30379 /*
30380 * Success.
30381 *
30382 * Adjust things if there are still more blocks to be
30383 * transferred.
30384 */
30385 xp->xb_dma_resid = pkt->pkt_resid;
30386 pkt->pkt_resid = 0;
30387
30388 return (1);
30389 }
30390
30391 /*
30392 * There's really only one possible failure return from
30393 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
30394 * returns NULL.
30395 */
30396 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
30397
30398 bp->b_resid = bp->b_bcount;
30399 bp->b_flags |= B_ERROR;
30400
30401 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30402 "Error setting up next portion of DMA transfer\n");
30403
30404 return (0);
30405 }
30406
30407 /*
30408 * Function: sd_panic_for_res_conflict
30409 *
30410 * Description: Call panic with a string formatted with "Reservation Conflict"
30411 * and a human readable identifier indicating the SD instance
30412 * that experienced the reservation conflict.
30413 *
30414 * Arguments: un - pointer to the soft state struct for the instance.
30415 *
30416 * Context: may execute in interrupt context.
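 *
 * The resulting panic message has the form (the device path shown
 * here is illustrative only):
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,3060@3/sd@1,0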
30417 */ 30418 30419 #define SD_RESV_CONFLICT_FMT_LEN 40 30420 void 30421 sd_panic_for_res_conflict(struct sd_lun *un) 30422 { 30423 char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN]; 30424 char path_str[MAXPATHLEN]; 30425 30426 (void) snprintf(panic_str, sizeof (panic_str), 30427 "Reservation Conflict\nDisk: %s", 30428 ddi_pathname(SD_DEVINFO(un), path_str)); 30429 30430 panic(panic_str); 30431 } 30432 30433 /* 30434 * Note: The following sd_faultinjection_ioctl( ) routines implement 30435 * driver support for handling fault injection for error analysis 30436 * causing faults in multiple layers of the driver. 30437 * 30438 */ 30439 30440 #ifdef SD_FAULT_INJECTION 30441 static uint_t sd_fault_injection_on = 0; 30442 30443 /* 30444 * Function: sd_faultinjection_ioctl() 30445 * 30446 * Description: This routine is the driver entry point for handling 30447 * faultinjection ioctls to inject errors into the 30448 * layer model 30449 * 30450 * Arguments: cmd - the ioctl cmd received 30451 * arg - the arguments from user and returns 30452 */ 30453 30454 static void 30455 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) 30456 { 30457 uint_t i = 0; 30458 uint_t rval; 30459 30460 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 30461 30462 mutex_enter(SD_MUTEX(un)); 30463 30464 switch (cmd) { 30465 case SDIOCRUN: 30466 /* Allow pushed faults to be injected */ 30467 SD_INFO(SD_LOG_SDTEST, un, 30468 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 30469 30470 sd_fault_injection_on = 1; 30471 30472 SD_INFO(SD_LOG_IOERR, un, 30473 "sd_faultinjection_ioctl: run finished\n"); 30474 break; 30475 30476 case SDIOCSTART: 30477 /* Start Injection Session */ 30478 SD_INFO(SD_LOG_SDTEST, un, 30479 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 30480 30481 sd_fault_injection_on = 0; 30482 un->sd_injection_mask = 0xFFFFFFFF; 30483 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30484 un->sd_fi_fifo_pkt[i] = NULL; 30485 un->sd_fi_fifo_xb[i] = NULL; 30486 un->sd_fi_fifo_un[i] = NULL; 30487 un->sd_fi_fifo_arq[i] = NULL; 30488 } 30489 un->sd_fi_fifo_start = 0; 30490 un->sd_fi_fifo_end = 0; 30491 30492 mutex_enter(&(un->un_fi_mutex)); 30493 un->sd_fi_log[0] = '\0'; 30494 un->sd_fi_buf_len = 0; 30495 mutex_exit(&(un->un_fi_mutex)); 30496 30497 SD_INFO(SD_LOG_IOERR, un, 30498 "sd_faultinjection_ioctl: start finished\n"); 30499 break; 30500 30501 case SDIOCSTOP: 30502 /* Stop Injection Session */ 30503 SD_INFO(SD_LOG_SDTEST, un, 30504 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 30505 sd_fault_injection_on = 0; 30506 un->sd_injection_mask = 0x0; 30507 30508 /* Empty stray or unuseds structs from fifo */ 30509 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30510 if (un->sd_fi_fifo_pkt[i] != NULL) { 30511 kmem_free(un->sd_fi_fifo_pkt[i], 30512 sizeof (struct sd_fi_pkt)); 30513 } 30514 if (un->sd_fi_fifo_xb[i] != NULL) { 30515 kmem_free(un->sd_fi_fifo_xb[i], 30516 sizeof (struct sd_fi_xb)); 30517 } 30518 if (un->sd_fi_fifo_un[i] != NULL) { 30519 kmem_free(un->sd_fi_fifo_un[i], 30520 sizeof (struct sd_fi_un)); 30521 } 30522 if (un->sd_fi_fifo_arq[i] != NULL) { 30523 kmem_free(un->sd_fi_fifo_arq[i], 30524 sizeof (struct sd_fi_arq)); 30525 } 30526 un->sd_fi_fifo_pkt[i] = NULL; 30527 un->sd_fi_fifo_un[i] = NULL; 30528 un->sd_fi_fifo_xb[i] = NULL; 30529 un->sd_fi_fifo_arq[i] = NULL; 30530 } 30531 un->sd_fi_fifo_start = 0; 30532 un->sd_fi_fifo_end = 0; 30533 30534 SD_INFO(SD_LOG_IOERR, un, 30535 "sd_faultinjection_ioctl: stop finished\n"); 30536 break; 30537 30538 case SDIOCINSERTPKT: 30539 /* 
Store a packet struct to be pushed onto fifo */
30540 SD_INFO(SD_LOG_SDTEST, un,
30541 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30542
30543 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30544
30545 sd_fault_injection_on = 0;
30546
30547 /* No more than SD_FI_MAX_ERROR allowed in the queue */
30548 if (un->sd_fi_fifo_pkt[i] != NULL) {
30549 kmem_free(un->sd_fi_fifo_pkt[i],
30550 sizeof (struct sd_fi_pkt));
30551 }
30552 if (arg != (uintptr_t)NULL) {
30553 un->sd_fi_fifo_pkt[i] =
30554 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30555 if (un->sd_fi_fifo_pkt[i] == NULL) {
30556 /* Alloc failed; don't store anything */
30557 break;
30558 }
30559 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30560 sizeof (struct sd_fi_pkt), 0);
30561 if (rval == -1) {
30562 kmem_free(un->sd_fi_fifo_pkt[i],
30563 sizeof (struct sd_fi_pkt));
30564 un->sd_fi_fifo_pkt[i] = NULL;
30565 }
30566 } else {
30567 SD_INFO(SD_LOG_IOERR, un,
30568 "sd_faultinjection_ioctl: pkt null\n");
30569 }
30570 break;
30571
30572 case SDIOCINSERTXB:
30573 /* Store an xb struct to be pushed onto fifo */
30574 SD_INFO(SD_LOG_SDTEST, un,
30575 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30576
30577 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30578
30579 sd_fault_injection_on = 0;
30580
30581 if (un->sd_fi_fifo_xb[i] != NULL) {
30582 kmem_free(un->sd_fi_fifo_xb[i],
30583 sizeof (struct sd_fi_xb));
30584 un->sd_fi_fifo_xb[i] = NULL;
30585 }
30586 if (arg != (uintptr_t)NULL) {
30587 un->sd_fi_fifo_xb[i] =
30588 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30589 if (un->sd_fi_fifo_xb[i] == NULL) {
30590 /* Alloc failed; don't store anything */
30591 break;
30592 }
30593 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30594 sizeof (struct sd_fi_xb), 0);
30595
30596 if (rval == -1) {
30597 kmem_free(un->sd_fi_fifo_xb[i],
30598 sizeof (struct sd_fi_xb));
30599 un->sd_fi_fifo_xb[i] = NULL;
30600 }
30601 } else {
30602 SD_INFO(SD_LOG_IOERR, un,
30603 "sd_faultinjection_ioctl: xb null\n");
30604 }
30605 break;
30606
30607 case SDIOCINSERTUN:
30608 /* Store a un struct to be pushed onto fifo */
30609 SD_INFO(SD_LOG_SDTEST, un,
30610 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30611
30612 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30613
30614 sd_fault_injection_on = 0;
30615
30616 if (un->sd_fi_fifo_un[i] != NULL) {
30617 kmem_free(un->sd_fi_fifo_un[i],
30618 sizeof (struct sd_fi_un));
30619 un->sd_fi_fifo_un[i] = NULL;
30620 }
30621 if (arg != (uintptr_t)NULL) {
30622 un->sd_fi_fifo_un[i] =
30623 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30624 if (un->sd_fi_fifo_un[i] == NULL) {
30625 /* Alloc failed; don't store anything */
30626 break;
30627 }
30628 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30629 sizeof (struct sd_fi_un), 0);
30630 if (rval == -1) {
30631 kmem_free(un->sd_fi_fifo_un[i],
30632 sizeof (struct sd_fi_un));
30633 un->sd_fi_fifo_un[i] = NULL;
30634 }
30635
30636 } else {
30637 SD_INFO(SD_LOG_IOERR, un,
30638 "sd_faultinjection_ioctl: un null\n");
30639 }
30640
30641 break;
30642
30643 case SDIOCINSERTARQ:
30644 /* Store an arq struct to be pushed onto fifo */
30645 SD_INFO(SD_LOG_SDTEST, un,
30646 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30647 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30648
30649 sd_fault_injection_on = 0;
30650
30651 if (un->sd_fi_fifo_arq[i] != NULL) {
30652 kmem_free(un->sd_fi_fifo_arq[i],
30653 sizeof (struct sd_fi_arq));
30654 un->sd_fi_fifo_arq[i] = NULL;
30655 }
30656 if (arg != (uintptr_t)NULL) {
30657 un->sd_fi_fifo_arq[i]
=
30658 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30659 if (un->sd_fi_fifo_arq[i] == NULL) {
30660 /* Alloc failed; don't store anything */
30661 break;
30662 }
30663 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30664 sizeof (struct sd_fi_arq), 0);
30665 if (rval == -1) {
30666 kmem_free(un->sd_fi_fifo_arq[i],
30667 sizeof (struct sd_fi_arq));
30668 un->sd_fi_fifo_arq[i] = NULL;
30669 }
30670
30671 } else {
30672 SD_INFO(SD_LOG_IOERR, un,
30673 "sd_faultinjection_ioctl: arq null\n");
30674 }
30675
30676 break;
30677
30678 case SDIOCPUSH:
30679 /* Push stored xb, pkt, un, and arq onto fifo */
30680 sd_fault_injection_on = 0;
30681
30682 if (arg != (uintptr_t)NULL) {
30683 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30684 if (rval != -1 &&
30685 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30686 un->sd_fi_fifo_end += i;
30687 }
30688 } else {
30689 SD_INFO(SD_LOG_IOERR, un,
30690 "sd_faultinjection_ioctl: push arg null\n");
30691 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30692 un->sd_fi_fifo_end++;
30693 }
30694 }
30695 SD_INFO(SD_LOG_IOERR, un,
30696 "sd_faultinjection_ioctl: push to end=%d\n",
30697 un->sd_fi_fifo_end);
30698 break;
30699
30700 case SDIOCRETRIEVE:
30701 /* Return buffer of log from Injection session */
30702 SD_INFO(SD_LOG_SDTEST, un,
30703 "sd_faultinjection_ioctl: Injecting Fault Retrieve");
30704
30705 sd_fault_injection_on = 0;
30706
30707 mutex_enter(&(un->un_fi_mutex));
30708 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30709 un->sd_fi_buf_len+1, 0);
30710 mutex_exit(&(un->un_fi_mutex));
30711
30712 if (rval == -1) {
30713 /*
30714 * arg is possibly invalid; set
30715 * it to NULL for the return
30716 */
30717 arg = (uintptr_t)NULL;
30718 }
30719 break;
30720 }
30721
30722 mutex_exit(SD_MUTEX(un));
30723 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
30724 }
30725
30726
30727 /*
30728 * Function: sd_injection_log()
30729 *
30730 * Description: This routine adds buf to the already existing injection log
30731 * for retrieval via faultinjection_ioctl for use in fault
30732 * detection and recovery
30733 *
30734 * Arguments: buf - the string to add to the log
30735 */
30736
30737 static void
30738 sd_injection_log(char *buf, struct sd_lun *un)
30739 {
30740 uint_t len;
30741
30742 ASSERT(un != NULL);
30743 ASSERT(buf != NULL);
30744
30745 mutex_enter(&(un->un_fi_mutex));
30746
30747 len = min(strlen(buf), 255);
30748 /* Add logged value to Injection log to be returned later */
30749 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30750 uint_t offset = strlen((char *)un->sd_fi_log);
30751 char *destp = (char *)un->sd_fi_log + offset;
30752 int i;
30753 for (i = 0; i < len; i++) {
30754 *destp++ = *buf++;
30755 }
30756 un->sd_fi_buf_len += len;
30757 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30758 }
30759
30760 mutex_exit(&(un->un_fi_mutex));
30761 }
30762
30763
30764 /*
30765 * Function: sd_faultinjection()
30766 *
30767 * Description: This routine takes the pkt and changes its
30768 * content based on error injection scenario.
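 *
 * A test harness typically drives an injection session with the
 * ioctls handled above, roughly as follows (a sketch):
 *
 *	(void) ioctl(fd, SDIOCSTART, NULL);	arm and clear the fifo
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fip);	stage a pkt error
 *	(void) ioctl(fd, SDIOCPUSH, &n);	commit n staged errors
 *	(void) ioctl(fd, SDIOCRUN, NULL);	enable injection
 *	(void) ioctl(fd, SDIOCRETRIEVE, buf);	collect the session log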
30769 * 30770 * Arguments: pktp - packet to be changed 30771 */ 30772 30773 static void 30774 sd_faultinjection(struct scsi_pkt *pktp) 30775 { 30776 uint_t i; 30777 struct sd_fi_pkt *fi_pkt; 30778 struct sd_fi_xb *fi_xb; 30779 struct sd_fi_un *fi_un; 30780 struct sd_fi_arq *fi_arq; 30781 struct buf *bp; 30782 struct sd_xbuf *xb; 30783 struct sd_lun *un; 30784 30785 ASSERT(pktp != NULL); 30786 30787 /* pull bp xb and un from pktp */ 30788 bp = (struct buf *)pktp->pkt_private; 30789 xb = SD_GET_XBUF(bp); 30790 un = SD_GET_UN(bp); 30791 30792 ASSERT(un != NULL); 30793 30794 mutex_enter(SD_MUTEX(un)); 30795 30796 SD_TRACE(SD_LOG_SDTEST, un, 30797 "sd_faultinjection: entry Injection from sdintr\n"); 30798 30799 /* if injection is off return */ 30800 if (sd_fault_injection_on == 0 || 30801 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30802 mutex_exit(SD_MUTEX(un)); 30803 return; 30804 } 30805 30806 SD_INFO(SD_LOG_SDTEST, un, 30807 "sd_faultinjection: is working for copying\n"); 30808 30809 /* take next set off fifo */ 30810 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30811 30812 fi_pkt = un->sd_fi_fifo_pkt[i]; 30813 fi_xb = un->sd_fi_fifo_xb[i]; 30814 fi_un = un->sd_fi_fifo_un[i]; 30815 fi_arq = un->sd_fi_fifo_arq[i]; 30816 30817 30818 /* set variables accordingly */ 30819 /* set pkt if it was on fifo */ 30820 if (fi_pkt != NULL) { 30821 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30822 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30823 if (fi_pkt->pkt_cdbp != 0xff) 30824 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30825 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30826 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30827 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30828 30829 } 30830 /* set xb if it was on fifo */ 30831 if (fi_xb != NULL) { 30832 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30833 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30834 if (fi_xb->xb_retry_count != 0) 30835 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30836 SD_CONDSET(xb, xb, xb_victim_retry_count, 30837 "xb_victim_retry_count"); 30838 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30839 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30840 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30841 30842 /* copy in block data from sense */ 30843 /* 30844 * if (fi_xb->xb_sense_data[0] != -1) { 30845 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30846 * SENSE_LENGTH); 30847 * } 30848 */ 30849 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30850 30851 /* copy in extended sense codes */ 30852 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30853 xb, es_code, "es_code"); 30854 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30855 xb, es_key, "es_key"); 30856 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30857 xb, es_add_code, "es_add_code"); 30858 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30859 xb, es_qual_code, "es_qual_code"); 30860 struct scsi_extended_sense *esp; 30861 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30862 esp->es_class = CLASS_EXTENDED_SENSE; 30863 } 30864 30865 /* set un if it was on fifo */ 30866 if (fi_un != NULL) { 30867 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30868 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30869 SD_CONDSET(un, un, un_reset_retry_count, 30870 "un_reset_retry_count"); 30871 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30872 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30873 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled"); 30874 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 30875 "un_f_allow_bus_device_reset"); 30876 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 30877 30878 } 30879 30880 /* copy in auto request sense if it was on fifo */ 30881 if (fi_arq != NULL) { 30882 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 30883 } 30884 30885 /* free structs */ 30886 if (un->sd_fi_fifo_pkt[i] != NULL) { 30887 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 30888 } 30889 if (un->sd_fi_fifo_xb[i] != NULL) { 30890 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 30891 } 30892 if (un->sd_fi_fifo_un[i] != NULL) { 30893 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 30894 } 30895 if (un->sd_fi_fifo_arq[i] != NULL) { 30896 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 30897 } 30898 30899 /* 30900 * kmem_free does not gurantee to set to NULL 30901 * since we uses these to determine if we set 30902 * values or not lets confirm they are always 30903 * NULL after free 30904 */ 30905 un->sd_fi_fifo_pkt[i] = NULL; 30906 un->sd_fi_fifo_un[i] = NULL; 30907 un->sd_fi_fifo_xb[i] = NULL; 30908 un->sd_fi_fifo_arq[i] = NULL; 30909 30910 un->sd_fi_fifo_start++; 30911 30912 mutex_exit(SD_MUTEX(un)); 30913 30914 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 30915 } 30916 30917 #endif /* SD_FAULT_INJECTION */ 30918 30919 /* 30920 * This routine is invoked in sd_unit_attach(). Before calling it, the 30921 * properties in conf file should be processed already, and "hotpluggable" 30922 * property was processed also. 30923 * 30924 * The sd driver distinguishes 3 different type of devices: removable media, 30925 * non-removable media, and hotpluggable. Below the differences are defined: 30926 * 30927 * 1. Device ID 30928 * 30929 * The device ID of a device is used to identify this device. Refer to 30930 * ddi_devid_register(9F). 30931 * 30932 * For a non-removable media disk device which can provide 0x80 or 0x83 30933 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 30934 * device ID is created to identify this device. For other non-removable 30935 * media devices, a default device ID is created only if this device has 30936 * at least 2 alter cylinders. Otherwise, this device has no devid. 30937 * 30938 * ------------------------------------------------------- 30939 * removable media hotpluggable | Can Have Device ID 30940 * ------------------------------------------------------- 30941 * false false | Yes 30942 * false true | Yes 30943 * true x | No 30944 * ------------------------------------------------------ 30945 * 30946 * 30947 * 2. SCSI group 4 commands 30948 * 30949 * In SCSI specs, only some commands in group 4 command set can use 30950 * 8-byte addresses that can be used to access >2TB storage spaces. 30951 * Other commands have no such capability. Without supporting group4, 30952 * it is impossible to make full use of storage spaces of a disk with 30953 * capacity larger than 2TB. 30954 * 30955 * ----------------------------------------------- 30956 * removable media hotpluggable LP64 | Group 30957 * ----------------------------------------------- 30958 * false false false | 1 30959 * false false true | 4 30960 * false true false | 1 30961 * false true true | 4 30962 * true x x | 5 30963 * ----------------------------------------------- 30964 * 30965 * 30966 * 3. 
Check for VTOC Label 30967 * 30968 * If a direct-access disk has no EFI label, sd will check if it has a 30969 * valid VTOC label. Now, sd also does that check for removable media 30970 * and hotpluggable devices. 30971 * 30972 * -------------------------------------------------------------- 30973 * Direct-Access removable media hotpluggable | Check Label 30974 * ------------------------------------------------------------- 30975 * false false false | No 30976 * false false true | No 30977 * false true false | Yes 30978 * false true true | Yes 30979 * true x x | Yes 30980 * -------------------------------------------------------------- 30981 * 30982 * 30983 * 4. Building default VTOC label 30984 * 30985 * As section 3 says, sd checks if some kinds of devices have VTOC label. 30986 * If those devices have no valid VTOC label, sd(7d) will attempt to 30987 * create default VTOC for them. Currently sd creates default VTOC label 30988 * for all devices on x86 platform (VTOC_16), but only for removable 30989 * media devices on SPARC (VTOC_8). 30990 * 30991 * ----------------------------------------------------------- 30992 * removable media hotpluggable platform | Default Label 30993 * ----------------------------------------------------------- 30994 * false false sparc | No 30995 * false true x86 | Yes 30996 * false true sparc | Yes 30997 * true x x | Yes 30998 * ---------------------------------------------------------- 30999 * 31000 * 31001 * 5. Supported blocksizes of target devices 31002 * 31003 * Sd supports non-512-byte blocksize for removable media devices only. 31004 * For other devices, only 512-byte blocksize is supported. This may be 31005 * changed in near future because some RAID devices require non-512-byte 31006 * blocksize 31007 * 31008 * ----------------------------------------------------------- 31009 * removable media hotpluggable | non-512-byte blocksize 31010 * ----------------------------------------------------------- 31011 * false false | No 31012 * false true | No 31013 * true x | Yes 31014 * ----------------------------------------------------------- 31015 * 31016 * 31017 * 6. Automatic mount & unmount 31018 * 31019 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 31020 * if a device is removable media device. It return 1 for removable media 31021 * devices, and 0 for others. 31022 * 31023 * The automatic mounting subsystem should distinguish between the types 31024 * of devices and apply automounting policies to each. 31025 * 31026 * 31027 * 7. fdisk partition management 31028 * 31029 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 31030 * just supports fdisk partitions on x86 platform. On sparc platform, sd 31031 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 31032 * fdisk partitions on both x86 and SPARC platform. 31033 * 31034 * ----------------------------------------------------------- 31035 * platform removable media USB/1394 | fdisk supported 31036 * ----------------------------------------------------------- 31037 * x86 X X | true 31038 * ------------------------------------------------------------ 31039 * sparc X X | false 31040 * ------------------------------------------------------------ 31041 * 31042 * 31043 * 8. MBOOT/MBR 31044 * 31045 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 31046 * read/write mboot for removable media devices on sparc platform. 
31047 * 31048 * ----------------------------------------------------------- 31049 * platform removable media USB/1394 | mboot supported 31050 * ----------------------------------------------------------- 31051 * x86 X X | true 31052 * ------------------------------------------------------------ 31053 * sparc false false | false 31054 * sparc false true | true 31055 * sparc true false | true 31056 * sparc true true | true 31057 * ------------------------------------------------------------ 31058 * 31059 * 31060 * 9. error handling during opening device 31061 * 31062 * If failed to open a disk device, an errno is returned. For some kinds 31063 * of errors, different errno is returned depending on if this device is 31064 * a removable media device. This brings USB/1394 hard disks in line with 31065 * expected hard disk behavior. It is not expected that this breaks any 31066 * application. 31067 * 31068 * ------------------------------------------------------ 31069 * removable media hotpluggable | errno 31070 * ------------------------------------------------------ 31071 * false false | EIO 31072 * false true | EIO 31073 * true x | ENXIO 31074 * ------------------------------------------------------ 31075 * 31076 * 31077 * 11. ioctls: DKIOCEJECT, CDROMEJECT 31078 * 31079 * These IOCTLs are applicable only to removable media devices. 31080 * 31081 * ----------------------------------------------------------- 31082 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 31083 * ----------------------------------------------------------- 31084 * false false | No 31085 * false true | No 31086 * true x | Yes 31087 * ----------------------------------------------------------- 31088 * 31089 * 31090 * 12. Kstats for partitions 31091 * 31092 * sd creates partition kstat for non-removable media devices. USB and 31093 * Firewire hard disks now have partition kstats 31094 * 31095 * ------------------------------------------------------ 31096 * removable media hotpluggable | kstat 31097 * ------------------------------------------------------ 31098 * false false | Yes 31099 * false true | Yes 31100 * true x | No 31101 * ------------------------------------------------------ 31102 * 31103 * 31104 * 13. Removable media & hotpluggable properties 31105 * 31106 * Sd driver creates a "removable-media" property for removable media 31107 * devices. Parent nexus drivers create a "hotpluggable" property if 31108 * it supports hotplugging. 31109 * 31110 * --------------------------------------------------------------------- 31111 * removable media hotpluggable | "removable-media" " hotpluggable" 31112 * --------------------------------------------------------------------- 31113 * false false | No No 31114 * false true | No Yes 31115 * true false | Yes No 31116 * true true | Yes Yes 31117 * --------------------------------------------------------------------- 31118 * 31119 * 31120 * 14. Power Management 31121 * 31122 * sd only power manages removable media devices or devices that support 31123 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 31124 * 31125 * A parent nexus that supports hotplugging can also set "pm-capable" 31126 * if the disk can be power managed. 
31127 * 31128 * ------------------------------------------------------------ 31129 * removable media hotpluggable pm-capable | power manage 31130 * ------------------------------------------------------------ 31131 * false false false | No 31132 * false false true | Yes 31133 * false true false | No 31134 * false true true | Yes 31135 * true x x | Yes 31136 * ------------------------------------------------------------ 31137 * 31138 * USB and firewire hard disks can now be power managed independently 31139 * of the framebuffer 31140 * 31141 * 31142 * 15. Support for USB disks with capacity larger than 1TB 31143 * 31144 * Currently, sd doesn't permit a fixed disk device with capacity 31145 * larger than 1TB to be used in a 32-bit operating system environment. 31146 * However, sd doesn't do that for removable media devices. Instead, it 31147 * assumes that removable media devices cannot have a capacity larger 31148 * than 1TB. Therefore, using those devices on 32-bit system is partially 31149 * supported, which can cause some unexpected results. 31150 * 31151 * --------------------------------------------------------------------- 31152 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 31153 * --------------------------------------------------------------------- 31154 * false false | true | no 31155 * false true | true | no 31156 * true false | true | Yes 31157 * true true | true | Yes 31158 * --------------------------------------------------------------------- 31159 * 31160 * 31161 * 16. Check write-protection at open time 31162 * 31163 * When a removable media device is being opened for writing without NDELAY 31164 * flag, sd will check if this device is writable. If attempting to open 31165 * without NDELAY flag a write-protected device, this operation will abort. 31166 * 31167 * ------------------------------------------------------------ 31168 * removable media USB/1394 | WP Check 31169 * ------------------------------------------------------------ 31170 * false false | No 31171 * false true | No 31172 * true false | Yes 31173 * true true | Yes 31174 * ------------------------------------------------------------ 31175 * 31176 * 31177 * 17. syslog when corrupted VTOC is encountered 31178 * 31179 * Currently, if an invalid VTOC is encountered, sd only print syslog 31180 * for fixed SCSI disks. 31181 * ------------------------------------------------------------ 31182 * removable media USB/1394 | print syslog 31183 * ------------------------------------------------------------ 31184 * false false | Yes 31185 * false true | No 31186 * true false | No 31187 * true true | No 31188 * ------------------------------------------------------------ 31189 */ 31190 static void 31191 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 31192 { 31193 int pm_cap; 31194 31195 ASSERT(un->un_sd); 31196 ASSERT(un->un_sd->sd_inq); 31197 31198 /* 31199 * Enable SYNC CACHE support for all devices. 31200 */ 31201 un->un_f_sync_cache_supported = TRUE; 31202 31203 /* 31204 * Set the sync cache required flag to false. 31205 * This would ensure that there is no SYNC CACHE 31206 * sent when there are no writes 31207 */ 31208 un->un_f_sync_cache_required = FALSE; 31209 31210 if (un->un_sd->sd_inq->inq_rmb) { 31211 /* 31212 * The media of this device is removable. And for this kind 31213 * of devices, it is possible to change medium after opening 31214 * devices. Thus we should support this operation. 
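 *
 * As noted in section 6 of the comment above, userland can query this
 * attribute through the DKIOCREMOVABLE ioctl. A minimal sketch of such
 * a caller (device path hypothetical, error handling elided):
 *
 *	int removable = 0;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *	if (fd >= 0 && ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
 *	    removable != 0)
 *		(void) printf("media is removable\n");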
31215 */ 31216 un->un_f_has_removable_media = TRUE; 31217 31218 /* 31219 * support non-512-byte blocksize of removable media devices 31220 */ 31221 un->un_f_non_devbsize_supported = TRUE; 31222 31223 /* 31224 * Assume that all removable media devices support DOOR_LOCK 31225 */ 31226 un->un_f_doorlock_supported = TRUE; 31227 31228 /* 31229 * For a removable media device, it is possible to be opened 31230 * with NDELAY flag when there is no media in drive, in this 31231 * case we don't care if device is writable. But if without 31232 * NDELAY flag, we need to check if media is write-protected. 31233 */ 31234 un->un_f_chk_wp_open = TRUE; 31235 31236 /* 31237 * need to start a SCSI watch thread to monitor media state, 31238 * when media is being inserted or ejected, notify syseventd. 31239 */ 31240 un->un_f_monitor_media_state = TRUE; 31241 31242 /* 31243 * Some devices don't support START_STOP_UNIT command. 31244 * Therefore, we'd better check if a device supports it 31245 * before sending it. 31246 */ 31247 un->un_f_check_start_stop = TRUE; 31248 31249 /* 31250 * support eject media ioctl: 31251 * FDEJECT, DKIOCEJECT, CDROMEJECT 31252 */ 31253 un->un_f_eject_media_supported = TRUE; 31254 31255 /* 31256 * Because many removable-media devices don't support 31257 * LOG_SENSE, we couldn't use this command to check if 31258 * a removable media device support power-management. 31259 * We assume that they support power-management via 31260 * START_STOP_UNIT command and can be spun up and down 31261 * without limitations. 31262 */ 31263 un->un_f_pm_supported = TRUE; 31264 31265 /* 31266 * Need to create a zero length (Boolean) property 31267 * removable-media for the removable media devices. 31268 * Note that the return value of the property is not being 31269 * checked, since if unable to create the property 31270 * then do not want the attach to fail altogether. Consistent 31271 * with other property creation in attach. 31272 */ 31273 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 31274 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 31275 31276 } else { 31277 /* 31278 * create device ID for device 31279 */ 31280 un->un_f_devid_supported = TRUE; 31281 31282 /* 31283 * Spin up non-removable-media devices once it is attached 31284 */ 31285 un->un_f_attach_spinup = TRUE; 31286 31287 /* 31288 * According to SCSI specification, Sense data has two kinds of 31289 * format: fixed format, and descriptor format. At present, we 31290 * don't support descriptor format sense data for removable 31291 * media. 31292 */ 31293 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 31294 un->un_f_descr_format_supported = TRUE; 31295 } 31296 31297 /* 31298 * kstats are created only for non-removable media devices. 31299 * 31300 * Set this in sd.conf to 0 in order to disable kstats. The 31301 * default is 1, so they are enabled by default. 31302 */ 31303 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 31304 SD_DEVINFO(un), DDI_PROP_DONTPASS, 31305 "enable-partition-kstats", 1)); 31306 31307 /* 31308 * Check if HBA has set the "pm-capable" property. 31309 * If "pm-capable" exists and is non-zero then we can 31310 * power manage the device without checking the start/stop 31311 * cycle count log sense page. 31312 * 31313 * If "pm-capable" exists and is set to be false (0), 31314 * then we should not power manage the device. 31315 * 31316 * If "pm-capable" doesn't exist then pm_cap will 31317 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 31318 * sd will check the start/stop cycle count log sense page 31319 * and power manage the device if the cycle count limit has 31320 * not been exceeded. 31321 */ 31322 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 31323 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 31324 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) { 31325 un->un_f_log_sense_supported = TRUE; 31326 if (!un->un_f_power_condition_disabled && 31327 SD_INQUIRY(un)->inq_ansi == 6) { 31328 un->un_f_power_condition_supported = TRUE; 31329 } 31330 } else { 31331 /* 31332 * pm-capable property exists. 31333 * 31334 * Convert "TRUE" values for pm_cap to 31335 * SD_PM_CAPABLE_IS_TRUE to make it easier to check 31336 * later. "TRUE" values are any values defined in 31337 * inquiry.h. 31338 */ 31339 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) { 31340 un->un_f_log_sense_supported = FALSE; 31341 } else { 31342 /* SD_PM_CAPABLE_IS_TRUE case */ 31343 un->un_f_pm_supported = TRUE; 31344 if (!un->un_f_power_condition_disabled && 31345 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) { 31346 un->un_f_power_condition_supported = 31347 TRUE; 31348 } 31349 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) { 31350 un->un_f_log_sense_supported = TRUE; 31351 un->un_f_pm_log_sense_smart = 31352 SD_PM_CAP_SMART_LOG(pm_cap); 31353 } 31354 } 31355 31356 SD_INFO(SD_LOG_ATTACH_DETACH, un, 31357 "sd_unit_attach: un:0x%p pm-capable " 31358 "property set to %d.\n", un, un->un_f_pm_supported); 31359 } 31360 } 31361 31362 if (un->un_f_is_hotpluggable) { 31363 31364 /* 31365 * Have to watch hotpluggable devices as well, since 31366 * that's the only way for userland applications to 31367 * detect hot removal while device is busy/mounted. 31368 */ 31369 un->un_f_monitor_media_state = TRUE; 31370 31371 un->un_f_check_start_stop = TRUE; 31372 31373 } 31374 } 31375 31376 /* 31377 * sd_tg_rdwr: 31378 * Provides rdwr access for cmlb via sd_tgops. The start_block is 31379 * in sys block size, req_length in bytes. 
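 *
 * For example (hypothetical values): with un_tgt_blocksize = 4096,
 * start_block = 3 and reqlength = 1024, the body below computes
 *
 *	first_byte  = 3 * 512 = 1536
 *	real_addr   = 1536 / 4096 = 0
 *	end_block   = (1536 + 1024 + 4095) / 4096 = 1
 *	buffer_size = (1 - 0) * 4096 = 4096
 *
 * and, because the request is not aligned to the target blocksize, a
 * scratch buffer (dkl) is allocated and the caller's data is copied to
 * or from the proper byte offset within it.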
31380 * 31381 */ 31382 static int 31383 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 31384 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 31385 { 31386 struct sd_lun *un; 31387 int path_flag = (int)(uintptr_t)tg_cookie; 31388 char *dkl = NULL; 31389 diskaddr_t real_addr = start_block; 31390 diskaddr_t first_byte, end_block; 31391 31392 size_t buffer_size = reqlength; 31393 int rval = 0; 31394 diskaddr_t cap; 31395 uint32_t lbasize; 31396 sd_ssc_t *ssc; 31397 31398 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31399 if (un == NULL) 31400 return (ENXIO); 31401 31402 if (cmd != TG_READ && cmd != TG_WRITE) 31403 return (EINVAL); 31404 31405 ssc = sd_ssc_init(un); 31406 mutex_enter(SD_MUTEX(un)); 31407 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 31408 mutex_exit(SD_MUTEX(un)); 31409 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31410 &lbasize, path_flag); 31411 if (rval != 0) 31412 goto done1; 31413 mutex_enter(SD_MUTEX(un)); 31414 sd_update_block_info(un, lbasize, cap); 31415 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 31416 mutex_exit(SD_MUTEX(un)); 31417 rval = EIO; 31418 goto done; 31419 } 31420 } 31421 31422 if (NOT_DEVBSIZE(un)) { 31423 /* 31424 * sys_blocksize != tgt_blocksize, need to re-adjust 31425 * blkno and save the index to beginning of dk_label 31426 */ 31427 first_byte = SD_SYSBLOCKS2BYTES(start_block); 31428 real_addr = first_byte / un->un_tgt_blocksize; 31429 31430 end_block = (first_byte + reqlength + 31431 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 31432 31433 /* round up buffer size to multiple of target block size */ 31434 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 31435 31436 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 31437 "label_addr: 0x%x allocation size: 0x%x\n", 31438 real_addr, buffer_size); 31439 31440 if (((first_byte % un->un_tgt_blocksize) != 0) || 31441 (reqlength % un->un_tgt_blocksize) != 0) 31442 /* the request is not aligned */ 31443 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 31444 } 31445 31446 /* 31447 * The MMC standard allows READ CAPACITY to be 31448 * inaccurate by a bounded amount (in the interest of 31449 * response latency). As a result, failed READs are 31450 * commonplace (due to the reading of metadata and not 31451 * data). Depending on the per-Vendor/drive Sense data, 31452 * the failed READ can cause many (unnecessary) retries. 31453 */ 31454 31455 if (ISCD(un) && (cmd == TG_READ) && 31456 (un->un_f_blockcount_is_valid == TRUE) && 31457 ((start_block == (un->un_blockcount - 1)) || 31458 (start_block == (un->un_blockcount - 2)))) { 31459 path_flag = SD_PATH_DIRECT_PRIORITY; 31460 } 31461 31462 mutex_exit(SD_MUTEX(un)); 31463 if (cmd == TG_READ) { 31464 rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr, 31465 buffer_size, real_addr, path_flag); 31466 if (dkl != NULL) 31467 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 31468 real_addr), bufaddr, reqlength); 31469 } else { 31470 if (dkl) { 31471 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 31472 real_addr, path_flag); 31473 if (rval) { 31474 goto done1; 31475 } 31476 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 31477 real_addr), reqlength); 31478 } 31479 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? 
dkl : bufaddr, 31480 buffer_size, real_addr, path_flag); 31481 } 31482 31483 done1: 31484 if (dkl != NULL) 31485 kmem_free(dkl, buffer_size); 31486 31487 if (rval != 0) { 31488 if (rval == EIO) 31489 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 31490 else 31491 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 31492 } 31493 done: 31494 sd_ssc_fini(ssc); 31495 return (rval); 31496 } 31497 31498 31499 static int 31500 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 31501 { 31502 31503 struct sd_lun *un; 31504 diskaddr_t cap; 31505 uint32_t lbasize; 31506 int path_flag = (int)(uintptr_t)tg_cookie; 31507 int ret = 0; 31508 31509 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31510 if (un == NULL) 31511 return (ENXIO); 31512 31513 switch (cmd) { 31514 case TG_GETPHYGEOM: 31515 case TG_GETVIRTGEOM: 31516 case TG_GETCAPACITY: 31517 case TG_GETBLOCKSIZE: 31518 mutex_enter(SD_MUTEX(un)); 31519 31520 if ((un->un_f_blockcount_is_valid == TRUE) && 31521 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 31522 cap = un->un_blockcount; 31523 lbasize = un->un_tgt_blocksize; 31524 mutex_exit(SD_MUTEX(un)); 31525 } else { 31526 sd_ssc_t *ssc; 31527 mutex_exit(SD_MUTEX(un)); 31528 ssc = sd_ssc_init(un); 31529 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31530 &lbasize, path_flag); 31531 if (ret != 0) { 31532 if (ret == EIO) 31533 sd_ssc_assessment(ssc, 31534 SD_FMT_STATUS_CHECK); 31535 else 31536 sd_ssc_assessment(ssc, 31537 SD_FMT_IGNORE); 31538 sd_ssc_fini(ssc); 31539 return (ret); 31540 } 31541 sd_ssc_fini(ssc); 31542 mutex_enter(SD_MUTEX(un)); 31543 sd_update_block_info(un, lbasize, cap); 31544 if ((un->un_f_blockcount_is_valid == FALSE) || 31545 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 31546 mutex_exit(SD_MUTEX(un)); 31547 return (EIO); 31548 } 31549 mutex_exit(SD_MUTEX(un)); 31550 } 31551 31552 if (cmd == TG_GETCAPACITY) { 31553 *(diskaddr_t *)arg = cap; 31554 return (0); 31555 } 31556 31557 if (cmd == TG_GETBLOCKSIZE) { 31558 *(uint32_t *)arg = lbasize; 31559 return (0); 31560 } 31561 31562 if (cmd == TG_GETPHYGEOM) 31563 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 31564 cap, lbasize, path_flag); 31565 else 31566 /* TG_GETVIRTGEOM */ 31567 ret = sd_get_virtual_geometry(un, 31568 (cmlb_geom_t *)arg, cap, lbasize); 31569 31570 return (ret); 31571 31572 case TG_GETATTR: 31573 mutex_enter(SD_MUTEX(un)); 31574 ((tg_attribute_t *)arg)->media_is_writable = 31575 un->un_f_mmc_writable_media; 31576 ((tg_attribute_t *)arg)->media_is_solid_state = 31577 un->un_f_is_solid_state; 31578 ((tg_attribute_t *)arg)->media_is_rotational = 31579 un->un_f_is_rotational; 31580 mutex_exit(SD_MUTEX(un)); 31581 return (0); 31582 default: 31583 return (ENOTTY); 31584 31585 } 31586 } 31587 31588 /* 31589 * Function: sd_ssc_ereport_post 31590 * 31591 * Description: Called when the SD driver needs to post an ereport. 31592 * 31593 * Context: Kernel thread or interrupt context. 31594 */ 31595 31596 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ?
(d) : "unknown" 31597 31598 static void 31599 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 31600 { 31601 int uscsi_path_instance = 0; 31602 uchar_t uscsi_pkt_reason; 31603 uint32_t uscsi_pkt_state; 31604 uint32_t uscsi_pkt_statistics; 31605 uint64_t uscsi_ena; 31606 uchar_t op_code; 31607 uint8_t *sensep; 31608 union scsi_cdb *cdbp; 31609 uint_t cdblen = 0; 31610 uint_t senlen = 0; 31611 struct sd_lun *un; 31612 dev_info_t *dip; 31613 char *devid; 31614 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 31615 SSC_FLAGS_INVALID_STATUS | 31616 SSC_FLAGS_INVALID_SENSE | 31617 SSC_FLAGS_INVALID_DATA; 31618 char assessment[16]; 31619 31620 ASSERT(ssc != NULL); 31621 ASSERT(ssc->ssc_uscsi_cmd != NULL); 31622 ASSERT(ssc->ssc_uscsi_info != NULL); 31623 31624 un = ssc->ssc_un; 31625 ASSERT(un != NULL); 31626 31627 dip = un->un_sd->sd_dev; 31628 31629 /* 31630 * Get the devid: 31631 * devid will only be passed to non-transport error reports. 31632 */ 31633 devid = DEVI(dip)->devi_devid_str; 31634 31635 /* 31636 * If we are syncing or dumping, the command will not be executed, 31637 * so we bypass this situation. 31638 */ 31639 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 31640 (un->un_state == SD_STATE_DUMPING)) 31641 return; 31642 31643 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 31644 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 31645 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 31646 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 31647 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 31648 31649 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 31650 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 31651 31652 /* In rare cases, e.g. DOORLOCK, the cdb could be NULL */ 31653 if (cdbp == NULL) { 31654 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 31655 "sd_ssc_ereport_post: encountered an empty cdb\n"); 31656 return; 31657 } 31658 31659 op_code = cdbp->scc_cmd; 31660 31661 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 31662 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 31663 ssc->ssc_uscsi_cmd->uscsi_rqresid); 31664 31665 if (senlen > 0) 31666 ASSERT(sensep != NULL); 31667 31668 /* 31669 * Map drv_assess to the corresponding assessment string. 31670 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 31671 * on the sense key returned. 31672 */ 31673 switch (drv_assess) { 31674 case SD_FM_DRV_RECOVERY: 31675 (void) sprintf(assessment, "%s", "recovered"); 31676 break; 31677 case SD_FM_DRV_RETRY: 31678 (void) sprintf(assessment, "%s", "retry"); 31679 break; 31680 case SD_FM_DRV_NOTICE: 31681 (void) sprintf(assessment, "%s", "info"); 31682 break; 31683 case SD_FM_DRV_FATAL: 31684 default: 31685 (void) sprintf(assessment, "%s", "unknown"); 31686 } 31687 /* 31688 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered 31689 * command and we post ereport.io.scsi.cmd.disk.recovered; 31690 * driver-assessment will always be "recovered" here.
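 *
 * For reference, the overall dispatch implemented below is:
 *
 *	recovered command		ereport.io.scsi.cmd.disk.recovered
 *	invalid/un-decodable data	ereport.io.scsi.cmd.disk.dev.uderr
 *	transport error			ereport.io.scsi.cmd.disk.tran
 *	sense data, medium error	ereport.io.scsi.cmd.disk.dev.rqs.merr
 *	sense data, other sense keys	ereport.io.scsi.cmd.disk.dev.rqs.derr
 *	bad status, no sense data	ereport.io.scsi.cmd.disk.dev.serr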
31691 */ 31692 if (drv_assess == SD_FM_DRV_RECOVERY) { 31693 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, 31694 "cmd.disk.recovered", uscsi_ena, devid, NULL, 31695 DDI_NOSLEEP, NULL, 31696 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31697 DEVID_IF_KNOWN(devid), 31698 "driver-assessment", DATA_TYPE_STRING, assessment, 31699 "op-code", DATA_TYPE_UINT8, op_code, 31700 "cdb", DATA_TYPE_UINT8_ARRAY, 31701 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31702 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31703 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31704 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 31705 NULL); 31706 return; 31707 } 31708 31709 /* 31710 * If there is unexpected or un-decodable data, we post 31711 * ereport.io.scsi.cmd.disk.dev.uderr; 31712 * driver-assessment will be set based on the drv_assess parameter. 31713 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 31714 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 31715 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 31716 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 31717 */ 31718 if (ssc->ssc_flags & ssc_invalid_flags) { 31719 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 31720 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31721 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid, 31722 NULL, DDI_NOSLEEP, NULL, 31723 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31724 DEVID_IF_KNOWN(devid), 31725 "driver-assessment", DATA_TYPE_STRING, 31726 drv_assess == SD_FM_DRV_FATAL ? 31727 "fail" : assessment, 31728 "op-code", DATA_TYPE_UINT8, op_code, 31729 "cdb", DATA_TYPE_UINT8_ARRAY, 31730 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31731 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31732 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31733 "pkt-stats", DATA_TYPE_UINT32, 31734 uscsi_pkt_statistics, 31735 "stat-code", DATA_TYPE_UINT8, 31736 ssc->ssc_uscsi_cmd->uscsi_status, 31737 "un-decode-info", DATA_TYPE_STRING, 31738 ssc->ssc_info, 31739 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31740 senlen, sensep, 31741 NULL); 31742 } else { 31743 /* 31744 * For other types of invalid data, the 31745 * un-decode-value field is empty because the 31746 * un-decodable content can be seen in the upper-level 31747 * payload or inside un-decode-info. 31748 */ 31749 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31750 NULL, 31751 "cmd.disk.dev.uderr", uscsi_ena, devid, 31752 NULL, DDI_NOSLEEP, NULL, 31753 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31754 DEVID_IF_KNOWN(devid), 31755 "driver-assessment", DATA_TYPE_STRING, 31756 drv_assess == SD_FM_DRV_FATAL ?
31757 "fail" : assessment, 31758 "op-code", DATA_TYPE_UINT8, op_code, 31759 "cdb", DATA_TYPE_UINT8_ARRAY, 31760 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31761 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31762 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31763 "pkt-stats", DATA_TYPE_UINT32, 31764 uscsi_pkt_statistics, 31765 "stat-code", DATA_TYPE_UINT8, 31766 ssc->ssc_uscsi_cmd->uscsi_status, 31767 "un-decode-info", DATA_TYPE_STRING, 31768 ssc->ssc_info, 31769 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31770 0, NULL, 31771 NULL); 31772 } 31773 ssc->ssc_flags &= ~ssc_invalid_flags; 31774 return; 31775 } 31776 31777 if (uscsi_pkt_reason != CMD_CMPLT || 31778 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 31779 /* 31780 * Either pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was 31781 * set inside sd_start_cmds due to an error (a bad packet or a 31782 * fatal transport error), so we treat it as a 31783 * transport error and post ereport.io.scsi.cmd.disk.tran; 31784 * driver-assessment will be set based on drv_assess. 31785 * We set devid to NULL because it is a transport 31786 * error. 31787 */ 31788 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 31789 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 31790 31791 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, 31792 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL, 31793 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31794 DEVID_IF_KNOWN(devid), 31795 "driver-assessment", DATA_TYPE_STRING, 31796 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31797 "op-code", DATA_TYPE_UINT8, op_code, 31798 "cdb", DATA_TYPE_UINT8_ARRAY, 31799 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31800 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31801 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 31802 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 31803 NULL); 31804 } else { 31805 /* 31806 * If we got here, we have a completed command, and we need 31807 * to further investigate the sense data to see what kind 31808 * of ereport we should post. 31809 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR 31810 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE". 31811 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is 31812 * KEY_MEDIUM_ERROR. 31813 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 31814 * driver-assessment will be set based on the parameter 31815 * drv_assess. 31816 */ 31817 if (senlen > 0) { 31818 /* 31819 * Here we have sense data available. 31820 */ 31821 uint8_t sense_key = scsi_sense_key(sensep); 31822 uint8_t sense_asc = scsi_sense_asc(sensep); 31823 uint8_t sense_ascq = scsi_sense_ascq(sensep); 31824 31825 if (sense_key == KEY_RECOVERABLE_ERROR && 31826 sense_asc == 0x00 && sense_ascq == 0x1d) 31827 return; 31828 31829 if (sense_key == KEY_MEDIUM_ERROR) { 31830 /* 31831 * driver-assessment should be "fatal" if 31832 * drv_assess is SD_FM_DRV_FATAL. 31833 */ 31834 scsi_fm_ereport_post(un->un_sd, 31835 uscsi_path_instance, NULL, 31836 "cmd.disk.dev.rqs.merr", 31837 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, 31838 FM_VERSION, DATA_TYPE_UINT8, 31839 FM_EREPORT_VERS0, 31840 DEVID_IF_KNOWN(devid), 31841 "driver-assessment", 31842 DATA_TYPE_STRING, 31843 drv_assess == SD_FM_DRV_FATAL ?
31844 "fatal" : assessment, 31845 "op-code", 31846 DATA_TYPE_UINT8, op_code, 31847 "cdb", 31848 DATA_TYPE_UINT8_ARRAY, cdblen, 31849 ssc->ssc_uscsi_cmd->uscsi_cdb, 31850 "pkt-reason", 31851 DATA_TYPE_UINT8, uscsi_pkt_reason, 31852 "pkt-state", 31853 DATA_TYPE_UINT8, uscsi_pkt_state, 31854 "pkt-stats", 31855 DATA_TYPE_UINT32, 31856 uscsi_pkt_statistics, 31857 "stat-code", 31858 DATA_TYPE_UINT8, 31859 ssc->ssc_uscsi_cmd->uscsi_status, 31860 "key", 31861 DATA_TYPE_UINT8, 31862 scsi_sense_key(sensep), 31863 "asc", 31864 DATA_TYPE_UINT8, 31865 scsi_sense_asc(sensep), 31866 "ascq", 31867 DATA_TYPE_UINT8, 31868 scsi_sense_ascq(sensep), 31869 "sense-data", 31870 DATA_TYPE_UINT8_ARRAY, 31871 senlen, sensep, 31872 "lba", 31873 DATA_TYPE_UINT64, 31874 ssc->ssc_uscsi_info->ui_lba, 31875 NULL); 31876 } else { 31877 /* 31878 * if sense-key == 0x4 (hardware 31879 * error), driver-assessment should 31880 * be "fatal" if drv_assess is 31881 * SD_FM_DRV_FATAL. 31882 */ 31883 scsi_fm_ereport_post(un->un_sd, 31884 uscsi_path_instance, NULL, 31885 "cmd.disk.dev.rqs.derr", 31886 uscsi_ena, devid, 31887 NULL, DDI_NOSLEEP, NULL, 31888 FM_VERSION, 31889 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31890 DEVID_IF_KNOWN(devid), 31891 "driver-assessment", 31892 DATA_TYPE_STRING, 31893 drv_assess == SD_FM_DRV_FATAL ? 31894 (sense_key == 0x4 ? 31895 "fatal" : "fail") : assessment, 31896 "op-code", 31897 DATA_TYPE_UINT8, op_code, 31898 "cdb", 31899 DATA_TYPE_UINT8_ARRAY, cdblen, 31900 ssc->ssc_uscsi_cmd->uscsi_cdb, 31901 "pkt-reason", 31902 DATA_TYPE_UINT8, uscsi_pkt_reason, 31903 "pkt-state", 31904 DATA_TYPE_UINT8, uscsi_pkt_state, 31905 "pkt-stats", 31906 DATA_TYPE_UINT32, 31907 uscsi_pkt_statistics, 31908 "stat-code", 31909 DATA_TYPE_UINT8, 31910 ssc->ssc_uscsi_cmd->uscsi_status, 31911 "key", 31912 DATA_TYPE_UINT8, 31913 scsi_sense_key(sensep), 31914 "asc", 31915 DATA_TYPE_UINT8, 31916 scsi_sense_asc(sensep), 31917 "ascq", 31918 DATA_TYPE_UINT8, 31919 scsi_sense_ascq(sensep), 31920 "sense-data", 31921 DATA_TYPE_UINT8_ARRAY, 31922 senlen, sensep, 31923 NULL); 31924 } 31925 } else { 31926 /* 31927 * For stat_code == STATUS_GOOD, this is not a 31928 * hardware error. 31929 */ 31930 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31931 return; 31932 31933 /* 31934 * Post ereport.io.scsi.cmd.disk.dev.serr if we got a 31935 * bad stat-code but sense data is unavailable; 31936 * driver-assessment will be set based on the parameter 31937 * drv_assess. 31938 */ 31939 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31940 NULL, 31941 "cmd.disk.dev.serr", uscsi_ena, 31942 devid, NULL, DDI_NOSLEEP, NULL, 31943 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31944 DEVID_IF_KNOWN(devid), 31945 "driver-assessment", DATA_TYPE_STRING, 31946 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31947 "op-code", DATA_TYPE_UINT8, op_code, 31948 "cdb", 31949 DATA_TYPE_UINT8_ARRAY, 31950 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31951 "pkt-reason", 31952 DATA_TYPE_UINT8, uscsi_pkt_reason, 31953 "pkt-state", 31954 DATA_TYPE_UINT8, uscsi_pkt_state, 31955 "pkt-stats", 31956 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31957 "stat-code", 31958 DATA_TYPE_UINT8, 31959 ssc->ssc_uscsi_cmd->uscsi_status, 31960 NULL); 31961 } 31962 } 31963 } 31964 31965 /* 31966 * Function: sd_ssc_extract_info 31967 * 31968 * Description: Extract information available to help generate an ereport. 31969 * 31970 * Context: Kernel thread or interrupt context.
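 *
 * The amount of sense data transferred into ssc is derived from the
 * xbuf state; a sketch of the selection performed in the body below:
 *
 *	if (xp->xb_sense_state & STATE_XARQ_DONE)
 *		senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
 *	else if (xp->xb_sense_state & STATE_ARQ_DONE)
 *		senlen = SENSE_LENGTH;
 *	else if (status == STATUS_CHECK and sense data was transferred)
 *		senlen = SENSE_LENGTH - xp->xb_sense_resid;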
31971 */ 31972 static void 31973 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 31974 struct buf *bp, struct sd_xbuf *xp) 31975 { 31976 size_t senlen = 0; 31977 union scsi_cdb *cdbp; 31978 int path_instance; 31979 /* 31980 * We need the scsi_cdb_size array to determine the cdb length. 31981 */ 31982 extern uchar_t scsi_cdb_size[]; 31983 31984 ASSERT(un != NULL); 31985 ASSERT(pktp != NULL); 31986 ASSERT(bp != NULL); 31987 ASSERT(xp != NULL); 31988 ASSERT(ssc != NULL); 31989 ASSERT(mutex_owned(SD_MUTEX(un))); 31990 31991 /* 31992 * Transfer the cdb buffer pointer here. 31993 */ 31994 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 31995 31996 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 31997 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 31998 31999 /* 32000 * Transfer the sense data buffer pointer if sense data is available; 32001 * calculate the sense data length first. 32002 */ 32003 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 32004 (xp->xb_sense_state & STATE_ARQ_DONE)) { 32005 /* 32006 * We enter here for the ARQ case. 32007 */ 32008 if (xp->xb_sense_state & STATE_XARQ_DONE) { 32009 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 32010 } else { 32011 senlen = SENSE_LENGTH; 32012 } 32013 } else { 32014 /* 32015 * We enter this branch for the non-ARQ case. 32016 */ 32017 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 32018 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 32019 senlen = SENSE_LENGTH - xp->xb_sense_resid; 32020 } 32021 32022 } 32023 32024 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 32025 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 32026 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 32027 32028 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 32029 32030 /* 32031 * Only transfer path_instance when the scsi_pkt was properly allocated. 32032 */ 32033 path_instance = pktp->pkt_path_instance; 32034 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 32035 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 32036 else 32037 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 32038 32039 /* 32040 * Copy in the other fields we may need when posting an ereport. 32041 */ 32042 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 32043 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 32044 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 32045 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 32046 32047 /* 32048 * For a command that completed successfully (good status, no sense 32049 * data), do not create an ena, lest it be mistakenly recognized as recovered. 32050 */ 32051 if ((pktp->pkt_reason == CMD_CMPLT) && 32052 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 32053 (senlen == 0)) { 32054 return; 32055 } 32056 32057 /* 32058 * To associate ereports of a single command execution flow, we 32059 * need a shared ena for a specific command. 32060 */ 32061 if (xp->xb_ena == 0) 32062 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 32063 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 32064 } 32065 32066 32067 /* 32068 * Function: sd_check_bdc_vpd 32069 * 32070 * Description: Query the optional INQUIRY VPD page 0xb1. If the device 32071 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION 32072 * RATE. 32073 * 32074 * Set the following based on RPM value: 32075 * = 0 device is not solid state, non-rotational 32076 * = 1 device is solid state, non-rotational 32077 * > 1 device is not solid state, rotational 32078 * 32079 * Context: Kernel thread or interrupt context.
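 *
 * The MEDIUM ROTATION RATE is a two-byte, big-endian field occupying
 * bytes 4 and 5 of VPD page 0xB1 (per SBC-3, 0 means the rate is not
 * reported, 1 means a non-rotating solid state device, and larger
 * values give the nominal rotation rate in RPM). A sketch of the
 * decoding performed in the body below:
 *
 *	uint16_t rate = (inqb1[4] << 8) | inqb1[5];
 *	rate == 1: solid state, non-rotational (disksort disabled)
 *	rate == 0: treated as non-rotational, but not solid state
 *	rate >  1: rotational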
32080 */ 32081 32082 static void 32083 sd_check_bdc_vpd(sd_ssc_t *ssc) 32084 { 32085 int rval = 0; 32086 uchar_t *inqb1 = NULL; 32087 size_t inqb1_len = MAX_INQUIRY_SIZE; 32088 size_t inqb1_resid = 0; 32089 struct sd_lun *un; 32090 32091 ASSERT(ssc != NULL); 32092 un = ssc->ssc_un; 32093 ASSERT(un != NULL); 32094 ASSERT(!mutex_owned(SD_MUTEX(un))); 32095 32096 mutex_enter(SD_MUTEX(un)); 32097 un->un_f_is_rotational = TRUE; 32098 un->un_f_is_solid_state = FALSE; 32099 32100 if (ISCD(un)) { 32101 mutex_exit(SD_MUTEX(un)); 32102 return; 32103 } 32104 32105 if (sd_check_vpd_page_support(ssc) == 0 && 32106 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) { 32107 mutex_exit(SD_MUTEX(un)); 32108 /* collect page b1 data */ 32109 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP); 32110 32111 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len, 32112 0x01, 0xB1, &inqb1_resid); 32113 32114 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) { 32115 SD_TRACE(SD_LOG_COMMON, un, 32116 "sd_check_bdc_vpd: \ 32117 successfully got VPD page: %x \ 32118 PAGE LENGTH: %x BYTE 4: %x \ 32119 BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4], 32120 inqb1[5]); 32121 32122 mutex_enter(SD_MUTEX(un)); 32123 /* 32124 * Check the MEDIUM ROTATION RATE. 32125 */ 32126 if (inqb1[4] == 0) { 32127 if (inqb1[5] == 0) { 32128 un->un_f_is_rotational = FALSE; 32129 } else if (inqb1[5] == 1) { 32130 un->un_f_is_rotational = FALSE; 32131 un->un_f_is_solid_state = TRUE; 32132 /* 32133 * Solid state drives don't need 32134 * disksort. 32135 */ 32136 un->un_f_disksort_disabled = TRUE; 32137 } 32138 } 32139 mutex_exit(SD_MUTEX(un)); 32140 } else if (rval != 0) { 32141 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 32142 } 32143 32144 kmem_free(inqb1, inqb1_len); 32145 } else { 32146 mutex_exit(SD_MUTEX(un)); 32147 } 32148 } 32149 32150 /* 32151 * Function: sd_check_emulation_mode 32152 * 32153 * Description: Check whether the SSD is in emulation mode 32154 * by issuing READ_CAPACITY_16 to see whether 32155 * we can get the physical block size of the drive. 32156 * 32157 * Context: Kernel thread or interrupt context. 32158 */ 32159 32160 static void 32161 sd_check_emulation_mode(sd_ssc_t *ssc) 32162 { 32163 int rval = 0; 32164 uint64_t capacity; 32165 uint_t lbasize; 32166 uint_t pbsize; 32167 int i; 32168 int devid_len; 32169 struct sd_lun *un; 32170 32171 ASSERT(ssc != NULL); 32172 un = ssc->ssc_un; 32173 ASSERT(un != NULL); 32174 ASSERT(!mutex_owned(SD_MUTEX(un))); 32175 32176 mutex_enter(SD_MUTEX(un)); 32177 if (ISCD(un)) { 32178 mutex_exit(SD_MUTEX(un)); 32179 return; 32180 } 32181 32182 if (un->un_f_descr_format_supported) { 32183 mutex_exit(SD_MUTEX(un)); 32184 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 32185 &pbsize, SD_PATH_DIRECT); 32186 mutex_enter(SD_MUTEX(un)); 32187 32188 if (rval != 0) { 32189 un->un_phy_blocksize = DEV_BSIZE; 32190 } else { 32191 if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) { 32192 un->un_phy_blocksize = DEV_BSIZE; 32193 } else if (pbsize > un->un_phy_blocksize) { 32194 /* 32195 * Don't reset the physical blocksize 32196 * unless we've detected a larger value.
32197 */ 32198 un->un_phy_blocksize = pbsize; 32199 } 32200 } 32201 } 32202 32203 for (i = 0; i < sd_flash_dev_table_size; i++) { 32204 devid_len = (int)strlen(sd_flash_dev_table[i]); 32205 if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len) 32206 == SD_SUCCESS) { 32207 un->un_phy_blocksize = SSD_SECSIZE; 32208 if (un->un_f_is_solid_state && 32209 un->un_phy_blocksize != un->un_tgt_blocksize) 32210 un->un_f_enable_rmw = TRUE; 32211 } 32212 } 32213 32214 mutex_exit(SD_MUTEX(un)); 32215 } 32216
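/*
 * A drive in emulation mode ("512e") reports a 512-byte logical block
 * size together with a larger physical block size. The effect of the
 * checks above can be observed from userland with the DKIOCGMEDIAINFOEXT
 * ioctl, which reports both sizes. A minimal sketch (device path
 * hypothetical, error handling elided):
 *
 *	struct dk_minfo_ext minfo;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *	if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFOEXT, &minfo) == 0)
 *		(void) printf("lbsize %u pbsize %u\n",
 *		    minfo.dki_lbsize, minfo.dki_pbsize);
 */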