/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 */
/*
 * Copyright 2011 cyril.galibern@opensvc.com
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
#else /* !__fibre */
#define SD_MODULE_NAME "SCSI Disk Driver"
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
#endif
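/*
 * Illustrative sketch only, not part of the driver: at attach time the
 * interconnect type would typically be read from the HBA with
 * scsi_ifgetcap(9F), falling back to the compile-time default above when
 * the HBA does not implement the capability.  SD_ADDRESS() is assumed to
 * be the per-unit scsi_address accessor from sddef.h:
 *
 *	int ict = scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1);
 *	un->un_interconnect_type =
 *	    (ict == -1) ? SD_DEFAULT_INTERCONNECT_TYPE : ict;
 */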
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size ssd_max_xfer_size
#define sd_config_list ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define sd_state ssd_state
#define sd_io_time ssd_io_time
#define sd_failfast_enable ssd_failfast_enable
#define sd_ua_retry_count ssd_ua_retry_count
#define sd_report_pfa ssd_report_pfa
#define sd_max_throttle ssd_max_throttle
#define sd_min_throttle ssd_min_throttle
#define sd_rot_delay ssd_rot_delay

#define sd_retry_on_reservation_conflict \
    ssd_retry_on_reservation_conflict
#define sd_reinstate_resv_delay ssd_reinstate_resv_delay
#define sd_resv_conflict_name ssd_resv_conflict_name

#define sd_component_mask ssd_component_mask
#define sd_level_mask ssd_level_mask
#define sd_debug_un ssd_debug_un
#define sd_error_level ssd_error_level

#define sd_xbuf_active_limit ssd_xbuf_active_limit
#define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit

#define sd_tr ssd_tr
#define sd_reset_throttle_timeout ssd_reset_throttle_timeout
#define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
#define sd_qfull_throttle_enable ssd_qfull_throttle_enable
#define sd_check_media_time ssd_check_media_time
#define sd_wait_cmds_complete ssd_wait_cmds_complete
#define sd_label_mutex ssd_label_mutex
#define sd_detach_mutex ssd_detach_mutex
#define sd_log_buf ssd_log_buf
#define sd_log_mutex ssd_log_mutex

#define sd_disk_table ssd_disk_table
#define sd_disk_table_size ssd_disk_table_size
#define sd_sense_mutex ssd_sense_mutex
#define sd_cdbtab ssd_cdbtab

#define sd_cb_ops ssd_cb_ops
#define sd_ops ssd_ops
#define sd_additional_codes ssd_additional_codes
#define sd_tgops ssd_tgops

#define sd_minor_data ssd_minor_data
#define sd_minor_data_efi ssd_minor_data_efi

#define sd_tq ssd_tq
#define sd_wmr_tq ssd_wmr_tq
#define sd_taskq_name ssd_taskq_name
#define sd_wmr_taskq_name ssd_wmr_taskq_name
#define sd_taskq_minalloc ssd_taskq_minalloc
#define sd_taskq_maxalloc ssd_taskq_maxalloc

#define sd_dump_format_string ssd_dump_format_string

#define sd_iostart_chain ssd_iostart_chain
#define sd_iodone_chain ssd_iodone_chain

#define sd_pm_idletime ssd_pm_idletime

#define sd_force_pm_supported ssd_force_pm_supported

#define sd_dtype_optical_bind ssd_dtype_optical_bind

#define sd_ssc_init ssd_ssc_init
#define sd_ssc_send ssd_ssc_send
#define sd_ssc_fini ssd_ssc_fini
#define sd_ssc_assessment ssd_ssc_assessment
#define sd_ssc_post ssd_ssc_post
#define sd_ssc_print ssd_ssc_print
#define sd_ssc_ereport_post ssd_ssc_ereport_post
#define sd_ssc_set_info ssd_ssc_set_info
#define sd_ssc_extract_info ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name =
    "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
    sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
    struct sd_scsi_hba_tgt_lun *next;
    dev_info_t *pdip;
    int nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define SD_SCSI_LUN_ATTACH 0
#define SD_SCSI_LUN_DETACH 1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
    struct sd_scsi_probe_cache *next;
    dev_info_t *pdip;
    int cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
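/*
 * Illustrative sketch, not part of the driver: with the cache in place, a
 * probe of a target already known to be unresponsive can be answered from
 * cache[] instead of re-issuing scsi_probe().  cp, tgt and rval are
 * hypothetical locals; the real logic lives in sd_scsi_probe_with_cache():
 *
 *	if (cp->cache[tgt] == SCSIPROBE_NORESP)
 *		return (SCSIPROBE_NORESP);	(known-absent target)
 *	rval = scsi_probe(devp, fn);		(otherwise really probe)
 *	cp->cache[tgt] = rval;			(and remember the result)
 */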
/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
    { "NAME=spindle-motor", "0=off", "1=on", NULL },
    {0, 100},
    {30, 0},
    {20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
    { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
        "3=active", NULL },
    {0, 0, 0, 100},
    {90, 90, 20, 0},
    {15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
    SD_TARGET_START_VALID,
    SD_TARGET_STANDBY,
    SD_TARGET_IDLE,
    SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
    SEAGATE_THROTTLE_VALUE, 0, 0,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables fujitsu_properties = {
    FUJITSU_THROTTLE_VALUE, 0, 0,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables ibm_properties = {
    IBM_THROTTLE_VALUE, 0, 0,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables purple_properties = {
    PURPLE_THROTTLE_VALUE, 0, 0,
    PURPLE_BUSY_RETRIES, PURPLE_RESET_RETRY_COUNT,
    PURPLE_RESERVE_RELEASE_TIME,
    0, 0, 0
};

static sd_tunables sve_properties = {
    SVE_THROTTLE_VALUE, 0, 0,
    SVE_BUSY_RETRIES, SVE_RESET_RETRY_COUNT, SVE_RESERVE_RELEASE_TIME,
    SVE_MIN_THROTTLE_VALUE, SVE_DISKSORT_DISABLED_FLAG, 0
};

static sd_tunables maserati_properties = {
    0, 0, 0,
    0, 0, 0,
    0, MASERATI_DISKSORT_DISABLED_FLAG, MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
    PIRUS_THROTTLE_VALUE, 0, PIRUS_NRR_COUNT,
    PIRUS_BUSY_RETRIES, PIRUS_RESET_RETRY_COUNT, 0,
    PIRUS_MIN_THROTTLE_VALUE, PIRUS_DISKSORT_DISABLED_FLAG,
    PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
    (defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
    ELITE_THROTTLE_VALUE, 0, 0,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables st31200n_properties = {
    ST31200N_THROTTLE_VALUE, 0, 0,
    0, 0, 0,
    0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
    LSI_THROTTLE_VALUE, 0, LSI_NOTREADY_RETRIES,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables symbios_properties = {
    SYMBIOS_THROTTLE_VALUE, 0, SYMBIOS_NOTREADY_RETRIES,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables lsi_properties = {
    0, 0, LSI_NOTREADY_RETRIES,
    0, 0, 0,
    0, 0, 0
};

static sd_tunables lsi_oem_properties = {
    0, 0, LSI_OEM_NOTREADY_RETRIES,
    0, 0, 0,
    0, 0, 0,
    1
};
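/*
 * Illustrative note, not from the original source: sd_tunables
 * initializers are positional, and only the slots whose SD_CONF_BSET_*
 * flag is set in the matching sd_disk_table entry below are consumed.
 * For example, an entry that pairs SD_CONF_BSET_THROTTLE with
 * lsi_properties_scsi uses its first slot (LSI_THROTTLE_VALUE), while
 * SD_CONF_BSET_NRR_COUNT selects the third (LSI_NOTREADY_RETRIES); the
 * remaining zeros are ignored.
 */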
#if (defined(SD_PROP_TST))

#define SD_TST_CTYPE_VAL CTYPE_CDROM
#define SD_TST_THROTTLE_VAL 16
#define SD_TST_NOTREADY_VAL 12
#define SD_TST_BUSY_VAL 60
#define SD_TST_RST_RETRY_VAL 36
#define SD_TST_RSV_REL_TIME 60

static sd_tunables tst_properties = {
    SD_TST_THROTTLE_VAL, SD_TST_CTYPE_VAL, SD_TST_NOTREADY_VAL,
    SD_TST_BUSY_VAL, SD_TST_RST_RETRY_VAL, SD_TST_RSV_REL_TIME,
    0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
    { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
    { "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
    { "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
    { "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
    { "IBM     1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "IBM     1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "DELL    MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "DELL    MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "LSI     INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SGI     TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SGI     IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
    { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
    { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
    { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
    { "SUN     T3", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_RSV_REL_TIME,
        &purple_properties },
    { "SUN     SESS01", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_RSV_REL_TIME |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED,
        &sve_properties },
    { "SUN     T4", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_RSV_REL_TIME,
        &purple_properties },
    { "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &maserati_properties },
    { "SUN     SE6920", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     SE6940", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     SE6330", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
    (defined(__i386) || defined(__amd64)))
    { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
    { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
    { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
    { "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
    { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
        &symbios_properties },
    { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
        &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
    { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },

    { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
    { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
        | SD_CONF_BSET_CTYPE
        | SD_CONF_BSET_NRR_COUNT
        | SD_CONF_BSET_FAB_DEVID
        | SD_CONF_BSET_NOCACHE
        | SD_CONF_BSET_BSY_RETRY_COUNT
        | SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_TRK_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4
        | SD_CONF_BSET_RST_RETRIES
        | SD_CONF_BSET_RSV_REL_TIME
        | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
    sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
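/*
 * Illustrative sketch, not part of the driver: a table device_id is
 * compared against the concatenated INQUIRY vid (8 bytes) and pid
 * (16 bytes) fields, only as far as the table string is defined.
 * SD_INQUIRY() is assumed to be the sddef.h accessor for the unit's
 * cached scsi_inquiry data, and inq_vid/inq_pid are contiguous:
 *
 *	if (bcmp(SD_INQUIRY(un)->inq_vid, "SEAGATE ST34371FC", 17) == 0)
 *		(apply &seagate_properties per SD_CONF_BSET_THROTTLE)
 *
 * Blank-padded and "*"-wildcard entries instead go through the looser
 * sd_blank_cmp()/substring-search paths described in the comment above.
 */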
/*
 * Emulation mode disk drive VID/PID table
 */
static char sd_flash_dev_table[][25] = {
    "ATA     MARVELL SD88SA02",
    "MARVELL SD88SA02",
    "TOSHIBA THNSNV05",
};

static const int sd_flash_dev_table_size =
    sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);

#define SD_INTERCONNECT_PARALLEL 0
#define SD_INTERCONNECT_FABRIC 1
#define SD_INTERCONNECT_FIBRE 2
#define SD_INTERCONNECT_SSA 3
#define SD_INTERCONNECT_SATA 4
#define SD_INTERCONNECT_SAS 5

#define SD_IS_PARALLEL_SCSI(un) \
    ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define SD_IS_SERIAL(un) \
    (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) || \
    ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define VPD_HEAD_OFFSET 3	/* size of head for vpd page */
#define VPD_PAGE_LENGTH 3	/* offset for page length data */
#define VPD_MODE_PAGE 1		/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define New_state(un, s) \
    (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define Restore_state(un) \
    { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
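/*
 * Illustrative sketch, not part of the driver: New_state() saves the
 * current state in un_last_state before switching, so a transient
 * excursion can be unwound with Restore_state().  SD_STATE_RWAIT is
 * assumed to be one of the sddef.h unit states:
 *
 *	New_state(un, SD_STATE_RWAIT);	(wait for resources)
 *	...
 *	Restore_state(un);		(back to the prior state)
 */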
static struct sd_cdbinfo sd_cdbtab[] = {
    { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
    { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
    { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
    { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace ssd_log_trace
#define sd_log_info ssd_log_info
#define sd_log_err ssd_log_err
#define sdprobe ssdprobe
#define sdinfo ssdinfo
#define sd_prop_op ssd_prop_op
#define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
#define sd_spin_up_unit ssd_spin_up_unit
#define sd_enable_descr_sense ssd_enable_descr_sense
#define sd_reenable_dsense_task ssd_reenable_dsense_task
#define sd_set_mmc_caps ssd_set_mmc_caps
#define sd_read_unit_properties ssd_read_unit_properties
#define sd_process_sdconf_file ssd_process_sdconf_file
#define sd_process_sdconf_table ssd_process_sdconf_table
#define sd_sdconf_id_match ssd_sdconf_id_match
#define sd_blank_cmp ssd_blank_cmp
#define sd_chk_vers1_data ssd_chk_vers1_data
#define sd_set_vers1_properties ssd_set_vers1_properties
#define sd_check_solid_state ssd_check_solid_state
#define sd_check_emulation_mode ssd_check_emulation_mode

#define sd_get_physical_geometry ssd_get_physical_geometry
#define sd_get_virtual_geometry ssd_get_virtual_geometry
#define sd_update_block_info ssd_update_block_info
#define sd_register_devid ssd_register_devid
#define sd_get_devid ssd_get_devid
#define sd_create_devid ssd_create_devid
#define sd_write_deviceid ssd_write_deviceid
#define sd_check_vpd_page_support ssd_check_vpd_page_support
#define sd_setup_pm ssd_setup_pm
#define sd_create_pm_components ssd_create_pm_components
#define sd_ddi_suspend ssd_ddi_suspend
#define sd_ddi_resume ssd_ddi_resume
#define sd_pm_state_change ssd_pm_state_change
#define sdpower ssdpower
#define sdattach ssdattach
#define sddetach ssddetach
#define sd_unit_attach ssd_unit_attach
#define sd_unit_detach ssd_unit_detach
#define sd_set_unit_attributes ssd_set_unit_attributes
#define sd_create_errstats ssd_create_errstats
#define sd_set_errstats ssd_set_errstats
#define sd_set_pstats ssd_set_pstats
#define sddump ssddump
#define sd_scsi_poll ssd_scsi_poll
#define sd_send_polled_RQS ssd_send_polled_RQS
#define sd_ddi_scsi_poll ssd_ddi_scsi_poll
#define sd_init_event_callbacks ssd_init_event_callbacks
#define sd_event_callback ssd_event_callback
#define sd_cache_control ssd_cache_control
#define sd_get_write_cache_enabled ssd_get_write_cache_enabled
#define sd_get_nv_sup ssd_get_nv_sup
#define sd_make_device ssd_make_device
#define sdopen ssdopen
#define sdclose ssdclose
#define sd_ready_and_valid ssd_ready_and_valid
#define sdmin ssdmin
#define sdread ssdread
#define sdwrite ssdwrite
#define sdaread ssdaread
#define sdawrite ssdawrite
#define sdstrategy ssdstrategy
#define sdioctl ssdioctl
#define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart ssd_mapblocksize_iostart
#define sd_checksum_iostart ssd_checksum_iostart
#define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
#define sd_pm_iostart ssd_pm_iostart
#define sd_core_iostart ssd_core_iostart
#define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone ssd_mapblocksize_iodone
#define sd_checksum_iodone ssd_checksum_iodone
#define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
#define sd_pm_iodone ssd_pm_iodone
#define sd_initpkt_for_buf ssd_initpkt_for_buf
#define sd_destroypkt_for_buf ssd_destroypkt_for_buf
#define sd_setup_rw_pkt ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
#define sd_buf_iodone ssd_buf_iodone
#define sd_uscsi_strategy ssd_uscsi_strategy
#define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone ssd_uscsi_iodone
#define sd_xbuf_strategy ssd_xbuf_strategy
#define sd_xbuf_init ssd_xbuf_init
#define sd_pm_entry ssd_pm_entry
#define sd_pm_exit ssd_pm_exit

#define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler ssd_pm_timeout_handler

#define sd_add_buf_to_waitq ssd_add_buf_to_waitq
#define sdintr ssdintr
#define sd_start_cmds ssd_start_cmds
#define sd_send_scsi_cmd ssd_send_scsi_cmd
#define sd_bioclone_alloc ssd_bioclone_alloc
#define sd_bioclone_free ssd_bioclone_free
#define sd_shadow_buf_alloc ssd_shadow_buf_alloc
#define sd_shadow_buf_free ssd_shadow_buf_free
#define sd_print_transport_rejected_message \
    ssd_print_transport_rejected_message
#define sd_retry_command ssd_retry_command
#define sd_set_retry_bp ssd_set_retry_bp
#define sd_send_request_sense_command ssd_send_request_sense_command
#define sd_start_retry_command ssd_start_retry_command
#define sd_start_direct_priority_command \
    ssd_start_direct_priority_command
#define sd_return_failed_command ssd_return_failed_command
#define sd_return_failed_command_no_restart \
    ssd_return_failed_command_no_restart
#define sd_return_command ssd_return_command
#define sd_sync_with_callback ssd_sync_with_callback
#define sdrunout ssdrunout
#define sd_mark_rqs_busy ssd_mark_rqs_busy
#define sd_mark_rqs_idle ssd_mark_rqs_idle
#define sd_reduce_throttle ssd_reduce_throttle
#define sd_restore_throttle ssd_restore_throttle
#define sd_print_incomplete_msg ssd_print_incomplete_msg
#define sd_init_cdb_limits ssd_init_cdb_limits
#define sd_pkt_status_good ssd_pkt_status_good
#define sd_pkt_status_check_condition ssd_pkt_status_check_condition
#define sd_pkt_status_busy ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict \
    ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull ssd_pkt_status_qfull
#define sd_handle_request_sense ssd_handle_request_sense
#define sd_handle_auto_request_sense ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg ssd_print_sense_failed_msg
#define sd_validate_sense_data ssd_validate_sense_data
#define sd_decode_sense ssd_decode_sense
#define sd_print_sense_msg ssd_print_sense_msg
#define sd_sense_key_no_sense ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error \
    ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention ssd_sense_key_unit_attention
#define sd_sense_key_fail_command ssd_sense_key_fail_command
#define sd_sense_key_blank_check ssd_sense_key_blank_check
#define sd_sense_key_aborted_command ssd_sense_key_aborted_command
#define sd_sense_key_default ssd_sense_key_default
#define sd_print_retry_msg ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default ssd_pkt_reason_default
#define sd_reset_target ssd_reset_target
#define sd_start_stop_unit_callback ssd_start_stop_unit_callback
#define sd_start_stop_unit_task ssd_start_stop_unit_task
#define sd_taskq_create ssd_taskq_create
#define sd_taskq_delete ssd_taskq_delete
#define sd_target_change_task ssd_target_change_task
#define sd_log_dev_status_event ssd_log_dev_status_event
#define sd_log_lun_expansion_event ssd_log_lun_expansion_event
#define sd_log_eject_request_event ssd_log_eject_request_event
#define sd_media_change_task ssd_media_change_task
#define sd_handle_mchange ssd_handle_mchange
#define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION \
    ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN \
    ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
    ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
    ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
    ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define sd_gesn_media_data_valid ssd_gesn_media_data_valid
#define sd_alloc_rqs ssd_alloc_rqs
#define sd_free_rqs ssd_free_rqs
#define sd_dump_memory ssd_dump_memory
#define sd_get_media_info_com ssd_get_media_info_com
#define sd_get_media_info ssd_get_media_info
#define sd_get_media_info_ext ssd_get_media_info_ext
#define sd_dkio_ctrl_info ssd_dkio_ctrl_info
#define sd_nvpair_str_decode ssd_nvpair_str_decode
#define sd_strtok_r ssd_strtok_r
#define sd_set_properties ssd_set_properties
#define sd_get_tunables_from_conf ssd_get_tunables_from_conf
#define sd_setup_next_xfer ssd_setup_next_xfer
#define sd_dkio_get_temp ssd_dkio_get_temp
#define sd_check_mhd ssd_check_mhd
#define sd_mhd_watch_cb ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
#define sd_sname ssd_sname
#define sd_mhd_resvd_recover ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread ssd_resv_reclaim_thread
#define sd_take_ownership ssd_take_ownership
#define sd_reserve_release ssd_reserve_release
#define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys \
    ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv \
    ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown ssd_mhdioc_takeown
#define sd_mhdioc_failfast ssd_mhdioc_failfast
#define sd_mhdioc_release ssd_mhdioc_release
#define sd_mhdioc_register_devid ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys ssd_mhdioc_inkeys
#define sd_mhdioc_inresv ssd_mhdioc_inresv
#define sr_change_blkmode ssr_change_blkmode
#define sr_change_speed ssr_change_speed
#define sr_atapi_change_speed ssr_atapi_change_speed
#define sr_pause_resume ssr_pause_resume
#define sr_play_msf ssr_play_msf
#define sr_play_trkind ssr_play_trkind
#define sr_read_all_subcodes ssr_read_all_subcodes
#define sr_read_subchannel ssr_read_subchannel
#define sr_read_tocentry ssr_read_tocentry
#define sr_read_tochdr ssr_read_tochdr
#define sr_read_cdda ssr_read_cdda
#define sr_read_cdxa ssr_read_cdxa
#define sr_read_mode1 ssr_read_mode1
#define sr_read_mode2 ssr_read_mode2
#define sr_read_cd_mode2 ssr_read_cd_mode2
#define sr_sector_mode ssr_sector_mode
#define sr_eject ssr_eject
#define sr_ejected ssr_ejected
#define sr_check_wp ssr_check_wp
#define sd_watch_request_submit ssd_watch_request_submit
#define sd_check_media ssd_check_media
#define sd_media_watch_cb ssd_media_watch_cb
#define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
#define sr_volume_ctrl ssr_volume_ctrl
#define sr_read_sony_session_offset ssr_read_sony_session_offset
#define sd_log_page_supported ssd_log_page_supported
#define sd_check_for_writable_cd ssd_check_for_writable_cd
#define sd_wm_cache_constructor ssd_wm_cache_constructor
#define sd_wm_cache_destructor ssd_wm_cache_destructor
#define sd_range_lock ssd_range_lock
#define sd_get_range ssd_get_range
#define sd_free_inlist_wmap ssd_free_inlist_wmap
#define sd_range_unlock ssd_range_unlock
#define sd_read_modify_write_task ssd_read_modify_write_task
#define sddump_do_read_of_rmw ssddump_do_read_of_rmw

#define sd_iostart_chain ssd_iostart_chain
#define sd_iodone_chain ssd_iodone_chain
#define sd_initpkt_map ssd_initpkt_map
#define sd_destroypkt_map ssd_destroypkt_map
#define sd_chain_type_map ssd_chain_type_map
#define sd_chain_index_map ssd_chain_index_map

#define sd_failfast_flushctl ssd_failfast_flushctl
#define sd_failfast_flushq ssd_failfast_flushq
#define sd_failfast_flushq_callback ssd_failfast_flushq_callback

#define sd_is_lsi ssd_is_lsi
#define sd_tg_rdwr ssd_tg_rdwr
#define sd_tg_getinfo ssd_tg_getinfo
#define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);
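/*
 * Illustrative sketch, not part of the driver: _init() would typically
 * create the soft-state anchor declared above (sd_state) before
 * registering the module, with _fini() tearing down in reverse.
 * modlinkage is the module's linkage structure, declared elsewhere:
 *
 *	if ((err = ddi_soft_state_init(&sd_state,
 *	    sizeof (struct sd_lun), 0)) != 0)
 *		return (err);
 *	if ((err = mod_install(&modlinkage)) != 0)
 *		ddi_soft_state_fini(&sd_state);
 *	return (err);
 */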
/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Using sd_ssc_init to establish sd_ssc_t struct
 * Using sd_ssc_send to send uscsi internal command
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *	sd_ssc_post will call sd_ssc_print to print system log
 *	sd_ssc_post will call sd_ssc_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);
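/*
 * Illustrative sketch, not part of the driver: a typical internal-command
 * sequence brackets sd_ssc_send() with init/assessment/fini.  ucmd is a
 * caller-built uscsi_cmd; SD_PATH_DIRECT and SD_FMT_STANDARD are assumed
 * sddef.h constants:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	int rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	sd_ssc_fini(ssc);
 */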
/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define SD_CACHE_ENABLE 1
#define SD_CACHE_DISABLE 0
#define SD_CACHE_NOCHANGE -1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_solid_state(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
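/*
 * Illustrative note, not from the original source: the iostart/iodone
 * functions above are layered.  Each iostart function performs its own
 * transformation on the buf and then, conceptually, dispatches the next
 * entry in the configured chain:
 *
 *	sd_mapblockaddr_iostart(index, un, bp);
 *	    (next layer runs via the sd_iostart_chain[] table at index + 1)
 *
 * Completion unwinds through the matching iodone chain in the opposite
 * order.
 */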
/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));
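/*
 * Illustrative sketch, not part of the driver: a failed packet is
 * typically requeued through sd_retry_command() with an optional message
 * callback.  SD_RETRIES_STANDARD and SD_RESTART_TIMEOUT are assumed
 * sd.c constants:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    sd_print_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT,
 *	    kstat_waitq_enter);
 */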
static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
sd_taskq_create(void); 1510 static void sd_taskq_delete(void); 1511 static void sd_target_change_task(void *arg); 1512 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1513 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1514 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1515 static void sd_media_change_task(void *arg); 1516 1517 static int sd_handle_mchange(struct sd_lun *un); 1518 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1519 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1520 uint32_t *lbap, int path_flag); 1521 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1522 uint32_t *lbap, uint32_t *psp, int path_flag); 1523 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1524 int flag, int path_flag); 1525 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1526 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1527 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1528 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1529 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1530 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1531 uchar_t usr_cmd, uchar_t *usr_bufp); 1532 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1533 struct dk_callback *dkc); 1534 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1535 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1536 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1537 uchar_t *bufaddr, uint_t buflen, int path_flag); 1538 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1539 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1540 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1541 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1542 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1543 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1544 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1545 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1546 size_t buflen, daddr_t start_block, int path_flag); 1547 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1548 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1549 path_flag) 1550 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1551 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1552 path_flag) 1553 1554 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1555 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1556 uint16_t param_ptr, int path_flag); 1557 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1558 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1559 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1560 1561 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1562 static void sd_free_rqs(struct sd_lun *un); 1563 1564 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1565 uchar_t *data, int len, int fmt); 1566 static void sd_panic_for_res_conflict(struct sd_lun *un); 1567 1568 /* 1569 * Disk Ioctl Function Prototypes 1570 */ 1571 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1572 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1573 static int 
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1574 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1575 1576 /* 1577 * Multi-host Ioctl Prototypes 1578 */ 1579 static int sd_check_mhd(dev_t dev, int interval); 1580 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1581 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1582 static char *sd_sname(uchar_t status); 1583 static void sd_mhd_resvd_recover(void *arg); 1584 static void sd_resv_reclaim_thread(); 1585 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1586 static int sd_reserve_release(dev_t dev, int cmd); 1587 static void sd_rmv_resv_reclaim_req(dev_t dev); 1588 static void sd_mhd_reset_notify_cb(caddr_t arg); 1589 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1590 mhioc_inkeys_t *usrp, int flag); 1591 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1592 mhioc_inresvs_t *usrp, int flag); 1593 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1594 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1595 static int sd_mhdioc_release(dev_t dev); 1596 static int sd_mhdioc_register_devid(dev_t dev); 1597 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1598 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1599 1600 /* 1601 * SCSI removable prototypes 1602 */ 1603 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1604 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1605 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1606 static int sr_pause_resume(dev_t dev, int mode); 1607 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1608 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1609 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1610 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1611 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1612 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1613 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1614 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1615 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1616 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1617 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1618 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1619 static int sr_eject(dev_t dev); 1620 static void sr_ejected(register struct sd_lun *un); 1621 static int sr_check_wp(dev_t dev); 1622 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1623 static int sd_check_media(dev_t dev, enum dkio_state state); 1624 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1625 static void sd_delayed_cv_broadcast(void *arg); 1626 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1627 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1628 1629 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1630 1631 /* 1632 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1633 */ 1634 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1635 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1636 static void sd_wm_cache_destructor(void *wm, void *un); 1637 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1638 daddr_t endb, ushort_t typ); 1639 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1640 daddr_t endb); 1641 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1642 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1643 static void sd_read_modify_write_task(void * arg); 1644 static int 1645 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1646 struct buf **bpp); 1647 1648 1649 /* 1650 * Function prototypes for failfast support. 1651 */ 1652 static void sd_failfast_flushq(struct sd_lun *un); 1653 static int sd_failfast_flushq_callback(struct buf *bp); 1654 1655 /* 1656 * Function prototypes to check for lsi devices 1657 */ 1658 static void sd_is_lsi(struct sd_lun *un); 1659 1660 /* 1661 * Function prototypes for partial DMA support 1662 */ 1663 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1664 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1665 1666 1667 /* Function prototypes for cmlb */ 1668 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1669 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1670 1671 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1672 1673 /* 1674 * For printing RMW warning message timely 1675 */ 1676 static void sd_rmw_msg_print_handler(void *arg); 1677 1678 /* 1679 * Constants for failfast support: 1680 * 1681 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1682 * failfast processing being performed. 1683 * 1684 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1685 * failfast processing on all bufs with B_FAILFAST set. 1686 */ 1687 1688 #define SD_FAILFAST_INACTIVE 0 1689 #define SD_FAILFAST_ACTIVE 1 1690 1691 /* 1692 * Bitmask to control behavior of buf(9S) flushes when a transition to 1693 * the failfast state occurs. Optional bits include: 1694 * 1695 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1696 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1697 * be flushed. 1698 * 1699 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1700 * driver, in addition to the regular wait queue. This includes the xbuf 1701 * queues. When clear, only the driver's wait queue will be flushed. 1702 */ 1703 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1704 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1705 1706 /* 1707 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1708 * to flush all queues within the driver. 
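 *
 * Illustrative example (an assumed site-tuning scenario, not a shipped
 * default): a configuration that wants every buf flushed on a failfast
 * transition would set both bits, e.g. via the standard /etc/system
 * module-tunable syntax:
 *
 *	set sd:sd_failfast_flushctl = 0x3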
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;


/*
 * SD Testing Fault Injection
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 */
static struct cb_ops sd_cb_ops = {
    sdopen,			/* open */
    sdclose,		/* close */
    sdstrategy,		/* strategy */
    nodev,			/* print */
    sddump,			/* dump */
    sdread,			/* read */
    sdwrite,		/* write */
    sdioctl,		/* ioctl */
    nodev,			/* devmap */
    nodev,			/* mmap */
    nodev,			/* segmap */
    nochpoll,		/* poll */
    sd_prop_op,		/* cb_prop_op */
    0,			/* streamtab */
    D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
    CB_REV,			/* cb_rev */
    sdaread,		/* async I/O read entry point */
    sdawrite		/* async I/O write entry point */
};

struct dev_ops sd_ops = {
    DEVO_REV,		/* devo_rev, */
    0,			/* refcnt */
    sdinfo,			/* info */
    nulldev,		/* identify */
    sdprobe,		/* probe */
    sdattach,		/* attach */
    sddetach,		/* detach */
    nodev,			/* reset */
    &sd_cb_ops,		/* driver operations */
    NULL,			/* bus operations */
    sdpower,		/* power */
    ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

#ifndef XPV_HVM_DRIVER
static struct modldrv modldrv = {
    &mod_driverops,		/* Type of module. This one is a driver */
    SD_MODULE_NAME,		/* Module name. */
    &sd_ops			/* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, &modldrv, NULL
};

#else /* XPV_HVM_DRIVER */
static struct modlmisc modlmisc = {
    &mod_miscops,		/* Type of module. This one is a misc */
    "HVM " SD_MODULE_NAME,	/* Module name. */
};

static struct modlinkage modlinkage = {
    MODREV_1, &modlmisc, NULL
};

#endif /* XPV_HVM_DRIVER */

static cmlb_tg_ops_t sd_tgops = {
    TG_DK_OPS_VERSION_1,
    sd_tg_rdwr,
    sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
    0x81, 0, "Logical Unit is Reserved",
    0x85, 0, "Audio Address Not Valid",
    0xb6, 0, "Media Load Mechanism Failed",
    0xb9, 0, "Audio Play Operation Aborted",
    0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
    0x53, 2, "Medium removal prevented",
    0x6f, 0, "Authentication failed during key exchange",
    0x6f, 1, "Key not present",
    0x6f, 2, "Key not established",
    0x6f, 3, "Read without proper authentication",
    0x6f, 4, "Mismatched region to this logical unit",
    0x6f, 5, "Region reset count error",
    0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
    int	ssi_severity;
    int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart().
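 *
 * Illustrative sketch (not compiled; it only restates the dispatch macros
 * defined later in this file): sending a buf down the PM-enabled disk
 * chain amounts to
 *
 *	int i = SD_CHAIN_DISK_IOSTART;
 *	(*(sd_iostart_chain[i]))(i, un, bp);
 *
 * after which each layer invokes the entry at index i + 1 until
 * sd_core_iostart() issues the command to the target.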
 * The corresponding iodone-side routines are expected in the
 * sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_mapblockaddr_iostart,	/* Index: 0 */
    sd_pm_iostart,		/* Index: 1 */
    sd_core_iostart,		/* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_mapblockaddr_iostart,	/* Index: 3 */
    sd_core_iostart,		/* Index: 4 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets with RMW needed (PM enabled)
     */
    sd_mapblockaddr_iostart,	/* Index: 5 */
    sd_mapblocksize_iostart,	/* Index: 6 */
    sd_pm_iostart,		/* Index: 7 */
    sd_core_iostart,		/* Index: 8 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets with RMW needed (PM disabled)
     */
    sd_mapblockaddr_iostart,	/* Index: 9 */
    sd_mapblocksize_iostart,	/* Index: 10 */
    sd_core_iostart,		/* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_mapblockaddr_iostart,	/* Index: 12 */
    sd_checksum_iostart,	/* Index: 13 */
    sd_pm_iostart,		/* Index: 14 */
    sd_core_iostart,		/* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_mapblockaddr_iostart,	/* Index: 16 */
    sd_checksum_iostart,	/* Index: 17 */
    sd_core_iostart,		/* Index: 18 */

    /* Chain for USCSI commands (all targets) */
    sd_pm_iostart,		/* Index: 19 */
    sd_core_iostart,		/* Index: 20 */

    /* Chain for checksumming USCSI commands (all targets) */
    sd_checksum_uscsi_iostart,	/* Index: 21 */
    sd_pm_iostart,		/* Index: 22 */
    sd_core_iostart,		/* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_core_iostart,		/* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_core_iostart,		/* Index: 25 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with RMW needed with checksumming (PM enabled)
     */
    sd_mapblockaddr_iostart,	/* Index: 26 */
    sd_mapblocksize_iostart,	/* Index: 27 */
    sd_checksum_iostart,	/* Index: 28 */
    sd_pm_iostart,		/* Index: 29 */
    sd_core_iostart,		/* Index: 30 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with RMW needed with checksumming (PM disabled)
     */
    sd_mapblockaddr_iostart,	/* Index: 31 */
    sd_mapblocksize_iostart,	/* Index: 32 */
    sd_checksum_iostart,	/* Index: 33 */
    sd_core_iostart,		/* Index: 34 */

};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
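 *
 * For example (reading directly from the table above), a plain disk with
 * power management enabled enters at SD_CHAIN_DISK_IOSTART (index 0),
 * while the same disk with PM disabled enters at
 * SD_CHAIN_DISK_IOSTART_NO_PM (index 3), skipping sd_pm_iostart.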
 */
#define SD_CHAIN_DISK_IOSTART			0
#define SD_CHAIN_DISK_IOSTART_NO_PM		3
#define SD_CHAIN_MSS_DISK_IOSTART		5
#define SD_CHAIN_RMMEDIA_IOSTART		5
#define SD_CHAIN_MSS_DISK_IOSTART_NO_PM		9
#define SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define SD_CHAIN_CHKSUM_IOSTART			12
#define SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define SD_CHAIN_USCSI_CMD_IOSTART		19
#define SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define SD_CHAIN_DIRECT_CMD_IOSTART		24
#define SD_CHAIN_PRIORITY_CMD_IOSTART		25
#define SD_CHAIN_MSS_CHKSUM_IOSTART		26
#define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM	31


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_buf_iodone,		/* Index: 0 */
    sd_mapblockaddr_iodone,	/* Index: 1 */
    sd_pm_iodone,		/* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_buf_iodone,		/* Index: 3 */
    sd_mapblockaddr_iodone,	/* Index: 4 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets with RMW needed (PM enabled)
     */
    sd_buf_iodone,		/* Index: 5 */
    sd_mapblockaddr_iodone,	/* Index: 6 */
    sd_mapblocksize_iodone,	/* Index: 7 */
    sd_pm_iodone,		/* Index: 8 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets with RMW needed (PM disabled)
     */
    sd_buf_iodone,		/* Index: 9 */
    sd_mapblockaddr_iodone,	/* Index: 10 */
    sd_mapblocksize_iodone,	/* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_buf_iodone,		/* Index: 12 */
    sd_mapblockaddr_iodone,	/* Index: 13 */
    sd_checksum_iodone,		/* Index: 14 */
    sd_pm_iodone,		/* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_buf_iodone,		/* Index: 16 */
    sd_mapblockaddr_iodone,	/* Index: 17 */
    sd_checksum_iodone,		/* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_uscsi_iodone,		/* Index: 19 */
    sd_pm_iodone,		/* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_uscsi_iodone,		/* Index: 21 */
    sd_checksum_uscsi_iodone,	/* Index: 22 */
    sd_pm_iodone,		/* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_uscsi_iodone,		/* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_uscsi_iodone,		/* Index: 25 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM enabled)
     */
    sd_buf_iodone,		/* Index: 26 */
    sd_mapblockaddr_iodone,	/* Index: 27 */
    sd_mapblocksize_iodone,	/* Index: 28 */
    sd_checksum_iodone,		/* Index: 29 */
    sd_pm_iodone,		/* Index: 30 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM disabled)
     */
    sd_buf_iodone,		/* Index: 31 */
    sd_mapblockaddr_iodone,	/* Index: 32 */
    sd_mapblocksize_iodone,	/* Index: 33 */
    sd_checksum_iodone,		/* Index: 34 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define SD_CHAIN_DISK_IODONE			2
#define SD_CHAIN_DISK_IODONE_NO_PM		4
#define SD_CHAIN_RMMEDIA_IODONE			8
#define SD_CHAIN_MSS_DISK_IODONE		8
#define SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define SD_CHAIN_MSS_DISK_IODONE_NO_PM		11
#define SD_CHAIN_CHKSUM_IODONE			15
#define SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define SD_CHAIN_USCSI_CMD_IODONE		20
#define SD_CHAIN_USCSI_CHKSUM_IODONE		23
#define SD_CHAIN_DIRECT_CMD_IODONE		24
#define SD_CHAIN_PRIORITY_CMD_IODONE		25
#define SD_CHAIN_MSS_CHKSUM_IODONE		30
#define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM	34


/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t sd_initpkt_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_initpkt_for_buf,		/* Index: 0 */
    sd_initpkt_for_buf,		/* Index: 1 */
    sd_initpkt_for_buf,		/* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_initpkt_for_buf,		/* Index: 3 */
    sd_initpkt_for_buf,		/* Index: 4 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM enabled)
     */
    sd_initpkt_for_buf,		/* Index: 5 */
    sd_initpkt_for_buf,		/* Index: 6 */
    sd_initpkt_for_buf,		/* Index: 7 */
    sd_initpkt_for_buf,		/* Index: 8 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM disabled)
     */
    sd_initpkt_for_buf,		/* Index: 9 */
    sd_initpkt_for_buf,		/* Index: 10 */
    sd_initpkt_for_buf,		/* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_initpkt_for_buf,		/* Index: 12 */
    sd_initpkt_for_buf,		/* Index: 13 */
    sd_initpkt_for_buf,		/* Index: 14 */
    sd_initpkt_for_buf,		/* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_initpkt_for_buf,		/* Index: 16 */
    sd_initpkt_for_buf,		/* Index: 17 */
    sd_initpkt_for_buf,		/* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_initpkt_for_uscsi,	/* Index: 19 */
    sd_initpkt_for_uscsi,	/* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_initpkt_for_uscsi,	/* Index: 21 */
    sd_initpkt_for_uscsi,	/* Index: 22 */
    sd_initpkt_for_uscsi,	/* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_initpkt_for_uscsi,	/* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_initpkt_for_uscsi,	/* Index: 25 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM enabled)
     */
    sd_initpkt_for_buf,		/* Index: 26 */
    sd_initpkt_for_buf,		/* Index: 27 */
    sd_initpkt_for_buf,		/* Index: 28 */
    sd_initpkt_for_buf,		/* Index: 29 */
    sd_initpkt_for_buf,		/* Index: 30 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM disabled)
     */
    sd_initpkt_for_buf,		/* Index: 31 */
    sd_initpkt_for_buf,		/* Index: 32 */
    sd_initpkt_for_buf,		/* Index: 33 */
    sd_initpkt_for_buf,		/* Index: 34 */
};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t sd_destroypkt_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_destroypkt_for_buf,	/* Index: 0 */
    sd_destroypkt_for_buf,	/* Index: 1 */
    sd_destroypkt_for_buf,	/* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_destroypkt_for_buf,	/* Index: 3 */
    sd_destroypkt_for_buf,	/* Index: 4 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM enabled)
     */
    sd_destroypkt_for_buf,	/* Index: 5 */
    sd_destroypkt_for_buf,	/* Index: 6 */
    sd_destroypkt_for_buf,	/* Index: 7 */
    sd_destroypkt_for_buf,	/* Index: 8 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM disabled)
     */
    sd_destroypkt_for_buf,	/* Index: 9 */
    sd_destroypkt_for_buf,	/* Index: 10 */
    sd_destroypkt_for_buf,	/* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_destroypkt_for_buf,	/* Index: 12 */
    sd_destroypkt_for_buf,	/* Index: 13 */
    sd_destroypkt_for_buf,	/* Index: 14 */
    sd_destroypkt_for_buf,	/* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_destroypkt_for_buf,	/* Index: 16 */
    sd_destroypkt_for_buf,	/* Index: 17 */
    sd_destroypkt_for_buf,	/* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_destroypkt_for_uscsi,	/* Index: 19 */
    sd_destroypkt_for_uscsi,	/* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_destroypkt_for_uscsi,	/* Index: 21 */
    sd_destroypkt_for_uscsi,	/* Index: 22 */
    sd_destroypkt_for_uscsi,	/* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_destroypkt_for_uscsi,	/* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_destroypkt_for_uscsi,	/* Index: 25 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM enabled)
     */
    sd_destroypkt_for_buf,	/* Index: 26 */
    sd_destroypkt_for_buf,	/* Index: 27 */
    sd_destroypkt_for_buf,	/* Index: 28 */
    sd_destroypkt_for_buf,	/* Index: 29 */
    sd_destroypkt_for_buf,	/* Index: 30 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM disabled)
     */
    sd_destroypkt_for_buf,	/* Index: 31 */
    sd_destroypkt_for_buf,	/* Index: 32 */
    sd_destroypkt_for_buf,	/* Index: 33 */
    sd_destroypkt_for_buf,	/* Index: 34 */
};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define SD_CHAIN_BUFIO			1	/* regular buf IO */
#define SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
					/* (for error recovery) */

static int sd_chain_type_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    SD_CHAIN_BUFIO,		/* Index: 0 */
    SD_CHAIN_BUFIO,		/* Index: 1 */
    SD_CHAIN_BUFIO,		/* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    SD_CHAIN_BUFIO,		/* Index: 3 */
    SD_CHAIN_BUFIO,		/* Index: 4 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM enabled)
     */
    SD_CHAIN_BUFIO,		/* Index: 5 */
    SD_CHAIN_BUFIO,		/* Index: 6 */
    SD_CHAIN_BUFIO,		/* Index: 7 */
    SD_CHAIN_BUFIO,		/* Index: 8 */

    /*
     * Chain for buf IO for removable-media or large sector size
     * disk drive targets (PM disabled)
     */
    SD_CHAIN_BUFIO,		/* Index: 9 */
    SD_CHAIN_BUFIO,		/* Index: 10 */
    SD_CHAIN_BUFIO,		/* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    SD_CHAIN_BUFIO,		/* Index: 12 */
    SD_CHAIN_BUFIO,		/* Index: 13 */
    SD_CHAIN_BUFIO,		/* Index: 14 */
    SD_CHAIN_BUFIO,		/* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    SD_CHAIN_BUFIO,		/* Index: 16 */
    SD_CHAIN_BUFIO,		/* Index: 17 */
    SD_CHAIN_BUFIO,		/* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    SD_CHAIN_USCSI,		/* Index: 19 */
    SD_CHAIN_USCSI,		/* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    SD_CHAIN_USCSI,		/* Index: 21 */
    SD_CHAIN_USCSI,		/* Index: 22 */
    SD_CHAIN_USCSI,		/* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    SD_CHAIN_DIRECT,		/* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM enabled)
     */
    SD_CHAIN_BUFIO,		/* Index: 26 */
    SD_CHAIN_BUFIO,		/* Index: 27 */
    SD_CHAIN_BUFIO,		/* Index: 28 */
    SD_CHAIN_BUFIO,		/* Index: 29 */
    SD_CHAIN_BUFIO,		/* Index: 30 */

    /*
     * Chain for buf IO for large sector size disk drive targets
     * with checksumming (PM disabled)
     */
    SD_CHAIN_BUFIO,		/* Index: 31 */
    SD_CHAIN_BUFIO,		/* Index: 32 */
    SD_CHAIN_BUFIO,		/* Index: 33 */
    SD_CHAIN_BUFIO,		/* Index: 34 */
};


/*
 * Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
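 *
 * For example (illustrative only), completion code holding a
 * struct sd_xbuf *xp can test SD_IS_BUFIO(xp) to distinguish regular
 * buf(9S) IO from USCSI traffic before applying buf-specific handling.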
 */
#define SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without the use of locking, and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
    int	sci_iostart_index;
    int	sci_iodone_index;
};

static struct sd_chain_index sd_chain_index_map[] = {
    { SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
    { SD_CHAIN_DISK_IOSTART_NO_PM,	SD_CHAIN_DISK_IODONE_NO_PM },
    { SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
    { SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
    { SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
    { SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
    { SD_CHAIN_USCSI_CMD_IOSTART,	SD_CHAIN_USCSI_CMD_IODONE },
    { SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
    { SD_CHAIN_DIRECT_CMD_IOSTART,	SD_CHAIN_DIRECT_CMD_IODONE },
    { SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
    { SD_CHAIN_MSS_CHKSUM_IOSTART,	SD_CHAIN_MSS_CHKSUM_IODONE },
    { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },

};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define SD_CHAIN_INFO_DISK			0
#define SD_CHAIN_INFO_DISK_NO_PM		1
#define SD_CHAIN_INFO_RMMEDIA			2
#define SD_CHAIN_INFO_MSS_DISK			2
#define SD_CHAIN_INFO_RMMEDIA_NO_PM		3
#define SD_CHAIN_INFO_MSS_DSK_NO_PM		3
#define SD_CHAIN_INFO_CHKSUM			4
#define SD_CHAIN_INFO_CHKSUM_NO_PM		5
#define SD_CHAIN_INFO_MSS_DISK_CHKSUM		10
#define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM	11

/* un->un_uscsi_chain_type must be set to one of these */
#define SD_CHAIN_INFO_USCSI_CMD			6
/* USCSI with PM disabled is the same as DIRECT */
#define SD_CHAIN_INFO_USCSI_CMD_NO_PM		8
#define SD_CHAIN_INFO_USCSI_CHKSUM		7

/* un->un_direct_chain_type must be set to one of these */
#define SD_CHAIN_INFO_DIRECT_CMD		8

/* un->un_priority_chain_type must be set to one of these */
#define SD_CHAIN_INFO_PRIORITY_CMD		9

/* size for devid inquiries */
#define MAX_INQUIRY_SIZE		0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
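 *
 * Illustrative sketch: an iostart routine running at table index "index"
 * hands the buf to the next layer with
 *
 *	SD_NEXT_IOSTART(index, un, bp);
 *
 * which expands to a call through sd_iostart_chain[(index) + 1]; the
 * matching iodone-side walk uses SD_NEXT_IODONE() with a decrementing
 * index.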
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination, which results in significant performance degradation.
 */
#define SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define SD_NEXT_IOSTART(index, un, bp)				\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define SD_NEXT_IODONE(index, un, bp)				\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))

/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when the driver module is loaded.
 */

int
_init(void)
{
    int err;

    /* establish driver name from module name */
    sd_label = (char *)mod_modname(&modlinkage);

#ifndef XPV_HVM_DRIVER
    err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
        SD_MAXUNIT);
    if (err != 0) {
        return (err);
    }

#else /* XPV_HVM_DRIVER */
    /* Remove the leading "hvm_" from the module name */
    ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0);
    sd_label += strlen("hvm_");

#endif /* XPV_HVM_DRIVER */

    mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
    mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
    mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

    mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
    cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

    /*
     * It is OK to initialize these here even for fibre devices.
     */
    sd_scsi_probe_cache_init();

    sd_scsi_target_lun_init();

    /*
     * Creating taskq before mod_install ensures that all callers (threads)
     * that enter the module after a successful mod_install encounter
     * a valid taskq.
     */
    sd_taskq_create();

    err = mod_install(&modlinkage);
    if (err != 0) {
        /* delete taskq if install fails */
        sd_taskq_delete();

        mutex_destroy(&sd_detach_mutex);
        mutex_destroy(&sd_log_mutex);
        mutex_destroy(&sd_label_mutex);

        mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
        cv_destroy(&sd_tr.srq_resv_reclaim_cv);
        cv_destroy(&sd_tr.srq_inprocess_cv);

        sd_scsi_probe_cache_fini();

        sd_scsi_target_lun_fini();

#ifndef XPV_HVM_DRIVER
        ddi_soft_state_fini(&sd_state);
#endif /* !XPV_HVM_DRIVER */
        return (err);
    }

    return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F).
 *
 * Context: Called when the driver module is unloaded.
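 *
 * Note (an inference from standard mod_remove(9F) semantics, not from
 * this file): mod_remove() fails while any instance is still in use, so
 * the teardown below runs only when the module can actually unload.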
 */

int
_fini(void)
{
    int err;

    if ((err = mod_remove(&modlinkage)) != 0) {
        return (err);
    }

    sd_taskq_delete();

    mutex_destroy(&sd_detach_mutex);
    mutex_destroy(&sd_log_mutex);
    mutex_destroy(&sd_label_mutex);
    mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

    sd_scsi_probe_cache_fini();

    sd_scsi_target_lun_fini();

    cv_destroy(&sd_tr.srq_resv_reclaim_cv);
    cv_destroy(&sd_tr.srq_inprocess_cv);

#ifndef XPV_HVM_DRIVER
    ddi_soft_state_fini(&sd_state);
#endif /* !XPV_HVM_DRIVER */

    return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level-based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */


/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
    va_list ap;
    dev_info_t *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
     */
    if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
        ((sd_debug_un == NULL) || (sd_debug_un == un))) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
        mutex_exit(&sd_log_mutex);
    }
#ifdef SD_FAULT_INJECTION
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
    if (un->sd_injection_mask & comp) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        sd_injection_log(sd_log_buf, un);
        mutex_exit(&sd_log_mutex);
    }
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
    va_list ap;
    dev_info_t *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
     */
    if ((sd_component_mask & component) &&
        (sd_level_mask & SD_LOGMASK_INFO) &&
        ((sd_debug_un == NULL) || (sd_debug_un == un))) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
        mutex_exit(&sd_log_mutex);
    }
#ifdef SD_FAULT_INJECTION
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
    if (un->sd_injection_mask & component) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        sd_injection_log(sd_log_buf, un);
        mutex_exit(&sd_log_mutex);
    }
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *	      un - pointer to driver soft state (unit) structure
 *	      fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
    va_list ap;
    dev_info_t *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
     */
    if ((sd_component_mask & component) &&
        (sd_level_mask & SD_LOGMASK_TRACE) &&
        ((sd_debug_un == NULL) || (sd_debug_un == un))) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
        mutex_exit(&sd_log_mutex);
    }
#ifdef SD_FAULT_INJECTION
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
    if (un->sd_injection_mask & component) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        sd_injection_log(sd_log_buf, un);
        mutex_exit(&sd_log_mutex);
    }
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *		but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
    struct scsi_device *devp;
    int rval;
#ifndef XPV_HVM_DRIVER
    int instance = ddi_get_instance(devi);
#endif /* !XPV_HVM_DRIVER */

    /*
     * If it weren't for pln, sdprobe could actually be nulldev
     * in the "__fibre" case.
     */
    if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
        return (DDI_PROBE_DONTCARE);
    }

    devp = ddi_get_driver_private(devi);

    if (devp == NULL) {
        /* Oops... the nexus driver is mis-configured... */
        return (DDI_PROBE_FAILURE);
    }

#ifndef XPV_HVM_DRIVER
    if (ddi_get_soft_state(sd_state, instance) != NULL) {
        return (DDI_PROBE_PARTIAL);
    }
#endif /* !XPV_HVM_DRIVER */

    /*
     * Call the SCSA utility probe routine to see if we actually
     * have a target at this SCSI nexus.
     */
    switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
    case SCSIPROBE_EXISTS:
        switch (devp->sd_inq->inq_dtype) {
        case DTYPE_DIRECT:
            rval = DDI_PROBE_SUCCESS;
            break;
        case DTYPE_RODIRECT:
            /* CDs etc. Can be removable media */
            rval = DDI_PROBE_SUCCESS;
            break;
        case DTYPE_OPTICAL:
            /*
             * Rewritable optical driver HP115AA
             * Can also be removable media
             */

            /*
             * Do not attempt to bind to DTYPE_OPTICAL if
             * pre-Solaris 9 sparc sd behavior is required.
             *
             * If this is the first time through and
             * sd_dtype_optical_bind has not been set in
             * /etc/system, check the property.
             */

            if (sd_dtype_optical_bind < 0) {
                sd_dtype_optical_bind = ddi_prop_get_int
                    (DDI_DEV_T_ANY, devi, 0,
                    "optical-device-bind", 1);
            }

            if (sd_dtype_optical_bind == 0) {
                rval = DDI_PROBE_FAILURE;
            } else {
                rval = DDI_PROBE_SUCCESS;
            }
            break;

        case DTYPE_NOTPRESENT:
        default:
            rval = DDI_PROBE_FAILURE;
            break;
        }
        break;
    default:
        rval = DDI_PROBE_PARTIAL;
        break;
    }

    /*
     * This routine checks for resource allocation prior to freeing,
     * so it will take care of the "smart probing" case where a
     * scsi_probe() may or may not have been issued and will *not*
     * free previously-freed resources.
     */
    scsi_unprobe(devp);
    return (rval);
}


/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
 *		Given the device number, return the devinfo pointer from
 *		the scsi_device structure or the instance number
 *		associated with the dev_t.
 *
 * Arguments: dip - pointer to device info structure
 *	      infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
 *	      DDI_INFO_DEVT2INSTANCE)
 *	      arg - driver dev_t
 *	      result - user buffer for request response
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 */
/* ARGSUSED */
static int
sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
    struct sd_lun	*un;
    dev_t		dev;
    int		instance;
    int		error;

    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
        dev = (dev_t)arg;
        instance = SDUNIT(dev);
        if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
            return (DDI_FAILURE);
        }
        *result = (void *) SD_DEVINFO(un);
        error = DDI_SUCCESS;
        break;
    case DDI_INFO_DEVT2INSTANCE:
        dev = (dev_t)arg;
        instance = SDUNIT(dev);
        *result = (void *)(uintptr_t)instance;
        error = DDI_SUCCESS;
        break;
    default:
        error = DDI_FAILURE;
    }
    return (error);
}

/*
 * Function: sd_prop_op
 *
 * Description: This is the driver prop_op(9e) entry point function.
 *		Return the number of blocks for the partition in question
 *		or forward the request to the property facilities.
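 *		(This note paraphrases the code below: the partition-size
 *		properties are expected to be served by cmlb_prop_op(),
 *		and when no soft state exists yet the request simply
 *		falls through to ddi_prop_op(9F).)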
 *
 * Arguments: dev - device number
 *	      dip - pointer to device info structure
 *	      prop_op - property operator
 *	      mod_flags - DDI_PROP_DONTPASS, don't pass to parent
 *	      name - pointer to property name
 *	      valuep - pointer or address of the user buffer
 *	      lengthp - property length
 *
 * Return Code: DDI_PROP_SUCCESS
 *		DDI_PROP_NOT_FOUND
 *		DDI_PROP_UNDEFINED
 *		DDI_PROP_NO_MEMORY
 *		DDI_PROP_BUF_TOO_SMALL
 */

static int
sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
    struct sd_lun	*un;

    if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
        return (ddi_prop_op(dev, dip, prop_op, mod_flags,
            name, valuep, lengthp));

    return (cmlb_prop_op(un->un_cmlbhandle,
        dev, dip, prop_op, mod_flags, name, valuep, lengthp,
        SDPART(dev), (void *)SD_PATH_DIRECT));
}

/*
 * The following functions are for smart probing:
 * sd_scsi_probe_cache_init()
 * sd_scsi_probe_cache_fini()
 * sd_scsi_clear_probe_cache()
 * sd_scsi_probe_with_cache()
 */

/*
 * Function: sd_scsi_probe_cache_init
 *
 * Description: Initializes the probe response cache mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_init(void)
{
    mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
    sd_scsi_probe_cache_head = NULL;
}


/*
 * Function: sd_scsi_probe_cache_fini
 *
 * Description: Frees all resources associated with the probe response cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_fini(void)
{
    struct sd_scsi_probe_cache *cp;
    struct sd_scsi_probe_cache *ncp;

    /* Clean up our smart probing linked list */
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
        ncp = cp->next;
        kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
    }
    sd_scsi_probe_cache_head = NULL;
    mutex_destroy(&sd_scsi_probe_cache_mutex);
}


/*
 * Function: sd_scsi_clear_probe_cache
 *
 * Description: This routine clears the probe response cache. This is
 *		done when open() returns ENXIO so that when deferred
 *		attach is attempted (possibly after a device has been
 *		turned on) we will retry the probe. Since we don't know
 *		which target we failed to open, we just clear the
 *		entire cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_clear_probe_cache(void)
{
    struct sd_scsi_probe_cache	*cp;
    int	i;

    mutex_enter(&sd_scsi_probe_cache_mutex);
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
        /*
         * Reset all entries to SCSIPROBE_EXISTS. This will
         * force probing to be performed the next time
         * sd_scsi_probe_with_cache is called.
         */
        for (i = 0; i < NTARGETS_WIDE; i++) {
            cp->cache[i] = SCSIPROBE_EXISTS;
        }
    }
    mutex_exit(&sd_scsi_probe_cache_mutex);
}


/*
 * Function: sd_scsi_probe_with_cache
 *
 * Description: This routine implements support for a scsi device probe
 *		with cache. The driver maintains a cache of the target
 *		responses to scsi probes. If we get no response from a
 *		target during a probe inquiry, we remember that, and we
 *		avoid additional calls to scsi_probe on non-zero LUNs
 *		on the same target until the cache is cleared. By doing
 *		so we avoid the 1/4 sec selection timeout for nonzero
 *		LUNs. lun0 of a target is always probed.
 *
 * Arguments: devp - Pointer to a scsi_device(9S) structure
 *	      waitfn - indicates what the allocator routines should
 *	      do when resources are not available. This value
 *	      is passed on to scsi_probe() when that routine
 *	      is called.
 *
 * Return Code: SCSIPROBE_NORESP if a NORESP is in the probe response cache;
 *		otherwise the value returned by scsi_probe(9F).
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
    struct sd_scsi_probe_cache	*cp;
    dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
    int		lun, tgt;

    lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_LUN, 0);
    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_TARGET, -1);

    /* Make sure the target is in range for caching */
    if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
        /* do it the old way (no cache) */
        return (scsi_probe(devp, waitfn));
    }

    mutex_enter(&sd_scsi_probe_cache_mutex);

    /* Find the cache for this scsi bus instance */
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == pdip) {
            break;
        }
    }

    /* If we can't find a cache for this pdip, create one */
    if (cp == NULL) {
        int i;

        cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
            KM_SLEEP);
        cp->pdip = pdip;
        cp->next = sd_scsi_probe_cache_head;
        sd_scsi_probe_cache_head = cp;
        for (i = 0; i < NTARGETS_WIDE; i++) {
            cp->cache[i] = SCSIPROBE_EXISTS;
        }
    }

    mutex_exit(&sd_scsi_probe_cache_mutex);

    /* Recompute the cache for this target if LUN zero */
    if (lun == 0) {
        cp->cache[tgt] = SCSIPROBE_EXISTS;
    }

    /* Don't probe if cache remembers a NORESP from a previous LUN. */
    if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
        return (SCSIPROBE_NORESP);
    }

    /* Do the actual probe; save & return the result */
    return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function: sd_scsi_target_lun_init
 *
 * Description: Initializes the attached lun chain mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
    mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
    sd_scsi_target_lun_head = NULL;
}


/*
 * Function: sd_scsi_target_lun_fini
 *
 * Description: Frees all resources associated with the attached lun
 *		chain.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
    struct sd_scsi_hba_tgt_lun	*cp;
    struct sd_scsi_hba_tgt_lun	*ncp;

    for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
        ncp = cp->next;
        kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
    }
    sd_scsi_target_lun_head = NULL;
    mutex_destroy(&sd_scsi_target_lun_mutex);
}


/*
 * Function: sd_scsi_get_target_lun_count
 *
 * Description: This routine checks the attached lun chain to see how
 *		many luns are attached on the required SCSI controller
 *		and target. Currently, capabilities such as tagged
 *		queuing are set per target by the HBA, so all luns on a
 *		target share the same capabilities. Based on this
 *		assumption, sd should only set these capabilities once
 *		per target. This function is called when sd needs to
 *		decide how many luns are already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *	      controller device.
 *	      target - The target ID on the controller's SCSI bus.
 *
 * Return Code: The number of luns attached on the required target and
 *		controller.
 *		-1 if target ID is not in parallel SCSI scope or the given
 *		dip is not in the chain.
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
    struct sd_scsi_hba_tgt_lun	*cp;

    if ((target < 0) || (target >= NTARGETS_WIDE)) {
        return (-1);
    }

    mutex_enter(&sd_scsi_target_lun_mutex);

    for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == dip) {
            break;
        }
    }

    mutex_exit(&sd_scsi_target_lun_mutex);

    if (cp == NULL) {
        return (-1);
    }

    return (cp->nlun[target]);
}


/*
 * Function: sd_scsi_update_lun_on_target
 *
 * Description: This routine is used to update the attached lun chain when a
 *		lun is attached or detached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *	      controller device.
 *	      target - The target ID on the controller's SCSI bus.
 *	      flag - Indicates whether the lun is attached or detached.
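 *	      (As the code below shows, SD_SCSI_LUN_ATTACH increments
 *	      the per-target lun count; any other value decrements it.)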
3163 * 3164 * Context: Kernel thread context 3165 */ 3166 3167 static void 3168 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3169 { 3170 struct sd_scsi_hba_tgt_lun *cp; 3171 3172 mutex_enter(&sd_scsi_target_lun_mutex); 3173 3174 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3175 if (cp->pdip == dip) { 3176 break; 3177 } 3178 } 3179 3180 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3181 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3182 KM_SLEEP); 3183 cp->pdip = dip; 3184 cp->next = sd_scsi_target_lun_head; 3185 sd_scsi_target_lun_head = cp; 3186 } 3187 3188 mutex_exit(&sd_scsi_target_lun_mutex); 3189 3190 if (cp != NULL) { 3191 if (flag == SD_SCSI_LUN_ATTACH) { 3192 cp->nlun[target] ++; 3193 } else { 3194 cp->nlun[target] --; 3195 } 3196 } 3197 } 3198 3199 3200 /* 3201 * Function: sd_spin_up_unit 3202 * 3203 * Description: Issues the following commands to spin-up the device: 3204 * START STOP UNIT, and INQUIRY. 3205 * 3206 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3207 * structure for this target. 3208 * 3209 * Return Code: 0 - success 3210 * EIO - failure 3211 * EACCES - reservation conflict 3212 * 3213 * Context: Kernel thread context 3214 */ 3215 3216 static int 3217 sd_spin_up_unit(sd_ssc_t *ssc) 3218 { 3219 size_t resid = 0; 3220 int has_conflict = FALSE; 3221 uchar_t *bufaddr; 3222 int status; 3223 struct sd_lun *un; 3224 3225 ASSERT(ssc != NULL); 3226 un = ssc->ssc_un; 3227 ASSERT(un != NULL); 3228 3229 /* 3230 * Send a throwaway START UNIT command. 3231 * 3232 * If we fail on this, we don't care presently what precisely 3233 * is wrong. EMC's arrays will also fail this with a check 3234 * condition (0x2/0x4/0x3) if the device is "inactive," but 3235 * we don't want to fail the attach because it may become 3236 * "active" later. 3237 * We don't know if power condition is supported or not at 3238 * this stage, use START STOP bit. 3239 */ 3240 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3241 SD_TARGET_START, SD_PATH_DIRECT); 3242 3243 if (status != 0) { 3244 if (status == EACCES) 3245 has_conflict = TRUE; 3246 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3247 } 3248 3249 /* 3250 * Send another INQUIRY command to the target. This is necessary for 3251 * non-removable media direct access devices because their INQUIRY data 3252 * may not be fully qualified until they are spun up (perhaps via the 3253 * START command above). Note: This seems to be needed for some 3254 * legacy devices only.) The INQUIRY command should succeed even if a 3255 * Reservation Conflict is present. 3256 */ 3257 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3258 3259 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3260 != 0) { 3261 kmem_free(bufaddr, SUN_INQSIZE); 3262 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3263 return (EIO); 3264 } 3265 3266 /* 3267 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3268 * Note that this routine does not return a failure here even if the 3269 * INQUIRY command did not return any data. This is a legacy behavior. 3270 */ 3271 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3272 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3273 } 3274 3275 kmem_free(bufaddr, SUN_INQSIZE); 3276 3277 /* If we hit a reservation conflict above, tell the caller. 
*/ 3278 if (has_conflict == TRUE) { 3279 return (EACCES); 3280 } 3281 3282 return (0); 3283 } 3284 3285 #ifdef _LP64 3286 /* 3287 * Function: sd_enable_descr_sense 3288 * 3289 * Description: This routine attempts to select descriptor sense format 3290 * using the Control mode page. Devices that support 64 bit 3291 * LBAs (for >2TB luns) should also implement descriptor 3292 * sense data so we will call this function whenever we see 3293 * a lun larger than 2TB. If for some reason the device 3294 * supports 64 bit LBAs but doesn't support descriptor sense 3295 * presumably the mode select will fail. Everything will 3296 * continue to work normally except that we will not get 3297 * complete sense data for commands that fail with an LBA 3298 * larger than 32 bits. 3299 * 3300 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3301 * structure for this target. 3302 * 3303 * Context: Kernel thread context only 3304 */ 3305 3306 static void 3307 sd_enable_descr_sense(sd_ssc_t *ssc) 3308 { 3309 uchar_t *header; 3310 struct mode_control_scsi3 *ctrl_bufp; 3311 size_t buflen; 3312 size_t bd_len; 3313 int status; 3314 struct sd_lun *un; 3315 3316 ASSERT(ssc != NULL); 3317 un = ssc->ssc_un; 3318 ASSERT(un != NULL); 3319 3320 /* 3321 * Read MODE SENSE page 0xA, Control Mode Page 3322 */ 3323 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3324 sizeof (struct mode_control_scsi3); 3325 header = kmem_zalloc(buflen, KM_SLEEP); 3326 3327 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3328 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3329 3330 if (status != 0) { 3331 SD_ERROR(SD_LOG_COMMON, un, 3332 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3333 goto eds_exit; 3334 } 3335 3336 /* 3337 * Determine size of Block Descriptors in order to locate 3338 * the mode page data. ATAPI devices return 0, SCSI devices 3339 * should return MODE_BLK_DESC_LENGTH. 3340 */ 3341 bd_len = ((struct mode_header *)header)->bdesc_length; 3342 3343 /* Clear the mode data length field for MODE SELECT */ 3344 ((struct mode_header *)header)->length = 0; 3345 3346 ctrl_bufp = (struct mode_control_scsi3 *) 3347 (header + MODE_HEADER_LENGTH + bd_len); 3348 3349 /* 3350 * If the page length is smaller than the expected value, 3351 * the target device doesn't support D_SENSE. Bail out here. 3352 */ 3353 if (ctrl_bufp->mode_page.length < 3354 sizeof (struct mode_control_scsi3) - 2) { 3355 SD_ERROR(SD_LOG_COMMON, un, 3356 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3357 goto eds_exit; 3358 } 3359 3360 /* 3361 * Clear PS bit for MODE SELECT 3362 */ 3363 ctrl_bufp->mode_page.ps = 0; 3364 3365 /* 3366 * Set D_SENSE to enable descriptor sense format. 
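 * (In SPC-3 terms this sets the D_SENSE bit, bit 2 of byte 2 of the
 * Control mode page, page code 0xA.)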
3367 */ 3368 ctrl_bufp->d_sense = 1; 3369 3370 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3371 3372 /* 3373 * Use MODE SELECT to commit the change to the D_SENSE bit 3374 */ 3375 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3376 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3377 3378 if (status != 0) { 3379 SD_INFO(SD_LOG_COMMON, un, 3380 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3381 } else { 3382 kmem_free(header, buflen); 3383 return; 3384 } 3385 3386 eds_exit: 3387 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3388 kmem_free(header, buflen); 3389 } 3390 3391 /* 3392 * Function: sd_reenable_dsense_task 3393 * 3394 * Description: Re-enable descriptor sense after device or bus reset 3395 * 3396 * Context: Executes in a taskq() thread context 3397 */ 3398 static void 3399 sd_reenable_dsense_task(void *arg) 3400 { 3401 struct sd_lun *un = arg; 3402 sd_ssc_t *ssc; 3403 3404 ASSERT(un != NULL); 3405 3406 ssc = sd_ssc_init(un); 3407 sd_enable_descr_sense(ssc); 3408 sd_ssc_fini(ssc); 3409 } 3410 #endif /* _LP64 */ 3411 3412 /* 3413 * Function: sd_set_mmc_caps 3414 * 3415 * Description: This routine determines if the device is MMC compliant and if 3416 * the device supports CDDA via a mode sense of the CDVD 3417 * capabilities mode page. Also checks if the device is a 3418 * dvdram writable device. 3419 * 3420 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3421 * structure for this target. 3422 * 3423 * Context: Kernel thread context only 3424 */ 3425 3426 static void 3427 sd_set_mmc_caps(sd_ssc_t *ssc) 3428 { 3429 struct mode_header_grp2 *sense_mhp; 3430 uchar_t *sense_page; 3431 caddr_t buf; 3432 int bd_len; 3433 int status; 3434 struct uscsi_cmd com; 3435 int rtn; 3436 uchar_t *out_data_rw, *out_data_hd; 3437 uchar_t *rqbuf_rw, *rqbuf_hd; 3438 uchar_t *out_data_gesn; 3439 int gesn_len; 3440 struct sd_lun *un; 3441 3442 ASSERT(ssc != NULL); 3443 un = ssc->ssc_un; 3444 ASSERT(un != NULL); 3445 3446 /* 3447 * The flags which will be set in this function are - mmc compliant, 3448 * dvdram writable device, cdda support. Initialize them to FALSE 3449 * and if a capability is detected - it will be set to TRUE. 3450 */ 3451 un->un_f_mmc_cap = FALSE; 3452 un->un_f_dvdram_writable_device = FALSE; 3453 un->un_f_cfg_cdda = FALSE; 3454 3455 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3456 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3457 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3458 3459 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3460 3461 if (status != 0) { 3462 /* command failed; just return */ 3463 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3464 return; 3465 } 3466 /* 3467 * If the mode sense request for the CDROM CAPABILITIES 3468 * page (0x2A) succeeds the device is assumed to be MMC. 
3469 */ 3470 un->un_f_mmc_cap = TRUE; 3471 3472 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3473 if (un->un_f_mmc_gesn_polling) { 3474 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3475 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3476 3477 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3478 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3479 3480 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3481 3482 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3483 un->un_f_mmc_gesn_polling = FALSE; 3484 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3485 "sd_set_mmc_caps: gesn not supported " 3486 "%d %x %x %x %x\n", rtn, 3487 out_data_gesn[0], out_data_gesn[1], 3488 out_data_gesn[2], out_data_gesn[3]); 3489 } 3490 3491 kmem_free(out_data_gesn, gesn_len); 3492 } 3493 3494 /* Get to the page data */ 3495 sense_mhp = (struct mode_header_grp2 *)buf; 3496 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3497 sense_mhp->bdesc_length_lo; 3498 if (bd_len > MODE_BLK_DESC_LENGTH) { 3499 /* 3500 * We did not get back the expected block descriptor 3501 * length so we cannot determine if the device supports 3502 * CDDA. However, we still indicate the device is MMC 3503 * according to the successful response to the page 3504 * 0x2A mode sense request. 3505 */ 3506 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3507 "sd_set_mmc_caps: Mode Sense returned " 3508 "invalid block descriptor length\n"); 3509 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3510 return; 3511 } 3512 3513 /* See if read CDDA is supported */ 3514 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3515 bd_len); 3516 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3517 3518 /* See if writing DVD RAM is supported. */ 3519 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3520 if (un->un_f_dvdram_writable_device == TRUE) { 3521 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3522 return; 3523 } 3524 3525 /* 3526 * If the device presents DVD or CD capabilities in the mode 3527 * page, we can return here since a RRD will not have 3528 * these capabilities. 3529 */ 3530 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3531 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3532 return; 3533 } 3534 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3535 3536 /* 3537 * If un->un_f_dvdram_writable_device is still FALSE, 3538 * check for a Removable Rigid Disk (RRD). A RRD 3539 * device is identified by the features RANDOM_WRITABLE and 3540 * HARDWARE_DEFECT_MANAGEMENT. 3541 */ 3542 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3543 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3544 3545 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3546 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3547 RANDOM_WRITABLE, SD_PATH_STANDARD); 3548 3549 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3550 3551 if (rtn != 0) { 3552 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3553 kmem_free(rqbuf_rw, SENSE_LENGTH); 3554 return; 3555 } 3556 3557 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3558 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3559 3560 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3561 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3562 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3563 3564 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3565 3566 if (rtn == 0) { 3567 /* 3568 * We have good information, check for random writable 3569 * and hardware defect features. 
3570 */ 3571 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3572 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3573 un->un_f_dvdram_writable_device = TRUE; 3574 } 3575 } 3576 3577 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3578 kmem_free(rqbuf_rw, SENSE_LENGTH); 3579 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3580 kmem_free(rqbuf_hd, SENSE_LENGTH); 3581 } 3582 3583 /* 3584 * Function: sd_check_for_writable_cd 3585 * 3586 * Description: This routine determines if the media in the device is 3587 * writable or not. It uses the get configuration command (0x46) 3588 * to determine if the media is writable 3589 * 3590 * Arguments: un - driver soft state (unit) structure 3591 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3592 * chain and the normal command waitq, or 3593 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3594 * "direct" chain and bypass the normal command 3595 * waitq. 3596 * 3597 * Context: Never called at interrupt context. 3598 */ 3599 3600 static void 3601 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3602 { 3603 struct uscsi_cmd com; 3604 uchar_t *out_data; 3605 uchar_t *rqbuf; 3606 int rtn; 3607 uchar_t *out_data_rw, *out_data_hd; 3608 uchar_t *rqbuf_rw, *rqbuf_hd; 3609 struct mode_header_grp2 *sense_mhp; 3610 uchar_t *sense_page; 3611 caddr_t buf; 3612 int bd_len; 3613 int status; 3614 struct sd_lun *un; 3615 3616 ASSERT(ssc != NULL); 3617 un = ssc->ssc_un; 3618 ASSERT(un != NULL); 3619 ASSERT(mutex_owned(SD_MUTEX(un))); 3620 3621 /* 3622 * Initialize the writable media to false, if configuration info. 3623 * tells us otherwise then only we will set it. 3624 */ 3625 un->un_f_mmc_writable_media = FALSE; 3626 mutex_exit(SD_MUTEX(un)); 3627 3628 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3629 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3630 3631 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3632 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3633 3634 if (rtn != 0) 3635 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3636 3637 mutex_enter(SD_MUTEX(un)); 3638 if (rtn == 0) { 3639 /* 3640 * We have good information, check for writable DVD. 3641 */ 3642 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3643 un->un_f_mmc_writable_media = TRUE; 3644 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3645 kmem_free(rqbuf, SENSE_LENGTH); 3646 return; 3647 } 3648 } 3649 3650 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3651 kmem_free(rqbuf, SENSE_LENGTH); 3652 3653 /* 3654 * Determine if this is a RRD type device. 3655 */ 3656 mutex_exit(SD_MUTEX(un)); 3657 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3658 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3659 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3660 3661 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3662 3663 mutex_enter(SD_MUTEX(un)); 3664 if (status != 0) { 3665 /* command failed; just return */ 3666 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3667 return; 3668 } 3669 3670 /* Get to the page data */ 3671 sense_mhp = (struct mode_header_grp2 *)buf; 3672 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3673 if (bd_len > MODE_BLK_DESC_LENGTH) { 3674 /* 3675 * We did not get back the expected block descriptor length so 3676 * we cannot check the mode page. 
3677 */ 3678 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3679 "sd_check_for_writable_cd: Mode Sense returned " 3680 "invalid block descriptor length\n"); 3681 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3682 return; 3683 } 3684 3685 /* 3686 * If the device presents DVD or CD capabilities in the mode 3687 * page, we can return here since a RRD device will not have 3688 * these capabilities. 3689 */ 3690 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3691 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3692 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3693 return; 3694 } 3695 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3696 3697 /* 3698 * If un->un_f_mmc_writable_media is still FALSE, 3699 * check for RRD type media. A RRD device is identified 3700 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3701 */ 3702 mutex_exit(SD_MUTEX(un)); 3703 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3704 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3705 3706 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3707 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3708 RANDOM_WRITABLE, path_flag); 3709 3710 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3711 if (rtn != 0) { 3712 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3713 kmem_free(rqbuf_rw, SENSE_LENGTH); 3714 mutex_enter(SD_MUTEX(un)); 3715 return; 3716 } 3717 3718 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3719 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3720 3721 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3722 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3723 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3724 3725 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3726 mutex_enter(SD_MUTEX(un)); 3727 if (rtn == 0) { 3728 /* 3729 * We have good information, check for random writable 3730 * and hardware defect features as current. 3731 */ 3732 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3733 (out_data_rw[10] & 0x1) && 3734 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3735 (out_data_hd[10] & 0x1)) { 3736 un->un_f_mmc_writable_media = TRUE; 3737 } 3738 } 3739 3740 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3741 kmem_free(rqbuf_rw, SENSE_LENGTH); 3742 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3743 kmem_free(rqbuf_hd, SENSE_LENGTH); 3744 } 3745 3746 /* 3747 * Function: sd_read_unit_properties 3748 * 3749 * Description: The following implements a property lookup mechanism. 3750 * Properties for particular disks (keyed on vendor, model 3751 * and rev numbers) are sought in the sd.conf file via 3752 * sd_process_sdconf_file(), and if not found there, are 3753 * looked for in a list hardcoded in this driver via 3754 * sd_process_sdconf_table() Once located the properties 3755 * are used to update the driver unit structure. 3756 * 3757 * Arguments: un - driver soft state (unit) structure 3758 */ 3759 3760 static void 3761 sd_read_unit_properties(struct sd_lun *un) 3762 { 3763 /* 3764 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3765 * the "sd-config-list" property (from the sd.conf file) or if 3766 * there was not a match for the inquiry vid/pid. If this event 3767 * occurs the static driver configuration table is searched for 3768 * a match. 
3769 */ 3770 ASSERT(un != NULL); 3771 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3772 sd_process_sdconf_table(un); 3773 } 3774 3775 /* check for LSI device */ 3776 sd_is_lsi(un); 3777 3778 3779 } 3780 3781 3782 /* 3783 * Function: sd_process_sdconf_file 3784 * 3785 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3786 * driver's config file (ie, sd.conf) and update the driver 3787 * soft state structure accordingly. 3788 * 3789 * Arguments: un - driver soft state (unit) structure 3790 * 3791 * Return Code: SD_SUCCESS - The properties were successfully set according 3792 * to the driver configuration file. 3793 * SD_FAILURE - The driver config list was not obtained or 3794 * there was no vid/pid match. This indicates that 3795 * the static config table should be used. 3796 * 3797 * The config file has a property, "sd-config-list". Currently we support 3798 * two kinds of formats. For both formats, the value of this property 3799 * is a list of duplets: 3800 * 3801 * sd-config-list= 3802 * <duplet>, 3803 * [,<duplet>]*; 3804 * 3805 * For the improved format, where 3806 * 3807 * <duplet>:= "<vid+pid>","<tunable-list>" 3808 * 3809 * and 3810 * 3811 * <tunable-list>:= <tunable> [, <tunable> ]*; 3812 * <tunable> = <name> : <value> 3813 * 3814 * The <vid+pid> is the string that is returned by the target device on a 3815 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3816 * to apply to all target devices with the specified <vid+pid>. 3817 * 3818 * Each <tunable> is a "<name> : <value>" pair. 3819 * 3820 * For the old format, the structure of each duplet is as follows: 3821 * 3822 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3823 * 3824 * The first entry of the duplet is the device ID string (the concatenated 3825 * vid & pid; not to be confused with a device_id). This is defined in 3826 * the same way as in the sd_disk_table. 3827 * 3828 * The second part of the duplet is a string that identifies a 3829 * data-property-name-list. The data-property-name-list is defined as 3830 * follows: 3831 * 3832 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3833 * 3834 * The syntax of <data-property-name> depends on the <version> field. 3835 * 3836 * If version = SD_CONF_VERSION_1 we have the following syntax: 3837 * 3838 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3839 * 3840 * where the prop0 value will be used to set prop0 if bit0 set in the 3841 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3842 * 3843 */ 3844 3845 static int 3846 sd_process_sdconf_file(struct sd_lun *un) 3847 { 3848 char **config_list = NULL; 3849 uint_t nelements; 3850 char *vidptr; 3851 int vidlen; 3852 char *dnlist_ptr; 3853 char *dataname_ptr; 3854 char *dataname_lasts; 3855 int *data_list = NULL; 3856 uint_t data_list_len; 3857 int rval = SD_FAILURE; 3858 int i; 3859 3860 ASSERT(un != NULL); 3861 3862 /* Obtain the configuration list associated with the .conf file */ 3863 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3864 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3865 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3866 return (SD_FAILURE); 3867 } 3868 3869 /* 3870 * Compare vids in each duplet to the inquiry vid - if a match is 3871 * made, get the data value and update the soft state structure 3872 * accordingly. 3873 * 3874 * Each duplet should show as a pair of strings, return SD_FAILURE 3875 * otherwise. 
3876 */ 3877 if (nelements & 1) { 3878 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3879 "sd-config-list should show as pairs of strings.\n"); 3880 if (config_list) 3881 ddi_prop_free(config_list); 3882 return (SD_FAILURE); 3883 } 3884 3885 for (i = 0; i < nelements; i += 2) { 3886 /* 3887 * Note: The assumption here is that each vid entry is on 3888 * a unique line from its associated duplet. 3889 */ 3890 vidptr = config_list[i]; 3891 vidlen = (int)strlen(vidptr); 3892 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) { 3893 continue; 3894 } 3895 3896 /* 3897 * dnlist contains 1 or more blank separated 3898 * data-property-name entries 3899 */ 3900 dnlist_ptr = config_list[i + 1]; 3901 3902 if (strchr(dnlist_ptr, ':') != NULL) { 3903 /* 3904 * Decode the improved format sd-config-list. 3905 */ 3906 sd_nvpair_str_decode(un, dnlist_ptr); 3907 } else { 3908 /* 3909 * The old format sd-config-list, loop through all 3910 * data-property-name entries in the 3911 * data-property-name-list 3912 * setting the properties for each. 3913 */ 3914 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3915 &dataname_lasts); dataname_ptr != NULL; 3916 dataname_ptr = sd_strtok_r(NULL, " \t", 3917 &dataname_lasts)) { 3918 int version; 3919 3920 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3921 "sd_process_sdconf_file: disk:%s, " 3922 "data:%s\n", vidptr, dataname_ptr); 3923 3924 /* Get the data list */ 3925 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3926 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3927 &data_list_len) != DDI_PROP_SUCCESS) { 3928 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3929 "sd_process_sdconf_file: data " 3930 "property (%s) has no value\n", 3931 dataname_ptr); 3932 continue; 3933 } 3934 3935 version = data_list[0]; 3936 3937 if (version == SD_CONF_VERSION_1) { 3938 sd_tunables values; 3939 3940 /* Set the properties */ 3941 if (sd_chk_vers1_data(un, data_list[1], 3942 &data_list[2], data_list_len, 3943 dataname_ptr) == SD_SUCCESS) { 3944 sd_get_tunables_from_conf(un, 3945 data_list[1], &data_list[2], 3946 &values); 3947 sd_set_vers1_properties(un, 3948 data_list[1], &values); 3949 rval = SD_SUCCESS; 3950 } else { 3951 rval = SD_FAILURE; 3952 } 3953 } else { 3954 scsi_log(SD_DEVINFO(un), sd_label, 3955 CE_WARN, "data property %s version " 3956 "0x%x is invalid.", 3957 dataname_ptr, version); 3958 rval = SD_FAILURE; 3959 } 3960 if (data_list) 3961 ddi_prop_free(data_list); 3962 } 3963 } 3964 } 3965 3966 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3967 if (config_list) { 3968 ddi_prop_free(config_list); 3969 } 3970 3971 return (rval); 3972 } 3973 3974 /* 3975 * Function: sd_nvpair_str_decode() 3976 * 3977 * Description: Parse the improved format sd-config-list to get 3978 * each entry of tunable, which includes a name-value pair. 3979 * Then call sd_set_properties() to set the property. 
3980 * 3981 * Arguments: un - driver soft state (unit) structure 3982 * nvpair_str - the tunable list 3983 */ 3984 static void 3985 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3986 { 3987 char *nv, *name, *value, *token; 3988 char *nv_lasts, *v_lasts, *x_lasts; 3989 3990 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3991 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3992 token = sd_strtok_r(nv, ":", &v_lasts); 3993 name = sd_strtok_r(token, " \t", &x_lasts); 3994 token = sd_strtok_r(NULL, ":", &v_lasts); 3995 value = sd_strtok_r(token, " \t", &x_lasts); 3996 if (name == NULL || value == NULL) { 3997 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3998 "sd_nvpair_str_decode: " 3999 "name or value is not valid!\n"); 4000 } else { 4001 sd_set_properties(un, name, value); 4002 } 4003 } 4004 } 4005 4006 /* 4007 * Function: sd_strtok_r() 4008 * 4009 * Description: This function uses strpbrk and strspn to break 4010 * string into tokens on sequentially subsequent calls. Return 4011 * NULL when no non-separator characters remain. The first 4012 * argument is NULL for subsequent calls. 4013 */ 4014 static char * 4015 sd_strtok_r(char *string, const char *sepset, char **lasts) 4016 { 4017 char *q, *r; 4018 4019 /* First or subsequent call */ 4020 if (string == NULL) 4021 string = *lasts; 4022 4023 if (string == NULL) 4024 return (NULL); 4025 4026 /* Skip leading separators */ 4027 q = string + strspn(string, sepset); 4028 4029 if (*q == '\0') 4030 return (NULL); 4031 4032 if ((r = strpbrk(q, sepset)) == NULL) 4033 *lasts = NULL; 4034 else { 4035 *r = '\0'; 4036 *lasts = r + 1; 4037 } 4038 return (q); 4039 } 4040 4041 /* 4042 * Function: sd_set_properties() 4043 * 4044 * Description: Set device properties based on the improved 4045 * format sd-config-list. 
4046 * 4047 * Arguments: un - driver soft state (unit) structure 4048 * name - supported tunable name 4049 * value - tunable value 4050 */ 4051 static void 4052 sd_set_properties(struct sd_lun *un, char *name, char *value) 4053 { 4054 char *endptr = NULL; 4055 long val = 0; 4056 4057 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4058 if (strcasecmp(value, "true") == 0) { 4059 un->un_f_suppress_cache_flush = TRUE; 4060 } else if (strcasecmp(value, "false") == 0) { 4061 un->un_f_suppress_cache_flush = FALSE; 4062 } else { 4063 goto value_invalid; 4064 } 4065 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4066 "suppress_cache_flush flag set to %d\n", 4067 un->un_f_suppress_cache_flush); 4068 return; 4069 } 4070 4071 if (strcasecmp(name, "controller-type") == 0) { 4072 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4073 un->un_ctype = val; 4074 } else { 4075 goto value_invalid; 4076 } 4077 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4078 "ctype set to %d\n", un->un_ctype); 4079 return; 4080 } 4081 4082 if (strcasecmp(name, "delay-busy") == 0) { 4083 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4084 un->un_busy_timeout = drv_usectohz(val / 1000); 4085 } else { 4086 goto value_invalid; 4087 } 4088 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4089 "busy_timeout set to %d\n", un->un_busy_timeout); 4090 return; 4091 } 4092 4093 if (strcasecmp(name, "disksort") == 0) { 4094 if (strcasecmp(value, "true") == 0) { 4095 un->un_f_disksort_disabled = FALSE; 4096 } else if (strcasecmp(value, "false") == 0) { 4097 un->un_f_disksort_disabled = TRUE; 4098 } else { 4099 goto value_invalid; 4100 } 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4102 "disksort disabled flag set to %d\n", 4103 un->un_f_disksort_disabled); 4104 return; 4105 } 4106 4107 if (strcasecmp(name, "power-condition") == 0) { 4108 if (strcasecmp(value, "true") == 0) { 4109 un->un_f_power_condition_disabled = FALSE; 4110 } else if (strcasecmp(value, "false") == 0) { 4111 un->un_f_power_condition_disabled = TRUE; 4112 } else { 4113 goto value_invalid; 4114 } 4115 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4116 "power condition disabled flag set to %d\n", 4117 un->un_f_power_condition_disabled); 4118 return; 4119 } 4120 4121 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4122 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4123 un->un_reserve_release_time = val; 4124 } else { 4125 goto value_invalid; 4126 } 4127 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4128 "reservation release timeout set to %d\n", 4129 un->un_reserve_release_time); 4130 return; 4131 } 4132 4133 if (strcasecmp(name, "reset-lun") == 0) { 4134 if (strcasecmp(value, "true") == 0) { 4135 un->un_f_lun_reset_enabled = TRUE; 4136 } else if (strcasecmp(value, "false") == 0) { 4137 un->un_f_lun_reset_enabled = FALSE; 4138 } else { 4139 goto value_invalid; 4140 } 4141 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4142 "lun reset enabled flag set to %d\n", 4143 un->un_f_lun_reset_enabled); 4144 return; 4145 } 4146 4147 if (strcasecmp(name, "retries-busy") == 0) { 4148 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4149 un->un_busy_retry_count = val; 4150 } else { 4151 goto value_invalid; 4152 } 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4154 "busy retry count set to %d\n", un->un_busy_retry_count); 4155 return; 4156 } 4157 4158 if (strcasecmp(name, "retries-timeout") == 0) { 4159 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4160 un->un_retry_count = val; 4161 } 
else { 4162 goto value_invalid; 4163 } 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4165 "timeout retry count set to %d\n", un->un_retry_count); 4166 return; 4167 } 4168 4169 if (strcasecmp(name, "retries-notready") == 0) { 4170 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4171 un->un_notready_retry_count = val; 4172 } else { 4173 goto value_invalid; 4174 } 4175 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4176 "notready retry count set to %d\n", 4177 un->un_notready_retry_count); 4178 return; 4179 } 4180 4181 if (strcasecmp(name, "retries-reset") == 0) { 4182 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4183 un->un_reset_retry_count = val; 4184 } else { 4185 goto value_invalid; 4186 } 4187 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4188 "reset retry count set to %d\n", 4189 un->un_reset_retry_count); 4190 return; 4191 } 4192 4193 if (strcasecmp(name, "throttle-max") == 0) { 4194 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4195 un->un_saved_throttle = un->un_throttle = val; 4196 } else { 4197 goto value_invalid; 4198 } 4199 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4200 "throttle set to %d\n", un->un_throttle); 4201 } 4202 4203 if (strcasecmp(name, "throttle-min") == 0) { 4204 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4205 un->un_min_throttle = val; 4206 } else { 4207 goto value_invalid; 4208 } 4209 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4210 "min throttle set to %d\n", un->un_min_throttle); 4211 } 4212 4213 if (strcasecmp(name, "rmw-type") == 0) { 4214 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4215 un->un_f_rmw_type = val; 4216 } else { 4217 goto value_invalid; 4218 } 4219 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4220 "RMW type set to %d\n", un->un_f_rmw_type); 4221 } 4222 4223 if (strcasecmp(name, "physical-block-size") == 0) { 4224 if (ddi_strtol(value, &endptr, 0, &val) == 0 && 4225 ISP2(val) && val >= un->un_tgt_blocksize && 4226 val >= un->un_sys_blocksize) { 4227 un->un_phy_blocksize = val; 4228 } else { 4229 goto value_invalid; 4230 } 4231 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4232 "physical block size set to %d\n", un->un_phy_blocksize); 4233 } 4234 4235 if (strcasecmp(name, "retries-victim") == 0) { 4236 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4237 un->un_victim_retry_count = val; 4238 } else { 4239 goto value_invalid; 4240 } 4241 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4242 "victim retry count set to %d\n", 4243 un->un_victim_retry_count); 4244 return; 4245 } 4246 4247 /* 4248 * Validate the throttle values. 4249 * If any of the numbers are invalid, set everything to defaults. 
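 * For example, specifying a throttle-min larger than throttle-max
 * leaves the pair inconsistent, so both are reset to the global
 * sd_max_throttle/sd_min_throttle defaults.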
4250 */ 4251 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4252 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4253 (un->un_min_throttle > un->un_throttle)) { 4254 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4255 un->un_min_throttle = sd_min_throttle; 4256 } 4257 4258 if (strcasecmp(name, "mmc-gesn-polling") == 0) { 4259 if (strcasecmp(value, "true") == 0) { 4260 un->un_f_mmc_gesn_polling = TRUE; 4261 } else if (strcasecmp(value, "false") == 0) { 4262 un->un_f_mmc_gesn_polling = FALSE; 4263 } else { 4264 goto value_invalid; 4265 } 4266 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4267 "mmc-gesn-polling set to %d\n", 4268 un->un_f_mmc_gesn_polling); 4269 } 4270 4271 return; 4272 4273 value_invalid: 4274 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4275 "value of prop %s is invalid\n", name); 4276 } 4277 4278 /* 4279 * Function: sd_get_tunables_from_conf() 4280 * 4281 * Description: This function reads the data list from the sd.conf file 4282 * and pulls the values that can have numeric values as arguments, 4283 * placing those values in the appropriate sd_tunables member. 4284 * Because the order of the data list members varies across 4285 * platforms, this function reads them from the data list in a 4286 * platform-specific order and places them into the sd_tunables 4287 * member that is consistent across all platforms. 4288 * 4289 */ 4290 static void 4291 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4292 sd_tunables *values) 4293 { 4294 int i; 4295 int mask; 4296 4297 bzero(values, sizeof (sd_tunables)); 4298 4299 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4300 4301 mask = 1 << i; 4302 if (mask > flags) { 4303 break; 4304 } 4305 4306 switch (mask & flags) { 4307 case 0: /* This mask bit not set in flags */ 4308 continue; 4309 case SD_CONF_BSET_THROTTLE: 4310 values->sdt_throttle = data_list[i]; 4311 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4312 "sd_get_tunables_from_conf: throttle = %d\n", 4313 values->sdt_throttle); 4314 break; 4315 case SD_CONF_BSET_CTYPE: 4316 values->sdt_ctype = data_list[i]; 4317 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4318 "sd_get_tunables_from_conf: ctype = %d\n", 4319 values->sdt_ctype); 4320 break; 4321 case SD_CONF_BSET_NRR_COUNT: 4322 values->sdt_not_rdy_retries = data_list[i]; 4323 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4324 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4325 values->sdt_not_rdy_retries); 4326 break; 4327 case SD_CONF_BSET_BSY_RETRY_COUNT: 4328 values->sdt_busy_retries = data_list[i]; 4329 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4330 "sd_get_tunables_from_conf: busy_retries = %d\n", 4331 values->sdt_busy_retries); 4332 break; 4333 case SD_CONF_BSET_RST_RETRIES: 4334 values->sdt_reset_retries = data_list[i]; 4335 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4336 "sd_get_tunables_from_conf: reset_retries = %d\n", 4337 values->sdt_reset_retries); 4338 break; 4339 case SD_CONF_BSET_RSV_REL_TIME: 4340 values->sdt_reserv_rel_time = data_list[i]; 4341 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4342 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4343 values->sdt_reserv_rel_time); 4344 break; 4345 case SD_CONF_BSET_MIN_THROTTLE: 4346 values->sdt_min_throttle = data_list[i]; 4347 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4348 "sd_get_tunables_from_conf: min_throttle = %d\n", 4349 values->sdt_min_throttle); 4350 break; 4351 case SD_CONF_BSET_DISKSORT_DISABLED: 4352 values->sdt_disk_sort_dis = data_list[i]; 4353 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4354 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4355 values->sdt_disk_sort_dis); 4356 break; 4357 case SD_CONF_BSET_LUN_RESET_ENABLED: 4358 values->sdt_lun_reset_enable = data_list[i]; 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4360 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4361 "\n", values->sdt_lun_reset_enable); 4362 break; 4363 case SD_CONF_BSET_CACHE_IS_NV: 4364 values->sdt_suppress_cache_flush = data_list[i]; 4365 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4366 "sd_get_tunables_from_conf: " 4367 "suppress_cache_flush = %d" 4368 "\n", values->sdt_suppress_cache_flush); 4369 break; 4370 case SD_CONF_BSET_PC_DISABLED: 4371 values->sdt_power_condition_dis = data_list[i]; 4372 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4373 "sd_get_tunables_from_conf: power_condition_dis = " 4374 "%d\n", values->sdt_power_condition_dis); 4375 break; 4376 } 4377 } 4378 } 4379 4380 /* 4381 * Function: sd_process_sdconf_table 4382 * 4383 * Description: Search the static configuration table for a match on the 4384 * inquiry vid/pid and update the driver soft state structure 4385 * according to the table property values for the device. 4386 * 4387 * The form of a configuration table entry is: 4388 * <vid+pid>,<flags>,<property-data> 4389 * "SEAGATE ST42400N",1,0x40000, 4390 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4391 * 4392 * Arguments: un - driver soft state (unit) structure 4393 */ 4394 4395 static void 4396 sd_process_sdconf_table(struct sd_lun *un) 4397 { 4398 char *id = NULL; 4399 int table_index; 4400 int idlen; 4401 4402 ASSERT(un != NULL); 4403 for (table_index = 0; table_index < sd_disk_table_size; 4404 table_index++) { 4405 id = sd_disk_table[table_index].device_id; 4406 idlen = strlen(id); 4407 4408 /* 4409 * The static configuration table currently does not 4410 * implement version 10 properties. Additionally, 4411 * multiple data-property-name entries are not 4412 * implemented in the static configuration table. 4413 */ 4414 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4415 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4416 "sd_process_sdconf_table: disk %s\n", id); 4417 sd_set_vers1_properties(un, 4418 sd_disk_table[table_index].flags, 4419 sd_disk_table[table_index].properties); 4420 break; 4421 } 4422 } 4423 } 4424 4425 4426 /* 4427 * Function: sd_sdconf_id_match 4428 * 4429 * Description: This local function implements a case insensitive vid/pid 4430 * comparison as well as the boundary cases of wild card and 4431 * multiple blanks. 4432 * 4433 * Note: An implicit assumption made here is that the scsi 4434 * inquiry structure will always keep the vid, pid and 4435 * revision strings in consecutive sequence, so they can be 4436 * read as a single string. If this assumption is not the 4437 * case, a separate string, to be used for the check, needs 4438 * to be built with these strings concatenated. 4439 * 4440 * Arguments: un - driver soft state (unit) structure 4441 * id - table or config file vid/pid 4442 * idlen - length of the vid/pid (bytes) 4443 * 4444 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4445 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4446 */ 4447 4448 static int 4449 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4450 { 4451 struct scsi_inquiry *sd_inq; 4452 int rval = SD_SUCCESS; 4453 4454 ASSERT(un != NULL); 4455 sd_inq = un->un_sd->sd_inq; 4456 ASSERT(id != NULL); 4457 4458 /* 4459 * We use the inq_vid as a pointer to a buffer containing the 4460 * vid and pid and use the entire vid/pid length of the table 4461 * entry for the comparison.
This works because the inq_pid 4462 * data member follows inq_vid in the scsi_inquiry structure. 4463 */ 4464 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4465 /* 4466 * The user id string is compared to the inquiry vid/pid 4467 * using a case insensitive comparison and ignoring 4468 * multiple spaces. 4469 */ 4470 rval = sd_blank_cmp(un, id, idlen); 4471 if (rval != SD_SUCCESS) { 4472 /* 4473 * User id strings that start and end with a "*" 4474 * are a special case. These do not have a 4475 * specific vendor, and the product string can 4476 * appear anywhere in the 16 byte PID portion of 4477 * the inquiry data. This is a simple strstr() 4478 * type search for the user id in the inquiry data. 4479 */ 4480 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4481 char *pidptr = &id[1]; 4482 int i; 4483 int j; 4484 int pidstrlen = idlen - 2; 4485 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4486 pidstrlen; 4487 4488 if (j < 0) { 4489 return (SD_FAILURE); 4490 } 4491 for (i = 0; i < j; i++) { 4492 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4493 pidptr, pidstrlen) == 0) { 4494 rval = SD_SUCCESS; 4495 break; 4496 } 4497 } 4498 } 4499 } 4500 } 4501 return (rval); 4502 } 4503 4504 4505 /* 4506 * Function: sd_blank_cmp 4507 * 4508 * Description: If the id string starts and ends with a space, treat 4509 * multiple consecutive spaces as equivalent to a single 4510 * space. For example, this causes a sd_disk_table entry 4511 * of " NEC CDROM " to match a device's id string of 4512 * "NEC CDROM". 4513 * 4514 * Note: The success exit condition for this routine is if 4515 * the pointer to the table entry is '\0' and the cnt of 4516 * the inquiry length is zero. This will happen if the inquiry 4517 * string returned by the device is padded with spaces to be 4518 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4519 * SCSI spec states that the inquiry string is to be padded with 4520 * spaces. 4521 * 4522 * Arguments: un - driver soft state (unit) structure 4523 * id - table or config file vid/pid 4524 * idlen - length of the vid/pid (bytes) 4525 * 4526 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4527 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4528 */ 4529 4530 static int 4531 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4532 { 4533 char *p1; 4534 char *p2; 4535 int cnt; 4536 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4537 sizeof (SD_INQUIRY(un)->inq_pid); 4538 4539 ASSERT(un != NULL); 4540 p2 = un->un_sd->sd_inq->inq_vid; 4541 ASSERT(id != NULL); 4542 p1 = id; 4543 4544 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4545 /* 4546 * Note: string p1 is terminated by a NUL but string p2 4547 * isn't. The end of p2 is determined by cnt. 4548 */ 4549 for (;;) { 4550 /* skip over any extra blanks in both strings */ 4551 while ((*p1 != '\0') && (*p1 == ' ')) { 4552 p1++; 4553 } 4554 while ((cnt != 0) && (*p2 == ' ')) { 4555 p2++; 4556 cnt--; 4557 } 4558 4559 /* compare the two strings */ 4560 if ((cnt == 0) || 4561 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4562 break; 4563 } 4564 while ((cnt > 0) && 4565 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4566 p1++; 4567 p2++; 4568 cnt--; 4569 } 4570 } 4571 } 4572 4573 /* return SD_SUCCESS if both strings match */ 4574 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4575 } 4576 4577 4578 /* 4579 * Function: sd_chk_vers1_data 4580 * 4581 * Description: Verify the version 1 device properties provided by the 4582 * user via the configuration file. 4583 * 4584 * Arguments: un - driver soft state (unit) structure 4585 * flags - integer mask indicating properties to be set 4586 * prop_list - integer list of property values 4587 * list_len - number of elements in prop_list 4588 * 4589 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4590 * SD_FAILURE - Indicates the user provided data is invalid 4591 */ 4592 4593 static int 4594 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4595 int list_len, char *dataname_ptr) 4596 { 4597 int i; 4598 int mask = 1; 4599 int index = 0; 4600 4601 ASSERT(un != NULL); 4602 4603 /* Check for a NULL property name and list */ 4604 if (dataname_ptr == NULL) { 4605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4606 "sd_chk_vers1_data: NULL data property name."); 4607 return (SD_FAILURE); 4608 } 4609 if (prop_list == NULL) { 4610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4611 "sd_chk_vers1_data: %s NULL data property list.", 4612 dataname_ptr); 4613 return (SD_FAILURE); 4614 } 4615 4616 /* Display a warning if undefined bits are set in the flags */ 4617 if (flags & ~SD_CONF_BIT_MASK) { 4618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4619 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4620 "Properties not set.", 4621 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4622 return (SD_FAILURE); 4623 } 4624 4625 /* 4626 * Verify the length of the list by identifying the highest bit set 4627 * in the flags and validating that the property list has a length 4628 * up to the index of this bit. 4629 */ 4630 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4631 if (flags & mask) { 4632 index = i + 1; /* highest set bit seen so far, plus one */ 4633 } 4634 mask = 1 << (i + 1); 4635 } 4636 if (list_len < (index + 2)) { 4637 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4638 "sd_chk_vers1_data: " 4639 "Data property list %s size is incorrect. " 4640 "Properties not set.", dataname_ptr); 4641 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4642 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4643 return (SD_FAILURE); 4644 } 4645 return (SD_SUCCESS); 4646 } 4647 4648 4649 /* 4650 * Function: sd_set_vers1_properties 4651 * 4652 * Description: Set version 1 device properties based on a property list 4653 * retrieved from the driver configuration file or static 4654 * configuration table. Version 1 properties have the format: 4655 * 4656 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4657 * 4658 * where the prop0 value will be used to set prop0 if bit0 4659 * is set in the flags. 4660 * 4661 * Arguments: un - driver soft state (unit) structure 4662 * flags - integer mask indicating properties to be set 4663 * prop_list - integer list of property values 4664 */ 4665 4666 static void 4667 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4668 { 4669 ASSERT(un != NULL); 4670 4671 /* 4672 * Set the flag to indicate cache is to be disabled. An attempt 4673 * to disable the cache via sd_cache_control() will be made 4674 * later during attach once the basic initialization is complete.
4675 */ 4676 if (flags & SD_CONF_BSET_NOCACHE) { 4677 un->un_f_opt_disable_cache = TRUE; 4678 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4679 "sd_set_vers1_properties: caching disabled flag set\n"); 4680 } 4681 4682 /* CD-specific configuration parameters */ 4683 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4684 un->un_f_cfg_playmsf_bcd = TRUE; 4685 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4686 "sd_set_vers1_properties: playmsf_bcd set\n"); 4687 } 4688 if (flags & SD_CONF_BSET_READSUB_BCD) { 4689 un->un_f_cfg_readsub_bcd = TRUE; 4690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4691 "sd_set_vers1_properties: readsub_bcd set\n"); 4692 } 4693 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4694 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4695 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4696 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4697 } 4698 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4699 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4701 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4702 } 4703 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4704 un->un_f_cfg_no_read_header = TRUE; 4705 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4706 "sd_set_vers1_properties: no_read_header set\n"); 4707 } 4708 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4709 un->un_f_cfg_read_cd_xd4 = TRUE; 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4711 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4712 } 4713 4714 /* Support for devices which do not have valid/unique serial numbers */ 4715 if (flags & SD_CONF_BSET_FAB_DEVID) { 4716 un->un_f_opt_fab_devid = TRUE; 4717 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4718 "sd_set_vers1_properties: fab_devid bit set\n"); 4719 } 4720 4721 /* Support for user throttle configuration */ 4722 if (flags & SD_CONF_BSET_THROTTLE) { 4723 ASSERT(prop_list != NULL); 4724 un->un_saved_throttle = un->un_throttle = 4725 prop_list->sdt_throttle; 4726 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4727 "sd_set_vers1_properties: throttle set to %d\n", 4728 prop_list->sdt_throttle); 4729 } 4730 4731 /* Set the per disk retry count according to the conf file or table. 
*/ 4732 if (flags & SD_CONF_BSET_NRR_COUNT) { 4733 ASSERT(prop_list != NULL); 4734 if (prop_list->sdt_not_rdy_retries) { 4735 un->un_notready_retry_count = 4736 prop_list->sdt_not_rdy_retries; 4737 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4738 "sd_set_vers1_properties: not ready retry count" 4739 " set to %d\n", un->un_notready_retry_count); 4740 } 4741 } 4742 4743 /* The controller type is reported for generic disk driver ioctls */ 4744 if (flags & SD_CONF_BSET_CTYPE) { 4745 ASSERT(prop_list != NULL); 4746 switch (prop_list->sdt_ctype) { 4747 case CTYPE_CDROM: 4748 un->un_ctype = prop_list->sdt_ctype; 4749 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4750 "sd_set_vers1_properties: ctype set to " 4751 "CTYPE_CDROM\n"); 4752 break; 4753 case CTYPE_CCS: 4754 un->un_ctype = prop_list->sdt_ctype; 4755 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4756 "sd_set_vers1_properties: ctype set to " 4757 "CTYPE_CCS\n"); 4758 break; 4759 case CTYPE_ROD: /* RW optical */ 4760 un->un_ctype = prop_list->sdt_ctype; 4761 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4762 "sd_set_vers1_properties: ctype set to " 4763 "CTYPE_ROD\n"); 4764 break; 4765 default: 4766 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4767 "sd_set_vers1_properties: Could not set " 4768 "invalid ctype value (%d)", 4769 prop_list->sdt_ctype); 4770 } 4771 } 4772 4773 /* Purple failover timeout */ 4774 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4775 ASSERT(prop_list != NULL); 4776 un->un_busy_retry_count = 4777 prop_list->sdt_busy_retries; 4778 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4779 "sd_set_vers1_properties: " 4780 "busy retry count set to %d\n", 4781 un->un_busy_retry_count); 4782 } 4783 4784 /* Purple reset retry count */ 4785 if (flags & SD_CONF_BSET_RST_RETRIES) { 4786 ASSERT(prop_list != NULL); 4787 un->un_reset_retry_count = 4788 prop_list->sdt_reset_retries; 4789 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4790 "sd_set_vers1_properties: " 4791 "reset retry count set to %d\n", 4792 un->un_reset_retry_count); 4793 } 4794 4795 /* Purple reservation release timeout */ 4796 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4797 ASSERT(prop_list != NULL); 4798 un->un_reserve_release_time = 4799 prop_list->sdt_reserv_rel_time; 4800 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4801 "sd_set_vers1_properties: " 4802 "reservation release timeout set to %d\n", 4803 un->un_reserve_release_time); 4804 } 4805 4806 /* 4807 * Driver flag telling the driver to verify that no commands are pending 4808 * for a device before issuing a Test Unit Ready. This is a workaround 4809 * for a firmware bug in some Seagate eliteI drives. 4810 */ 4811 if (flags & SD_CONF_BSET_TUR_CHECK) { 4812 un->un_f_cfg_tur_check = TRUE; 4813 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4814 "sd_set_vers1_properties: tur queue check set\n"); 4815 } 4816 4817 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4818 un->un_min_throttle = prop_list->sdt_min_throttle; 4819 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4820 "sd_set_vers1_properties: min throttle set to %d\n", 4821 un->un_min_throttle); 4822 } 4823 4824 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4825 un->un_f_disksort_disabled = 4826 (prop_list->sdt_disk_sort_dis != 0) ? 4827 TRUE : FALSE; 4828 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4829 "sd_set_vers1_properties: disksort disabled " 4830 "flag set to %d\n", 4831 prop_list->sdt_disk_sort_dis); 4832 } 4833 4834 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4835 un->un_f_lun_reset_enabled = 4836 (prop_list->sdt_lun_reset_enable != 0) ? 
4837 TRUE : FALSE; 4838 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4839 "sd_set_vers1_properties: lun reset enabled " 4840 "flag set to %d\n", 4841 prop_list->sdt_lun_reset_enable); 4842 } 4843 4844 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4845 un->un_f_suppress_cache_flush = 4846 (prop_list->sdt_suppress_cache_flush != 0) ? 4847 TRUE : FALSE; 4848 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4849 "sd_set_vers1_properties: suppress_cache_flush " 4850 "flag set to %d\n", 4851 prop_list->sdt_suppress_cache_flush); 4852 } 4853 4854 if (flags & SD_CONF_BSET_PC_DISABLED) { 4855 un->un_f_power_condition_disabled = 4856 (prop_list->sdt_power_condition_dis != 0) ? 4857 TRUE : FALSE; 4858 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4859 "sd_set_vers1_properties: power_condition_disabled " 4860 "flag set to %d\n", 4861 prop_list->sdt_power_condition_dis); 4862 } 4863 4864 /* 4865 * Validate the throttle values. 4866 * If any of the numbers are invalid, set everything to defaults. 4867 */ 4868 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4869 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4870 (un->un_min_throttle > un->un_throttle)) { 4871 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4872 un->un_min_throttle = sd_min_throttle; 4873 } 4874 } 4875 4876 /* 4877 * Function: sd_is_lsi() 4878 * 4879 * Description: Check for lsi devices, step through the static device 4880 * table to match vid/pid. 4881 * 4882 * Args: un - ptr to sd_lun 4883 * 4884 * Notes: When creating new LSI property, need to add the new LSI property 4885 * to this function. 4886 */ 4887 static void 4888 sd_is_lsi(struct sd_lun *un) 4889 { 4890 char *id = NULL; 4891 int table_index; 4892 int idlen; 4893 void *prop; 4894 4895 ASSERT(un != NULL); 4896 for (table_index = 0; table_index < sd_disk_table_size; 4897 table_index++) { 4898 id = sd_disk_table[table_index].device_id; 4899 idlen = strlen(id); 4900 if (idlen == 0) { 4901 continue; 4902 } 4903 4904 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4905 prop = sd_disk_table[table_index].properties; 4906 if (prop == &lsi_properties || 4907 prop == &lsi_oem_properties || 4908 prop == &lsi_properties_scsi || 4909 prop == &symbios_properties) { 4910 un->un_f_cfg_is_lsi = TRUE; 4911 } 4912 break; 4913 } 4914 } 4915 } 4916 4917 /* 4918 * Function: sd_get_physical_geometry 4919 * 4920 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4921 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4922 * target, and use this information to initialize the physical 4923 * geometry cache specified by pgeom_p. 4924 * 4925 * MODE SENSE is an optional command, so failure in this case 4926 * does not necessarily denote an error. We want to use the 4927 * MODE SENSE commands to derive the physical geometry of the 4928 * device, but if either command fails, the logical geometry is 4929 * used as the fallback for disk label geometry in cmlb. 4930 * 4931 * This requires that un->un_blockcount and un->un_tgt_blocksize 4932 * have already been initialized for the current target and 4933 * that the current values be passed as args so that we don't 4934 * end up ever trying to use -1 as a valid value. This could 4935 * happen if either value is reset while we're not holding 4936 * the mutex. 4937 * 4938 * Arguments: un - driver soft state (unit) structure 4939 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4940 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4941 * to use the USCSI "direct" chain and bypass the normal 4942 * command waitq. 
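 *
 * Return Code: 0 - success
 * EIO - failure, or the unit is a CD/DVD (MODE SENSE pages 3
 * and 4 are reserved for such devices, so the caller falls
 * back to the logical geometry)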
4943 * 4944 * Context: Kernel thread only (can sleep). 4945 */ 4946 4947 static int 4948 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4949 diskaddr_t capacity, int lbasize, int path_flag) 4950 { 4951 struct mode_format *page3p; 4952 struct mode_geometry *page4p; 4953 struct mode_header *headerp; 4954 int sector_size; 4955 int nsect; 4956 int nhead; 4957 int ncyl; 4958 int intrlv; 4959 int spc; 4960 diskaddr_t modesense_capacity; 4961 int rpm; 4962 int bd_len; 4963 int mode_header_length; 4964 uchar_t *p3bufp; 4965 uchar_t *p4bufp; 4966 int cdbsize; 4967 int ret = EIO; 4968 sd_ssc_t *ssc; 4969 int status; 4970 4971 ASSERT(un != NULL); 4972 4973 if (lbasize == 0) { 4974 if (ISCD(un)) { 4975 lbasize = 2048; 4976 } else { 4977 lbasize = un->un_sys_blocksize; 4978 } 4979 } 4980 pgeom_p->g_secsize = (unsigned short)lbasize; 4981 4982 /* 4983 * If the unit is a cd/dvd drive MODE SENSE page three 4984 * and MODE SENSE page four are reserved (see SBC spec 4985 * and MMC spec). To prevent soft errors just return 4986 * using the default LBA size. 4987 */ 4988 if (ISCD(un)) 4989 return (ret); 4990 4991 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4992 4993 /* 4994 * Retrieve MODE SENSE page 3 - Format Device Page 4995 */ 4996 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4997 ssc = sd_ssc_init(un); 4998 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4999 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 5000 if (status != 0) { 5001 SD_ERROR(SD_LOG_COMMON, un, 5002 "sd_get_physical_geometry: mode sense page 3 failed\n"); 5003 goto page3_exit; 5004 } 5005 5006 /* 5007 * Determine size of Block Descriptors in order to locate the mode 5008 * page data. ATAPI devices return 0, SCSI devices should return 5009 * MODE_BLK_DESC_LENGTH. 5010 */ 5011 headerp = (struct mode_header *)p3bufp; 5012 if (un->un_f_cfg_is_atapi == TRUE) { 5013 struct mode_header_grp2 *mhp = 5014 (struct mode_header_grp2 *)headerp; 5015 mode_header_length = MODE_HEADER_LENGTH_GRP2; 5016 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5017 } else { 5018 mode_header_length = MODE_HEADER_LENGTH; 5019 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5020 } 5021 5022 if (bd_len > MODE_BLK_DESC_LENGTH) { 5023 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5024 "sd_get_physical_geometry: received unexpected bd_len " 5025 "of %d, page3\n", bd_len); 5026 status = EIO; 5027 goto page3_exit; 5028 } 5029 5030 page3p = (struct mode_format *) 5031 ((caddr_t)headerp + mode_header_length + bd_len); 5032 5033 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 5034 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5035 "sd_get_physical_geometry: mode sense pg3 code mismatch " 5036 "%d\n", page3p->mode_page.code); 5037 status = EIO; 5038 goto page3_exit; 5039 } 5040 5041 /* 5042 * Use this physical geometry data only if BOTH MODE SENSE commands 5043 * complete successfully; otherwise, revert to the logical geometry. 5044 * So, we need to save everything in temporary variables. 
5045 */ 5046 sector_size = BE_16(page3p->data_bytes_sect); 5047 5048 /* 5049 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 5050 */ 5051 if (sector_size == 0) { 5052 sector_size = un->un_sys_blocksize; 5053 } else { 5054 sector_size &= ~(un->un_sys_blocksize - 1); 5055 } 5056 5057 nsect = BE_16(page3p->sect_track); 5058 intrlv = BE_16(page3p->interleave); 5059 5060 SD_INFO(SD_LOG_COMMON, un, 5061 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 5062 SD_INFO(SD_LOG_COMMON, un, 5063 " mode page: %d; nsect: %d; sector size: %d;\n", 5064 page3p->mode_page.code, nsect, sector_size); 5065 SD_INFO(SD_LOG_COMMON, un, 5066 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 5067 BE_16(page3p->track_skew), 5068 BE_16(page3p->cylinder_skew)); 5069 5070 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5071 5072 /* 5073 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 5074 */ 5075 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5076 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5077 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5078 if (status != 0) { 5079 SD_ERROR(SD_LOG_COMMON, un, 5080 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5081 goto page4_exit; 5082 } 5083 5084 /* 5085 * Determine size of Block Descriptors in order to locate the mode 5086 * page data. ATAPI devices return 0, SCSI devices should return 5087 * MODE_BLK_DESC_LENGTH. 5088 */ 5089 headerp = (struct mode_header *)p4bufp; 5090 if (un->un_f_cfg_is_atapi == TRUE) { 5091 struct mode_header_grp2 *mhp = 5092 (struct mode_header_grp2 *)headerp; 5093 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5094 } else { 5095 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5096 } 5097 5098 if (bd_len > MODE_BLK_DESC_LENGTH) { 5099 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5100 "sd_get_physical_geometry: received unexpected bd_len of " 5101 "%d, page4\n", bd_len); 5102 status = EIO; 5103 goto page4_exit; 5104 } 5105 5106 page4p = (struct mode_geometry *) 5107 ((caddr_t)headerp + mode_header_length + bd_len); 5108 5109 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5110 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5111 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5112 "%d\n", page4p->mode_page.code); 5113 status = EIO; 5114 goto page4_exit; 5115 } 5116 5117 /* 5118 * Stash the data now, after we know that both commands completed. 5119 */ 5120 5121 5122 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5123 spc = nhead * nsect; 5124 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5125 rpm = BE_16(page4p->rpm); 5126 5127 modesense_capacity = spc * ncyl; 5128 5129 SD_INFO(SD_LOG_COMMON, un, 5130 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5131 SD_INFO(SD_LOG_COMMON, un, 5132 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5133 SD_INFO(SD_LOG_COMMON, un, 5134 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5135 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5136 (void *)pgeom_p, capacity); 5137 5138 /* 5139 * Compensate if the drive's geometry is not rectangular, i.e., 5140 * the product of C * H * S returned by MODE SENSE >= that returned 5141 * by read capacity. This is an idiosyncrasy of the original x86 5142 * disk subsystem. 
5143 */ 5144 if (modesense_capacity >= capacity) { 5145 SD_INFO(SD_LOG_COMMON, un, 5146 "sd_get_physical_geometry: adjusting acyl; " 5147 "old: %d; new: %d\n", pgeom_p->g_acyl, 5148 (modesense_capacity - capacity + spc - 1) / spc); 5149 if (sector_size != 0) { 5150 /* 1243403: NEC D38x7 drives don't support sec size */ 5151 pgeom_p->g_secsize = (unsigned short)sector_size; 5152 } 5153 pgeom_p->g_nsect = (unsigned short)nsect; 5154 pgeom_p->g_nhead = (unsigned short)nhead; 5155 pgeom_p->g_capacity = capacity; 5156 pgeom_p->g_acyl = 5157 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5158 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5159 } 5160 5161 pgeom_p->g_rpm = (unsigned short)rpm; 5162 pgeom_p->g_intrlv = (unsigned short)intrlv; 5163 ret = 0; 5164 5165 SD_INFO(SD_LOG_COMMON, un, 5166 "sd_get_physical_geometry: mode sense geometry:\n"); 5167 SD_INFO(SD_LOG_COMMON, un, 5168 " nsect: %d; sector size: %d; interlv: %d\n", 5169 nsect, sector_size, intrlv); 5170 SD_INFO(SD_LOG_COMMON, un, 5171 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5172 nhead, ncyl, rpm, modesense_capacity); 5173 SD_INFO(SD_LOG_COMMON, un, 5174 "sd_get_physical_geometry: (cached)\n"); 5175 SD_INFO(SD_LOG_COMMON, un, 5176 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5177 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5178 pgeom_p->g_nhead, pgeom_p->g_nsect); 5179 SD_INFO(SD_LOG_COMMON, un, 5180 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5181 pgeom_p->g_secsize, pgeom_p->g_capacity, 5182 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5183 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5184 5185 page4_exit: 5186 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5187 5188 page3_exit: 5189 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5190 5191 if (status != 0) { 5192 if (status == EIO) { 5193 /* 5194 * Some disks do not support mode sense(6), we 5195 * should ignore this kind of error(sense key is 5196 * 0x5 - illegal request). 5197 */ 5198 uint8_t *sensep; 5199 int senlen; 5200 5201 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5202 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5203 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5204 5205 if (senlen > 0 && 5206 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5207 sd_ssc_assessment(ssc, 5208 SD_FMT_IGNORE_COMPROMISE); 5209 } else { 5210 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5211 } 5212 } else { 5213 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5214 } 5215 } 5216 sd_ssc_fini(ssc); 5217 return (ret); 5218 } 5219 5220 /* 5221 * Function: sd_get_virtual_geometry 5222 * 5223 * Description: Ask the controller to tell us about the target device. 5224 * 5225 * Arguments: un - pointer to softstate 5226 * capacity - disk capacity in #blocks 5227 * lbasize - disk block size in bytes 5228 * 5229 * Context: Kernel thread only 5230 */ 5231 5232 static int 5233 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5234 diskaddr_t capacity, int lbasize) 5235 { 5236 uint_t geombuf; 5237 int spc; 5238 5239 ASSERT(un != NULL); 5240 5241 /* Set sector size, and total number of sectors */ 5242 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5243 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5244 5245 /* Let the HBA tell us its geometry */ 5246 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5247 5248 /* A value of -1 indicates an undefined "geometry" property */ 5249 if (geombuf == (-1)) { 5250 return (EINVAL); 5251 } 5252 5253 /* Initialize the logical geometry cache. 
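 * The "geometry" capability word packs nhead into bits 16..31 and
 * nsect into bits 0..15; for example, a (hypothetical) geombuf of
 * 0x00FF003F decodes below to 255 heads and 63 sectors per track.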
*/ 5254 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5255 lgeom_p->g_nsect = geombuf & 0xffff; 5256 lgeom_p->g_secsize = un->un_sys_blocksize; 5257 5258 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5259 5260 /* 5261 * Note: The driver originally converted the capacity value from 5262 * target blocks to system blocks. However, the capacity value passed 5263 * to this routine is already in terms of system blocks (this scaling 5264 * is done when the READ CAPACITY command is issued and processed). 5265 * This 'error' may have gone undetected because the usage of g_ncyl 5266 * (which is based upon g_capacity) is very limited within the driver 5267 */ 5268 lgeom_p->g_capacity = capacity; 5269 5270 /* 5271 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5272 * hba may return zero values if the device has been removed. 5273 */ 5274 if (spc == 0) { 5275 lgeom_p->g_ncyl = 0; 5276 } else { 5277 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5278 } 5279 lgeom_p->g_acyl = 0; 5280 5281 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5282 return (0); 5283 5284 } 5285 /* 5286 * Function: sd_update_block_info 5287 * 5288 * Description: Calculate a byte count to sector count bitshift value 5289 * from sector size. 5290 * 5291 * Arguments: un: unit struct. 5292 * lbasize: new target sector size 5293 * capacity: new target capacity, ie. block count 5294 * 5295 * Context: Kernel thread context 5296 */ 5297 5298 static void 5299 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5300 { 5301 if (lbasize != 0) { 5302 un->un_tgt_blocksize = lbasize; 5303 un->un_f_tgt_blocksize_is_valid = TRUE; 5304 if (!un->un_f_has_removable_media) { 5305 un->un_sys_blocksize = lbasize; 5306 } 5307 } 5308 5309 if (capacity != 0) { 5310 un->un_blockcount = capacity; 5311 un->un_f_blockcount_is_valid = TRUE; 5312 5313 /* 5314 * The capacity has changed so update the errstats. 5315 */ 5316 if (un->un_errstats != NULL) { 5317 struct sd_errstats *stp; 5318 5319 capacity *= un->un_sys_blocksize; 5320 stp = (struct sd_errstats *)un->un_errstats->ks_data; 5321 if (stp->sd_capacity.value.ui64 < capacity) 5322 stp->sd_capacity.value.ui64 = capacity; 5323 } 5324 } 5325 } 5326 5327 5328 /* 5329 * Function: sd_register_devid 5330 * 5331 * Description: This routine will obtain the device id information from the 5332 * target, obtain the serial number, and register the device 5333 * id with the ddi framework. 5334 * 5335 * Arguments: devi - the system's dev_info_t for the device. 5336 * un - driver soft state (unit) structure 5337 * reservation_flag - indicates if a reservation conflict 5338 * occurred during attach 5339 * 5340 * Context: Kernel Thread 5341 */ 5342 static void 5343 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5344 { 5345 int rval = 0; 5346 uchar_t *inq80 = NULL; 5347 size_t inq80_len = MAX_INQUIRY_SIZE; 5348 size_t inq80_resid = 0; 5349 uchar_t *inq83 = NULL; 5350 size_t inq83_len = MAX_INQUIRY_SIZE; 5351 size_t inq83_resid = 0; 5352 int dlen, len; 5353 char *sn; 5354 struct sd_lun *un; 5355 5356 ASSERT(ssc != NULL); 5357 un = ssc->ssc_un; 5358 ASSERT(un != NULL); 5359 ASSERT(mutex_owned(SD_MUTEX(un))); 5360 ASSERT((SD_DEVINFO(un)) == devi); 5361 5362 5363 /* 5364 * We check the availability of the World Wide Name (0x83) and Unit 5365 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5366 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5367 * 0x83 is available, that is the best choice. 
Our next choice is 5368 * 0x80. If neither are available, we munge the devid from the device 5369 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5370 * to fabricate a devid for non-Sun qualified disks. 5371 */ 5372 if (sd_check_vpd_page_support(ssc) == 0) { 5373 /* collect page 80 data if available */ 5374 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5375 5376 mutex_exit(SD_MUTEX(un)); 5377 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5378 5379 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5380 0x01, 0x80, &inq80_resid); 5381 5382 if (rval != 0) { 5383 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5384 kmem_free(inq80, inq80_len); 5385 inq80 = NULL; 5386 inq80_len = 0; 5387 } else if (ddi_prop_exists( 5388 DDI_DEV_T_NONE, SD_DEVINFO(un), 5389 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5390 INQUIRY_SERIAL_NO) == 0) { 5391 /* 5392 * If we don't already have a serial number 5393 * property, do quick verify of data returned 5394 * and define property. 5395 */ 5396 dlen = inq80_len - inq80_resid; 5397 len = (size_t)inq80[3]; 5398 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5399 /* 5400 * Ensure sn termination, skip leading 5401 * blanks, and create property 5402 * 'inquiry-serial-no'. 5403 */ 5404 sn = (char *)&inq80[4]; 5405 sn[len] = 0; 5406 while (*sn && (*sn == ' ')) 5407 sn++; 5408 if (*sn) { 5409 (void) ddi_prop_update_string( 5410 DDI_DEV_T_NONE, 5411 SD_DEVINFO(un), 5412 INQUIRY_SERIAL_NO, sn); 5413 } 5414 } 5415 } 5416 mutex_enter(SD_MUTEX(un)); 5417 } 5418 5419 /* collect page 83 data if available */ 5420 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5421 mutex_exit(SD_MUTEX(un)); 5422 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5423 5424 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5425 0x01, 0x83, &inq83_resid); 5426 5427 if (rval != 0) { 5428 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5429 kmem_free(inq83, inq83_len); 5430 inq83 = NULL; 5431 inq83_len = 0; 5432 } 5433 mutex_enter(SD_MUTEX(un)); 5434 } 5435 } 5436 5437 /* 5438 * If transport has already registered a devid for this target 5439 * then that takes precedence over the driver's determination 5440 * of the devid. 5441 * 5442 * NOTE: The reason this check is done here instead of at the beginning 5443 * of the function is to allow the code above to create the 5444 * 'inquiry-serial-no' property. 5445 */ 5446 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5447 ASSERT(un->un_devid); 5448 un->un_f_devid_transport_defined = TRUE; 5449 goto cleanup; /* use devid registered by the transport */ 5450 } 5451 5452 /* 5453 * This is the case of antiquated Sun disk drives that have the 5454 * FAB_DEVID property set in the disk_table. These drives 5455 * manage the devid's by storing them in last 2 available sectors 5456 * on the drive and have them fabricated by the ddi layer by calling 5457 * ddi_devid_init and passing the DEVID_FAB flag. 5458 */ 5459 if (un->un_f_opt_fab_devid == TRUE) { 5460 /* 5461 * Depending on EINVAL isn't reliable, since a reserved disk 5462 * may result in invalid geometry, so check to make sure a 5463 * reservation conflict did not occur during attach. 5464 */ 5465 if ((sd_get_devid(ssc) == EINVAL) && 5466 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5467 /* 5468 * The devid is invalid AND there is no reservation 5469 * conflict. Fabricate a new devid. 
 */
			(void) sd_create_devid(ssc);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		goto cleanup;
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on the data available.
		 * This is not a Sun-qualified disk. Older Sun disk drives
		 * that have the SD_FAB_DEVID property set in the disk_table
		 * and non-Sun-qualified disks are treated in the same
		 * manner: these drives manage their devids by storing them
		 * in the last two available sectors on the drive, and have
		 * them fabricated by the ddi layer by calling
		 * ddi_devid_init and passing the DEVID_FAB flag.
		 * Create a fabricated devid only if one does not already
		 * exist.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

cleanup:
	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}


/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid	*dkdevid;
	ddi_devid_t	tmpid;
	uint_t		*ip;
	size_t		sz;
	diskaddr_t	blk;
	int		status;
	int		chksum;
	int		i;
	size_t		buffer_size;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify the device id, which is stored in the reserved
	 * cylinders at the end of the disk. The backup label is on the odd
	 * sectors of the last track of the last cylinder; the device id is
	 * on a track of the next-to-last cylinder.
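	 *
	 * The record read below is validated with a simple XOR checksum
	 * taken a word at a time over the sector (a sketch of the same
	 * loop used in the code that follows; per the dk_devid layout the
	 * trailing word carries the stored checksum):
	 *
	 *	chksum = 0;
	 *	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	 *	    i++)
	 *		chksum ^= ip[i];
	 *
	 * and the result must equal DKD_GETCHKSUM(dkdevid).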
5589 */ 5590 mutex_enter(SD_MUTEX(un)); 5591 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5592 mutex_exit(SD_MUTEX(un)); 5593 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5594 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5595 SD_PATH_DIRECT); 5596 5597 if (status != 0) { 5598 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5599 goto error; 5600 } 5601 5602 /* Validate the revision */ 5603 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5604 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5605 status = EINVAL; 5606 goto error; 5607 } 5608 5609 /* Calculate the checksum */ 5610 chksum = 0; 5611 ip = (uint_t *)dkdevid; 5612 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5613 i++) { 5614 chksum ^= ip[i]; 5615 } 5616 5617 /* Compare the checksums */ 5618 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5619 status = EINVAL; 5620 goto error; 5621 } 5622 5623 /* Validate the device id */ 5624 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5625 status = EINVAL; 5626 goto error; 5627 } 5628 5629 /* 5630 * Store the device id in the driver soft state 5631 */ 5632 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5633 tmpid = kmem_alloc(sz, KM_SLEEP); 5634 5635 mutex_enter(SD_MUTEX(un)); 5636 5637 un->un_devid = tmpid; 5638 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5639 5640 kmem_free(dkdevid, buffer_size); 5641 5642 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5643 5644 return (status); 5645 error: 5646 mutex_enter(SD_MUTEX(un)); 5647 kmem_free(dkdevid, buffer_size); 5648 return (status); 5649 } 5650 5651 5652 /* 5653 * Function: sd_create_devid 5654 * 5655 * Description: This routine will fabricate the device id and write it 5656 * to the disk. 5657 * 5658 * Arguments: un - driver soft state (unit) structure 5659 * 5660 * Return Code: value of the fabricated device id 5661 * 5662 * Context: Kernel Thread 5663 */ 5664 5665 static ddi_devid_t 5666 sd_create_devid(sd_ssc_t *ssc) 5667 { 5668 struct sd_lun *un; 5669 5670 ASSERT(ssc != NULL); 5671 un = ssc->ssc_un; 5672 ASSERT(un != NULL); 5673 5674 /* Fabricate the devid */ 5675 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5676 == DDI_FAILURE) { 5677 return (NULL); 5678 } 5679 5680 /* Write the devid to disk */ 5681 if (sd_write_deviceid(ssc) != 0) { 5682 ddi_devid_free(un->un_devid); 5683 un->un_devid = NULL; 5684 } 5685 5686 return (un->un_devid); 5687 } 5688 5689 5690 /* 5691 * Function: sd_write_deviceid 5692 * 5693 * Description: This routine will write the device id to the disk 5694 * reserved sector. 
5695 * 5696 * Arguments: un - driver soft state (unit) structure 5697 * 5698 * Return Code: EINVAL 5699 * value returned by sd_send_scsi_cmd 5700 * 5701 * Context: Kernel Thread 5702 */ 5703 5704 static int 5705 sd_write_deviceid(sd_ssc_t *ssc) 5706 { 5707 struct dk_devid *dkdevid; 5708 uchar_t *buf; 5709 diskaddr_t blk; 5710 uint_t *ip, chksum; 5711 int status; 5712 int i; 5713 struct sd_lun *un; 5714 5715 ASSERT(ssc != NULL); 5716 un = ssc->ssc_un; 5717 ASSERT(un != NULL); 5718 ASSERT(mutex_owned(SD_MUTEX(un))); 5719 5720 mutex_exit(SD_MUTEX(un)); 5721 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5722 (void *)SD_PATH_DIRECT) != 0) { 5723 mutex_enter(SD_MUTEX(un)); 5724 return (-1); 5725 } 5726 5727 5728 /* Allocate the buffer */ 5729 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5730 dkdevid = (struct dk_devid *)buf; 5731 5732 /* Fill in the revision */ 5733 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5734 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5735 5736 /* Copy in the device id */ 5737 mutex_enter(SD_MUTEX(un)); 5738 bcopy(un->un_devid, &dkdevid->dkd_devid, 5739 ddi_devid_sizeof(un->un_devid)); 5740 mutex_exit(SD_MUTEX(un)); 5741 5742 /* Calculate the checksum */ 5743 chksum = 0; 5744 ip = (uint_t *)dkdevid; 5745 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5746 i++) { 5747 chksum ^= ip[i]; 5748 } 5749 5750 /* Fill-in checksum */ 5751 DKD_FORMCHKSUM(chksum, dkdevid); 5752 5753 /* Write the reserved sector */ 5754 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk, 5755 SD_PATH_DIRECT); 5756 if (status != 0) 5757 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5758 5759 kmem_free(buf, un->un_sys_blocksize); 5760 5761 mutex_enter(SD_MUTEX(un)); 5762 return (status); 5763 } 5764 5765 5766 /* 5767 * Function: sd_check_vpd_page_support 5768 * 5769 * Description: This routine sends an inquiry command with the EVPD bit set and 5770 * a page code of 0x00 to the device. It is used to determine which 5771 * vital product pages are available to find the devid. We are 5772 * looking for pages 0x83 0x80 or 0xB1. If we return a negative 1, 5773 * the device does not support that command. 5774 * 5775 * Arguments: un - driver soft state (unit) structure 5776 * 5777 * Return Code: 0 - success 5778 * 1 - check condition 5779 * 5780 * Context: This routine can sleep. 5781 */ 5782 5783 static int 5784 sd_check_vpd_page_support(sd_ssc_t *ssc) 5785 { 5786 uchar_t *page_list = NULL; 5787 uchar_t page_length = 0xff; /* Use max possible length */ 5788 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5789 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5790 int rval = 0; 5791 int counter; 5792 struct sd_lun *un; 5793 5794 ASSERT(ssc != NULL); 5795 un = ssc->ssc_un; 5796 ASSERT(un != NULL); 5797 ASSERT(mutex_owned(SD_MUTEX(un))); 5798 5799 mutex_exit(SD_MUTEX(un)); 5800 5801 /* 5802 * We'll set the page length to the maximum to save figuring it out 5803 * with an additional call. 5804 */ 5805 page_list = kmem_zalloc(page_length, KM_SLEEP); 5806 5807 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5808 page_code, NULL); 5809 5810 if (rval != 0) 5811 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5812 5813 mutex_enter(SD_MUTEX(un)); 5814 5815 /* 5816 * Now we must validate that the device accepted the command, as some 5817 * drives do not support it. If the drive does support it, we will 5818 * return 0, and the supported pages will be in un_vpd_page_mask. If 5819 * not, we return -1. 
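 *
 * For reference, a typical "Supported VPD Pages" reply from a
 * (hypothetical) device looks like:
 *
 *	byte 0:  peripheral qualifier/device type
 *	byte 1:  page code (0x00)
 *	byte 3:  page length, e.g. 0x03
 *	byte 4+: supported page codes in ascending order,
 *	         e.g. 0x00 0x80 0x83
 *
 * which the parsing loop folds into un_vpd_page_mask as
 * SD_VPD_SUPPORTED_PG | SD_VPD_UNIT_SERIAL_PG | SD_VPD_DEVID_WWN_PG.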
5820 */ 5821 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5822 /* Loop to find one of the 2 pages we need */ 5823 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5824 5825 /* 5826 * Pages are returned in ascending order, and 0x83 is what we 5827 * are hoping for. 5828 */ 5829 while ((page_list[counter] <= 0xB1) && 5830 (counter <= (page_list[VPD_PAGE_LENGTH] + 5831 VPD_HEAD_OFFSET))) { 5832 /* 5833 * Add 3 because page_list[3] is the number of 5834 * pages minus 3 5835 */ 5836 5837 switch (page_list[counter]) { 5838 case 0x00: 5839 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5840 break; 5841 case 0x80: 5842 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5843 break; 5844 case 0x81: 5845 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5846 break; 5847 case 0x82: 5848 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5849 break; 5850 case 0x83: 5851 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5852 break; 5853 case 0x86: 5854 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5855 break; 5856 case 0xB1: 5857 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG; 5858 break; 5859 } 5860 counter++; 5861 } 5862 5863 } else { 5864 rval = -1; 5865 5866 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5867 "sd_check_vpd_page_support: This drive does not implement " 5868 "VPD pages.\n"); 5869 } 5870 5871 kmem_free(page_list, page_length); 5872 5873 return (rval); 5874 } 5875 5876 5877 /* 5878 * Function: sd_setup_pm 5879 * 5880 * Description: Initialize Power Management on the device 5881 * 5882 * Context: Kernel Thread 5883 */ 5884 5885 static void 5886 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5887 { 5888 uint_t log_page_size; 5889 uchar_t *log_page_data; 5890 int rval = 0; 5891 struct sd_lun *un; 5892 5893 ASSERT(ssc != NULL); 5894 un = ssc->ssc_un; 5895 ASSERT(un != NULL); 5896 5897 /* 5898 * Since we are called from attach, holding a mutex for 5899 * un is unnecessary. Because some of the routines called 5900 * from here require SD_MUTEX to not be held, assert this 5901 * right up front. 5902 */ 5903 ASSERT(!mutex_owned(SD_MUTEX(un))); 5904 /* 5905 * Since the sd device does not have the 'reg' property, 5906 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5907 * The following code is to tell cpr that this device 5908 * DOES need to be suspended and resumed. 5909 */ 5910 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5911 "pm-hardware-state", "needs-suspend-resume"); 5912 5913 /* 5914 * This complies with the new power management framework 5915 * for certain desktop machines. Create the pm_components 5916 * property as a string array property. 5917 * If un_f_pm_supported is TRUE, that means the disk 5918 * attached HBA has set the "pm-capable" property and 5919 * the value of this property is bigger than 0. 5920 */ 5921 if (un->un_f_pm_supported) { 5922 /* 5923 * not all devices have a motor, try it first. 5924 * some devices may return ILLEGAL REQUEST, some 5925 * will hang 5926 * The following START_STOP_UNIT is used to check if target 5927 * device has a motor. 
5928 */ 5929 un->un_f_start_stop_supported = TRUE; 5930 5931 if (un->un_f_power_condition_supported) { 5932 rval = sd_send_scsi_START_STOP_UNIT(ssc, 5933 SD_POWER_CONDITION, SD_TARGET_ACTIVE, 5934 SD_PATH_DIRECT); 5935 if (rval != 0) { 5936 un->un_f_power_condition_supported = FALSE; 5937 } 5938 } 5939 if (!un->un_f_power_condition_supported) { 5940 rval = sd_send_scsi_START_STOP_UNIT(ssc, 5941 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT); 5942 } 5943 if (rval != 0) { 5944 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5945 un->un_f_start_stop_supported = FALSE; 5946 } 5947 5948 /* 5949 * create pm properties anyways otherwise the parent can't 5950 * go to sleep 5951 */ 5952 un->un_f_pm_is_enabled = TRUE; 5953 (void) sd_create_pm_components(devi, un); 5954 5955 /* 5956 * If it claims that log sense is supported, check it out. 5957 */ 5958 if (un->un_f_log_sense_supported) { 5959 rval = sd_log_page_supported(ssc, 5960 START_STOP_CYCLE_PAGE); 5961 if (rval == 1) { 5962 /* Page found, use it. */ 5963 un->un_start_stop_cycle_page = 5964 START_STOP_CYCLE_PAGE; 5965 } else { 5966 /* 5967 * Page not found or log sense is not 5968 * supported. 5969 * Notice we do not check the old style 5970 * START_STOP_CYCLE_VU_PAGE because this 5971 * code path does not apply to old disks. 5972 */ 5973 un->un_f_log_sense_supported = FALSE; 5974 un->un_f_pm_log_sense_smart = FALSE; 5975 } 5976 } 5977 5978 return; 5979 } 5980 5981 /* 5982 * For the disk whose attached HBA has not set the "pm-capable" 5983 * property, check if it supports the power management. 5984 */ 5985 if (!un->un_f_log_sense_supported) { 5986 un->un_power_level = SD_SPINDLE_ON; 5987 un->un_f_pm_is_enabled = FALSE; 5988 return; 5989 } 5990 5991 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5992 5993 #ifdef SDDEBUG 5994 if (sd_force_pm_supported) { 5995 /* Force a successful result */ 5996 rval = 1; 5997 } 5998 #endif 5999 6000 /* 6001 * If the start-stop cycle counter log page is not supported 6002 * or if the pm-capable property is set to be false (0), 6003 * then we should not create the pm_components property. 6004 */ 6005 if (rval == -1) { 6006 /* 6007 * Error. 6008 * Reading log sense failed, most likely this is 6009 * an older drive that does not support log sense. 6010 * If this fails auto-pm is not supported. 6011 */ 6012 un->un_power_level = SD_SPINDLE_ON; 6013 un->un_f_pm_is_enabled = FALSE; 6014 6015 } else if (rval == 0) { 6016 /* 6017 * Page not found. 6018 * The start stop cycle counter is implemented as page 6019 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 6020 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6021 */ 6022 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 6023 /* 6024 * Page found, use this one. 6025 */ 6026 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6027 un->un_f_pm_is_enabled = TRUE; 6028 } else { 6029 /* 6030 * Error or page not found. 6031 * auto-pm is not supported for this device. 6032 */ 6033 un->un_power_level = SD_SPINDLE_ON; 6034 un->un_f_pm_is_enabled = FALSE; 6035 } 6036 } else { 6037 /* 6038 * Page found, use it. 
6039 */ 6040 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6041 un->un_f_pm_is_enabled = TRUE; 6042 } 6043 6044 6045 if (un->un_f_pm_is_enabled == TRUE) { 6046 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6047 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6048 6049 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6050 log_page_size, un->un_start_stop_cycle_page, 6051 0x01, 0, SD_PATH_DIRECT); 6052 6053 if (rval != 0) { 6054 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6055 } 6056 6057 #ifdef SDDEBUG 6058 if (sd_force_pm_supported) { 6059 /* Force a successful result */ 6060 rval = 0; 6061 } 6062 #endif 6063 6064 /* 6065 * If the Log sense for Page( Start/stop cycle counter page) 6066 * succeeds, then power management is supported and we can 6067 * enable auto-pm. 6068 */ 6069 if (rval == 0) { 6070 (void) sd_create_pm_components(devi, un); 6071 } else { 6072 un->un_power_level = SD_SPINDLE_ON; 6073 un->un_f_pm_is_enabled = FALSE; 6074 } 6075 6076 kmem_free(log_page_data, log_page_size); 6077 } 6078 } 6079 6080 6081 /* 6082 * Function: sd_create_pm_components 6083 * 6084 * Description: Initialize PM property. 6085 * 6086 * Context: Kernel thread context 6087 */ 6088 6089 static void 6090 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6091 { 6092 ASSERT(!mutex_owned(SD_MUTEX(un))); 6093 6094 if (un->un_f_power_condition_supported) { 6095 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6096 "pm-components", sd_pwr_pc.pm_comp, 5) 6097 != DDI_PROP_SUCCESS) { 6098 un->un_power_level = SD_SPINDLE_ACTIVE; 6099 un->un_f_pm_is_enabled = FALSE; 6100 return; 6101 } 6102 } else { 6103 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6104 "pm-components", sd_pwr_ss.pm_comp, 3) 6105 != DDI_PROP_SUCCESS) { 6106 un->un_power_level = SD_SPINDLE_ON; 6107 un->un_f_pm_is_enabled = FALSE; 6108 return; 6109 } 6110 } 6111 /* 6112 * When components are initially created they are idle, 6113 * power up any non-removables. 6114 * Note: the return value of pm_raise_power can't be used 6115 * for determining if PM should be enabled for this device. 6116 * Even if you check the return values and remove this 6117 * property created above, the PM framework will not honor the 6118 * change after the first call to pm_raise_power. Hence, 6119 * removal of that property does not help if pm_raise_power 6120 * fails. In the case of removable media, the start/stop 6121 * will fail if the media is not present. 6122 */ 6123 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 6124 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) { 6125 mutex_enter(SD_MUTEX(un)); 6126 un->un_power_level = SD_PM_STATE_ACTIVE(un); 6127 mutex_enter(&un->un_pm_mutex); 6128 /* Set to on and not busy. */ 6129 un->un_pm_count = 0; 6130 } else { 6131 mutex_enter(SD_MUTEX(un)); 6132 un->un_power_level = SD_PM_STATE_STOPPED(un); 6133 mutex_enter(&un->un_pm_mutex); 6134 /* Set to off. */ 6135 un->un_pm_count = -1; 6136 } 6137 mutex_exit(&un->un_pm_mutex); 6138 mutex_exit(SD_MUTEX(un)); 6139 } 6140 6141 6142 /* 6143 * Function: sd_ddi_suspend 6144 * 6145 * Description: Performs system power-down operations. This includes 6146 * setting the drive state to indicate its suspended so 6147 * that no new commands will be accepted. Also, wait for 6148 * all commands that are in transport or queued to a timer 6149 * for retry to complete. All timeout threads are cancelled. 
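 *		Note: the wait for in-flight commands is bounded. An
 *		absolute deadline of ddi_get_lbolt() +
 *		sd_wait_cmds_complete * drv_usectohz(1000000) is computed
 *		up front, and once cv_timedwait() returns -1 (deadline
 *		passed) the suspend fails and the prior state is restored.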
6150 * 6151 * Return Code: DDI_FAILURE or DDI_SUCCESS 6152 * 6153 * Context: Kernel thread context 6154 */ 6155 6156 static int 6157 sd_ddi_suspend(dev_info_t *devi) 6158 { 6159 struct sd_lun *un; 6160 clock_t wait_cmds_complete; 6161 6162 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6163 if (un == NULL) { 6164 return (DDI_FAILURE); 6165 } 6166 6167 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6168 6169 mutex_enter(SD_MUTEX(un)); 6170 6171 /* Return success if the device is already suspended. */ 6172 if (un->un_state == SD_STATE_SUSPENDED) { 6173 mutex_exit(SD_MUTEX(un)); 6174 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6175 "device already suspended, exiting\n"); 6176 return (DDI_SUCCESS); 6177 } 6178 6179 /* Return failure if the device is being used by HA */ 6180 if (un->un_resvd_status & 6181 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6182 mutex_exit(SD_MUTEX(un)); 6183 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6184 "device in use by HA, exiting\n"); 6185 return (DDI_FAILURE); 6186 } 6187 6188 /* 6189 * Return failure if the device is in a resource wait 6190 * or power changing state. 6191 */ 6192 if ((un->un_state == SD_STATE_RWAIT) || 6193 (un->un_state == SD_STATE_PM_CHANGING)) { 6194 mutex_exit(SD_MUTEX(un)); 6195 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6196 "device in resource wait state, exiting\n"); 6197 return (DDI_FAILURE); 6198 } 6199 6200 6201 un->un_save_state = un->un_last_state; 6202 New_state(un, SD_STATE_SUSPENDED); 6203 6204 /* 6205 * Wait for all commands that are in transport or queued to a timer 6206 * for retry to complete. 6207 * 6208 * While waiting, no new commands will be accepted or sent because of 6209 * the new state we set above. 6210 * 6211 * Wait till current operation has completed. If we are in the resource 6212 * wait state (with an intr outstanding) then we need to wait till the 6213 * intr completes and starts the next cmd. We want to wait for 6214 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6215 */ 6216 wait_cmds_complete = ddi_get_lbolt() + 6217 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6218 6219 while (un->un_ncmds_in_transport != 0) { 6220 /* 6221 * Fail if commands do not finish in the specified time. 6222 */ 6223 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6224 wait_cmds_complete) == -1) { 6225 /* 6226 * Undo the state changes made above. Everything 6227 * must go back to it's original value. 6228 */ 6229 Restore_state(un); 6230 un->un_last_state = un->un_save_state; 6231 /* Wake up any threads that might be waiting. 
*/ 6232 cv_broadcast(&un->un_suspend_cv); 6233 mutex_exit(SD_MUTEX(un)); 6234 SD_ERROR(SD_LOG_IO_PM, un, 6235 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6236 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6237 return (DDI_FAILURE); 6238 } 6239 } 6240 6241 /* 6242 * Cancel SCSI watch thread and timeouts, if any are active 6243 */ 6244 6245 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6246 opaque_t temp_token = un->un_swr_token; 6247 mutex_exit(SD_MUTEX(un)); 6248 scsi_watch_suspend(temp_token); 6249 mutex_enter(SD_MUTEX(un)); 6250 } 6251 6252 if (un->un_reset_throttle_timeid != NULL) { 6253 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6254 un->un_reset_throttle_timeid = NULL; 6255 mutex_exit(SD_MUTEX(un)); 6256 (void) untimeout(temp_id); 6257 mutex_enter(SD_MUTEX(un)); 6258 } 6259 6260 if (un->un_dcvb_timeid != NULL) { 6261 timeout_id_t temp_id = un->un_dcvb_timeid; 6262 un->un_dcvb_timeid = NULL; 6263 mutex_exit(SD_MUTEX(un)); 6264 (void) untimeout(temp_id); 6265 mutex_enter(SD_MUTEX(un)); 6266 } 6267 6268 mutex_enter(&un->un_pm_mutex); 6269 if (un->un_pm_timeid != NULL) { 6270 timeout_id_t temp_id = un->un_pm_timeid; 6271 un->un_pm_timeid = NULL; 6272 mutex_exit(&un->un_pm_mutex); 6273 mutex_exit(SD_MUTEX(un)); 6274 (void) untimeout(temp_id); 6275 mutex_enter(SD_MUTEX(un)); 6276 } else { 6277 mutex_exit(&un->un_pm_mutex); 6278 } 6279 6280 if (un->un_rmw_msg_timeid != NULL) { 6281 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6282 un->un_rmw_msg_timeid = NULL; 6283 mutex_exit(SD_MUTEX(un)); 6284 (void) untimeout(temp_id); 6285 mutex_enter(SD_MUTEX(un)); 6286 } 6287 6288 if (un->un_retry_timeid != NULL) { 6289 timeout_id_t temp_id = un->un_retry_timeid; 6290 un->un_retry_timeid = NULL; 6291 mutex_exit(SD_MUTEX(un)); 6292 (void) untimeout(temp_id); 6293 mutex_enter(SD_MUTEX(un)); 6294 6295 if (un->un_retry_bp != NULL) { 6296 un->un_retry_bp->av_forw = un->un_waitq_headp; 6297 un->un_waitq_headp = un->un_retry_bp; 6298 if (un->un_waitq_tailp == NULL) { 6299 un->un_waitq_tailp = un->un_retry_bp; 6300 } 6301 un->un_retry_bp = NULL; 6302 un->un_retry_statp = NULL; 6303 } 6304 } 6305 6306 if (un->un_direct_priority_timeid != NULL) { 6307 timeout_id_t temp_id = un->un_direct_priority_timeid; 6308 un->un_direct_priority_timeid = NULL; 6309 mutex_exit(SD_MUTEX(un)); 6310 (void) untimeout(temp_id); 6311 mutex_enter(SD_MUTEX(un)); 6312 } 6313 6314 if (un->un_f_is_fibre == TRUE) { 6315 /* 6316 * Remove callbacks for insert and remove events 6317 */ 6318 if (un->un_insert_event != NULL) { 6319 mutex_exit(SD_MUTEX(un)); 6320 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6321 mutex_enter(SD_MUTEX(un)); 6322 un->un_insert_event = NULL; 6323 } 6324 6325 if (un->un_remove_event != NULL) { 6326 mutex_exit(SD_MUTEX(un)); 6327 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6328 mutex_enter(SD_MUTEX(un)); 6329 un->un_remove_event = NULL; 6330 } 6331 } 6332 6333 mutex_exit(SD_MUTEX(un)); 6334 6335 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6336 6337 return (DDI_SUCCESS); 6338 } 6339 6340 6341 /* 6342 * Function: sd_ddi_resume 6343 * 6344 * Description: Performs system power-up operations.. 
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state that was saved so that un_last_state
	 * holds the right value.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can hang in biowait. The scenario where
	 * this happens is under cpr suspend: writing of the system state
	 * goes through sddump, which writes 0 to un_throttle. If writing
	 * the system state then fails (for example, if the partition is
	 * too small), cpr attempts a resume. If the throttle isn't
	 * restored from the saved value until after calling
	 * pm_raise_power, then commands sent in sdpower are not
	 * transported and sd_send_scsi_cmd hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare, as the only command issued
	 * in the power entry point is the START command on a transition
	 * from 0->1 or unknown->1. Put the device into the SPINDLE ON
	 * state regardless of the state in which suspend was done, and
	 * ignore the return value, as the resume should not be failed.
	 * In the case of removable media the media need not be inserted,
	 * so there is a chance that raise power will fail with media not
	 * present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device, commands in the queue will
	 * have to wait until new commands come in, which may take a
	 * while. Also, we specifically don't check un_ncmds_in_transport
	 * because we know that there really are no commands in progress
	 * after the unit was suspended, and we could have reached the
	 * throttle level, been suspended, and have no new commands coming
	 * in for a while. Highly unlikely, but so is the low-activity
	 * disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_state_change
 *
 * Description: Change the driver power state.
 *		The caller is expected to actually change the device's
 *		power level.
6453 * 6454 * Arguments: un - driver soft state (unit) structure 6455 * level - the power level that is changed to 6456 * flag - to decide how to change the power state 6457 * 6458 * Return Code: DDI_SUCCESS 6459 * 6460 * Context: Kernel thread context 6461 */ 6462 static int 6463 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6464 { 6465 ASSERT(un != NULL); 6466 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6467 6468 ASSERT(!mutex_owned(SD_MUTEX(un))); 6469 mutex_enter(SD_MUTEX(un)); 6470 6471 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6472 un->un_power_level = level; 6473 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6474 mutex_enter(&un->un_pm_mutex); 6475 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6476 un->un_pm_count++; 6477 ASSERT(un->un_pm_count == 0); 6478 } 6479 mutex_exit(&un->un_pm_mutex); 6480 } else { 6481 /* 6482 * Exit if power management is not enabled for this device, 6483 * or if the device is being used by HA. 6484 */ 6485 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6486 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6487 mutex_exit(SD_MUTEX(un)); 6488 SD_TRACE(SD_LOG_POWER, un, 6489 "sd_pm_state_change: exiting\n"); 6490 return (DDI_FAILURE); 6491 } 6492 6493 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6494 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6495 6496 /* 6497 * See if the device is not busy, ie.: 6498 * - we have no commands in the driver for this device 6499 * - not waiting for resources 6500 */ 6501 if ((un->un_ncmds_in_driver == 0) && 6502 (un->un_state != SD_STATE_RWAIT)) { 6503 /* 6504 * The device is not busy, so it is OK to go to low 6505 * power state. Indicate low power, but rely on someone 6506 * else to actually change it. 6507 */ 6508 mutex_enter(&un->un_pm_mutex); 6509 un->un_pm_count = -1; 6510 mutex_exit(&un->un_pm_mutex); 6511 un->un_power_level = level; 6512 } 6513 } 6514 6515 mutex_exit(SD_MUTEX(un)); 6516 6517 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6518 6519 return (DDI_SUCCESS); 6520 } 6521 6522 6523 /* 6524 * Function: sd_pm_idletimeout_handler 6525 * 6526 * Description: A timer routine that's active only while a device is busy. 6527 * The purpose is to extend slightly the pm framework's busy 6528 * view of the device to prevent busy/idle thrashing for 6529 * back-to-back commands. Do this by comparing the current time 6530 * to the time at which the last command completed and when the 6531 * difference is greater than sd_pm_idletime, call 6532 * pm_idle_component. In addition to indicating idle to the pm 6533 * framework, update the chain type to again use the internal pm 6534 * layers of the driver. 6535 * 6536 * Arguments: arg - driver soft state (unit) structure 6537 * 6538 * Context: Executes in a timeout(9F) thread context 6539 */ 6540 6541 static void 6542 sd_pm_idletimeout_handler(void *arg) 6543 { 6544 struct sd_lun *un = arg; 6545 6546 time_t now; 6547 6548 mutex_enter(&sd_detach_mutex); 6549 if (un->un_detach_count != 0) { 6550 /* Abort if the instance is detaching */ 6551 mutex_exit(&sd_detach_mutex); 6552 return; 6553 } 6554 mutex_exit(&sd_detach_mutex); 6555 6556 now = ddi_get_time(); 6557 /* 6558 * Grab both mutexes, in the proper order, since we're accessing 6559 * both PM and softstate variables. 
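 * The lock order used throughout the driver is SD_MUTEX(un) first,
 * then un_pm_mutex; the block below acquires them in that order and
 * releases them in reverse, which is what keeps this path
 * deadlock-free against other holders of both locks.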
6560 */ 6561 mutex_enter(SD_MUTEX(un)); 6562 mutex_enter(&un->un_pm_mutex); 6563 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6564 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6565 /* 6566 * Update the chain types. 6567 * This takes affect on the next new command received. 6568 */ 6569 if (un->un_f_non_devbsize_supported) { 6570 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6571 } else { 6572 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6573 } 6574 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6575 6576 SD_TRACE(SD_LOG_IO_PM, un, 6577 "sd_pm_idletimeout_handler: idling device\n"); 6578 (void) pm_idle_component(SD_DEVINFO(un), 0); 6579 un->un_pm_idle_timeid = NULL; 6580 } else { 6581 un->un_pm_idle_timeid = 6582 timeout(sd_pm_idletimeout_handler, un, 6583 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6584 } 6585 mutex_exit(&un->un_pm_mutex); 6586 mutex_exit(SD_MUTEX(un)); 6587 } 6588 6589 6590 /* 6591 * Function: sd_pm_timeout_handler 6592 * 6593 * Description: Callback to tell framework we are idle. 6594 * 6595 * Context: timeout(9f) thread context. 6596 */ 6597 6598 static void 6599 sd_pm_timeout_handler(void *arg) 6600 { 6601 struct sd_lun *un = arg; 6602 6603 (void) pm_idle_component(SD_DEVINFO(un), 0); 6604 mutex_enter(&un->un_pm_mutex); 6605 un->un_pm_timeid = NULL; 6606 mutex_exit(&un->un_pm_mutex); 6607 } 6608 6609 6610 /* 6611 * Function: sdpower 6612 * 6613 * Description: PM entry point. 6614 * 6615 * Return Code: DDI_SUCCESS 6616 * DDI_FAILURE 6617 * 6618 * Context: Kernel thread context 6619 */ 6620 6621 static int 6622 sdpower(dev_info_t *devi, int component, int level) 6623 { 6624 struct sd_lun *un; 6625 int instance; 6626 int rval = DDI_SUCCESS; 6627 uint_t i, log_page_size, maxcycles, ncycles; 6628 uchar_t *log_page_data; 6629 int log_sense_page; 6630 int medium_present; 6631 time_t intvlp; 6632 struct pm_trans_data sd_pm_tran_data; 6633 uchar_t save_state; 6634 int sval; 6635 uchar_t state_before_pm; 6636 int got_semaphore_here; 6637 sd_ssc_t *ssc; 6638 int last_power_level; 6639 6640 instance = ddi_get_instance(devi); 6641 6642 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6643 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) { 6644 return (DDI_FAILURE); 6645 } 6646 6647 ssc = sd_ssc_init(un); 6648 6649 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6650 6651 /* 6652 * Must synchronize power down with close. 6653 * Attempt to decrement/acquire the open/close semaphore, 6654 * but do NOT wait on it. If it's not greater than zero, 6655 * ie. it can't be decremented without waiting, then 6656 * someone else, either open or close, already has it 6657 * and the try returns 0. Use that knowledge here to determine 6658 * if it's OK to change the device power level. 6659 * Also, only increment it on exit if it was decremented, ie. gotten, 6660 * here. 6661 */ 6662 got_semaphore_here = sema_tryp(&un->un_semoclose); 6663 6664 mutex_enter(SD_MUTEX(un)); 6665 6666 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6667 un->un_ncmds_in_driver); 6668 6669 /* 6670 * If un_ncmds_in_driver is non-zero it indicates commands are 6671 * already being processed in the driver, or if the semaphore was 6672 * not gotten here it indicates an open or close is being processed. 6673 * At the same time somebody is requesting to go to a lower power 6674 * that can't perform I/O, which can't happen, therefore we need to 6675 * return failure. 
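 * (sema_tryp() is the non-blocking variant: it returns nonzero when
 * the semaphore was successfully decremented and 0 when it would have
 * had to block, so got_semaphore_here == 0 means an open or close
 * holds un_semoclose right now.)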
 */
	if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * If the device is OFFLINE, the disk is effectively dead. Since we
	 * change the power state by sending commands to the device, any
	 * such command would fail anyway, so return here.
	 *
	 * Power changes to a device that is OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate that its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the log sense command is not supported, bypass the
	 * following check; otherwise, examine the log sense
	 * information for this device.
	 */
	if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
	    un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to determine whether the
		 * power-cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
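			 *
			 * (For reference, the 32-bit cycle counters below
			 * are reassembled byte-by-byte from the big-endian
			 * log page; e.g. bytes 0x1c..0x1f of 00 01 86 A0
			 * would yield maxcycles = 0x000186A0 = 100000.)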
6770 */ 6771 mutex_enter(SD_MUTEX(un)); 6772 un->un_state = state_before_pm; 6773 cv_broadcast(&un->un_suspend_cv); 6774 mutex_exit(SD_MUTEX(un)); 6775 SD_TRACE(SD_LOG_IO_PM, un, 6776 "sdpower: exit, Log Sense Failed.\n"); 6777 6778 goto sdpower_failed; 6779 } 6780 6781 /* 6782 * From the page data - Convert the essential information to 6783 * pm_trans_data 6784 */ 6785 maxcycles = 6786 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6787 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6788 6789 ncycles = 6790 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6791 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6792 6793 if (un->un_f_pm_log_sense_smart) { 6794 sd_pm_tran_data.un.smart_count.allowed = maxcycles; 6795 sd_pm_tran_data.un.smart_count.consumed = ncycles; 6796 sd_pm_tran_data.un.smart_count.flag = 0; 6797 sd_pm_tran_data.format = DC_SMART_FORMAT; 6798 } else { 6799 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6800 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6801 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6802 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6803 log_page_data[8+i]; 6804 } 6805 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6806 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6807 } 6808 6809 kmem_free(log_page_data, log_page_size); 6810 6811 /* 6812 * Call pm_trans_check routine to get the Ok from 6813 * the global policy 6814 */ 6815 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6816 #ifdef SDDEBUG 6817 if (sd_force_pm_supported) { 6818 /* Force a successful result */ 6819 rval = 1; 6820 } 6821 #endif 6822 switch (rval) { 6823 case 0: 6824 /* 6825 * Not Ok to Power cycle or error in parameters passed 6826 * Would have given the advised time to consider power 6827 * cycle. Based on the new intvlp parameter we are 6828 * supposed to pretend we are busy so that pm framework 6829 * will never call our power entry point. Because of 6830 * that install a timeout handler and wait for the 6831 * recommended time to elapse so that power management 6832 * can be effective again. 6833 * 6834 * To effect this behavior, call pm_busy_component to 6835 * indicate to the framework this device is busy. 6836 * By not adjusting un_pm_count the rest of PM in 6837 * the driver will function normally, and independent 6838 * of this but because the framework is told the device 6839 * is busy it won't attempt powering down until it gets 6840 * a matching idle. The timeout handler sends this. 6841 * Note: sd_pm_entry can't be called here to do this 6842 * because sdpower may have been called as a result 6843 * of a call to pm_raise_power from within sd_pm_entry. 6844 * 6845 * If a timeout handler is already active then 6846 * don't install another. 6847 */ 6848 mutex_enter(&un->un_pm_mutex); 6849 if (un->un_pm_timeid == NULL) { 6850 un->un_pm_timeid = 6851 timeout(sd_pm_timeout_handler, 6852 un, intvlp * drv_usectohz(1000000)); 6853 mutex_exit(&un->un_pm_mutex); 6854 (void) pm_busy_component(SD_DEVINFO(un), 0); 6855 } else { 6856 mutex_exit(&un->un_pm_mutex); 6857 } 6858 if (got_semaphore_here != 0) { 6859 sema_v(&un->un_semoclose); 6860 } 6861 /* 6862 * On exit put the state back to it's original value 6863 * and broadcast to anyone waiting for the power 6864 * change completion. 
 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (!SD_PM_IS_IO_CAPABLE(un, level)) {
		/*
		 * Save the last state; if the STOP fails we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		last_power_level = un->un_power_level;
		/*
		 * There must not be any commands being processed in the
		 * driver when we get here; power to the device is
		 * potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now, PM suspend the device completely before the
		 * spindle is turned off.
		 */
		if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
		    == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			un->un_power_level = last_power_level;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (SD_PM_IS_IO_CAPABLE(un, level)) {
		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
		if (sval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	if (un->un_f_power_condition_supported) {
		char *pm_condition_name[] = {"STOPPED", "STANDBY",
		    "IDLE", "ACTIVE"};
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: sending \'%s\' power condition",
		    pm_condition_name[level]);
		sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
		    sd_pl2pc[level], SD_PATH_DIRECT);
	} else {
		SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
		    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
		sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
		    ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
		    SD_TARGET_STOP), SD_PATH_DIRECT);
	}
	if (sval != 0) {
		if (sval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Command failed, check for media present.
*/ 6980 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6981 medium_present = FALSE; 6982 } 6983 6984 /* 6985 * The conditions of interest here are: 6986 * if a spindle off with media present fails, 6987 * then restore the state and return an error. 6988 * else if a spindle on fails, 6989 * then return an error (there's no state to restore). 6990 * In all other cases we setup for the new state 6991 * and return success. 6992 */ 6993 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6994 if ((medium_present == TRUE) && (sval != 0)) { 6995 /* The stop command from above failed */ 6996 rval = DDI_FAILURE; 6997 /* 6998 * The stop command failed, and we have media 6999 * present. Put the level back by calling the 7000 * sd_pm_resume() and set the state back to 7001 * it's previous value. 7002 */ 7003 (void) sd_pm_state_change(un, last_power_level, 7004 SD_PM_STATE_ROLLBACK); 7005 mutex_enter(SD_MUTEX(un)); 7006 un->un_last_state = save_state; 7007 mutex_exit(SD_MUTEX(un)); 7008 } else if (un->un_f_monitor_media_state) { 7009 /* 7010 * The stop command from above succeeded. 7011 * Terminate watch thread in case of removable media 7012 * devices going into low power state. This is as per 7013 * the requirements of pm framework, otherwise commands 7014 * will be generated for the device (through watch 7015 * thread), even when the device is in low power state. 7016 */ 7017 mutex_enter(SD_MUTEX(un)); 7018 un->un_f_watcht_stopped = FALSE; 7019 if (un->un_swr_token != NULL) { 7020 opaque_t temp_token = un->un_swr_token; 7021 un->un_f_watcht_stopped = TRUE; 7022 un->un_swr_token = NULL; 7023 mutex_exit(SD_MUTEX(un)); 7024 (void) scsi_watch_request_terminate(temp_token, 7025 SCSI_WATCH_TERMINATE_ALL_WAIT); 7026 } else { 7027 mutex_exit(SD_MUTEX(un)); 7028 } 7029 } 7030 } else { 7031 /* 7032 * The level requested is I/O capable. 7033 * Legacy behavior: return success on a failed spinup 7034 * if there is no media in the drive. 7035 * Do this by looking at medium_present here. 7036 */ 7037 if ((sval != 0) && medium_present) { 7038 /* The start command from above failed */ 7039 rval = DDI_FAILURE; 7040 } else { 7041 /* 7042 * The start command from above succeeded 7043 * PM resume the devices now that we have 7044 * started the disks 7045 */ 7046 (void) sd_pm_state_change(un, level, 7047 SD_PM_STATE_CHANGE); 7048 7049 /* 7050 * Resume the watch thread since it was suspended 7051 * when the device went into low power mode. 7052 */ 7053 if (un->un_f_monitor_media_state) { 7054 mutex_enter(SD_MUTEX(un)); 7055 if (un->un_f_watcht_stopped == TRUE) { 7056 opaque_t temp_token; 7057 7058 un->un_f_watcht_stopped = FALSE; 7059 mutex_exit(SD_MUTEX(un)); 7060 temp_token = 7061 sd_watch_request_submit(un); 7062 mutex_enter(SD_MUTEX(un)); 7063 un->un_swr_token = temp_token; 7064 } 7065 mutex_exit(SD_MUTEX(un)); 7066 } 7067 } 7068 } 7069 7070 if (got_semaphore_here != 0) { 7071 sema_v(&un->un_semoclose); 7072 } 7073 /* 7074 * On exit put the state back to it's original value 7075 * and broadcast to anyone waiting for the power 7076 * change completion. 7077 */ 7078 mutex_enter(SD_MUTEX(un)); 7079 un->un_state = state_before_pm; 7080 cv_broadcast(&un->un_suspend_cv); 7081 mutex_exit(SD_MUTEX(un)); 7082 7083 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7084 7085 sd_ssc_fini(ssc); 7086 return (rval); 7087 7088 sdpower_failed: 7089 7090 sd_ssc_fini(ssc); 7091 return (DDI_FAILURE); 7092 } 7093 7094 7095 7096 /* 7097 * Function: sdattach 7098 * 7099 * Description: Driver's attach(9e) entry point function. 
7100 * 7101 * Arguments: devi - opaque device info handle 7102 * cmd - attach type 7103 * 7104 * Return Code: DDI_SUCCESS 7105 * DDI_FAILURE 7106 * 7107 * Context: Kernel thread context 7108 */ 7109 7110 static int 7111 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7112 { 7113 switch (cmd) { 7114 case DDI_ATTACH: 7115 return (sd_unit_attach(devi)); 7116 case DDI_RESUME: 7117 return (sd_ddi_resume(devi)); 7118 default: 7119 break; 7120 } 7121 return (DDI_FAILURE); 7122 } 7123 7124 7125 /* 7126 * Function: sddetach 7127 * 7128 * Description: Driver's detach(9E) entry point function. 7129 * 7130 * Arguments: devi - opaque device info handle 7131 * cmd - detach type 7132 * 7133 * Return Code: DDI_SUCCESS 7134 * DDI_FAILURE 7135 * 7136 * Context: Kernel thread context 7137 */ 7138 7139 static int 7140 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7141 { 7142 switch (cmd) { 7143 case DDI_DETACH: 7144 return (sd_unit_detach(devi)); 7145 case DDI_SUSPEND: 7146 return (sd_ddi_suspend(devi)); 7147 default: 7148 break; 7149 } 7150 return (DDI_FAILURE); 7151 } 7152 7153 7154 /* 7155 * Function: sd_sync_with_callback 7156 * 7157 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7158 * state while the callback routine is active. 7159 * 7160 * Arguments: un: softstate structure for the instance 7161 * 7162 * Context: Kernel thread context 7163 */ 7164 7165 static void 7166 sd_sync_with_callback(struct sd_lun *un) 7167 { 7168 ASSERT(un != NULL); 7169 7170 mutex_enter(SD_MUTEX(un)); 7171 7172 ASSERT(un->un_in_callback >= 0); 7173 7174 while (un->un_in_callback > 0) { 7175 mutex_exit(SD_MUTEX(un)); 7176 delay(2); 7177 mutex_enter(SD_MUTEX(un)); 7178 } 7179 7180 mutex_exit(SD_MUTEX(un)); 7181 } 7182 7183 /* 7184 * Function: sd_unit_attach 7185 * 7186 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7187 * the soft state structure for the device and performs 7188 * all necessary structure and device initializations. 7189 * 7190 * Arguments: devi: the system's dev_info_t for the device. 7191 * 7192 * Return Code: DDI_SUCCESS if attach is successful. 7193 * DDI_FAILURE if any part of the attach fails. 7194 * 7195 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7196 * Kernel thread context only. Can sleep. 7197 */ 7198 7199 static int 7200 sd_unit_attach(dev_info_t *devi) 7201 { 7202 struct scsi_device *devp; 7203 struct sd_lun *un; 7204 char *variantp; 7205 char name_str[48]; 7206 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7207 int instance; 7208 int rval; 7209 int wc_enabled; 7210 int tgt; 7211 uint64_t capacity; 7212 uint_t lbasize = 0; 7213 dev_info_t *pdip = ddi_get_parent(devi); 7214 int offbyone = 0; 7215 int geom_label_valid = 0; 7216 sd_ssc_t *ssc; 7217 int status; 7218 struct sd_fm_internal *sfip = NULL; 7219 int max_xfer_size; 7220 7221 /* 7222 * Retrieve the target driver's private data area. This was set 7223 * up by the HBA. 7224 */ 7225 devp = ddi_get_driver_private(devi); 7226 7227 /* 7228 * Retrieve the target ID of the device. 7229 */ 7230 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7231 SCSI_ADDR_PROP_TARGET, -1); 7232 7233 /* 7234 * Since we have no idea what state things were left in by the last 7235 * user of the device, set up some 'default' settings, ie. turn 'em 7236 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7237 * Do this before the scsi_probe, which sends an inquiry. 7238 * This is a fix for bug (4430280). 7239 * Of special importance is wide-xfer. 
The drive could have been left 7240 * in wide transfer mode by the last driver to communicate with it 7241 * (this includes us). If that's the case, and if the following is not 7242 * set up properly or we don't re-negotiate with the drive prior to 7243 * transferring data to/from the drive, it causes bus parity errors, 7244 * data overruns, and unexpected interrupts. This first occurred when 7245 * the fix for bug (4378686) was made. 7246 */ 7247 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7248 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7249 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7250 7251 /* 7252 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 7253 * on a target. Setting it per LUN instance actually sets the 7254 * capability of this target, which affects those LUNs already 7255 * attached on the same target. So during attach, we can only disable 7256 * this capability when no other LUN has been attached on this 7257 * target. By doing this, we assume a target has the same tagged-qing 7258 * capability for every LUN. This restriction can be removed when 7259 * HBAs support per-LUN tagged-qing capability. 7260 */ 7261 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7262 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7263 } 7264 7265 /* 7266 * Use scsi_probe() to issue an INQUIRY command to the device. 7267 * This call will allocate and fill in the scsi_inquiry structure 7268 * and point the sd_inq member of the scsi_device structure to it. 7269 * If the attach succeeds, then this memory will not be de-allocated 7270 * (via scsi_unprobe()) until the instance is detached. 7271 */ 7272 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7273 goto probe_failed; 7274 } 7275 7276 /* 7277 * Check the device type as specified in the inquiry data and 7278 * claim it if it is of a type that we support. 7279 */ 7280 switch (devp->sd_inq->inq_dtype) { 7281 case DTYPE_DIRECT: 7282 break; 7283 case DTYPE_RODIRECT: 7284 break; 7285 case DTYPE_OPTICAL: 7286 break; 7287 case DTYPE_NOTPRESENT: 7288 default: 7289 /* Unsupported device type; fail the attach. */ 7290 goto probe_failed; 7291 } 7292 7293 /* 7294 * Allocate the soft state structure for this unit. 7295 * 7296 * We rely upon this memory being set to all zeroes by 7297 * ddi_soft_state_zalloc(). We assume that any member of the 7298 * soft state structure that is not explicitly initialized by 7299 * this routine will have a value of zero. 7300 */ 7301 instance = ddi_get_instance(devp->sd_dev); 7302 #ifndef XPV_HVM_DRIVER 7303 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7304 goto probe_failed; 7305 } 7306 #endif /* !XPV_HVM_DRIVER */ 7307 7308 /* 7309 * Retrieve a pointer to the newly-allocated soft state. 7310 * 7311 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7312 * was successful, unless something has gone horribly wrong and the 7313 * ddi's soft state internals are corrupt (in which case it is 7314 * probably better to halt here than just fail the attach...) 7315 */ 7316 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7317 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7318 instance); 7319 /*NOTREACHED*/ 7320 } 7321 7322 /* 7323 * Link the back ptr of the driver soft state to the scsi_device 7324 * struct for this LUN. 7325 * Save a pointer to the softstate in the driver-private area of 7326 * the scsi_device struct.
7327 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7328 * we first set un->un_sd below. 7329 */ 7330 un->un_sd = devp; 7331 devp->sd_private = (opaque_t)un; 7332 7333 /* 7334 * The following must be after devp is stored in the soft state struct. 7335 */ 7336 #ifdef SDDEBUG 7337 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7338 "%s_unit_attach: un:0x%p instance:%d\n", 7339 ddi_driver_name(devi), un, instance); 7340 #endif 7341 7342 /* 7343 * Set up the device type and node type (for the minor nodes). 7344 * By default we assume that the device can at least support the 7345 * Common Command Set. Call it a CD-ROM if it reports itself 7346 * as a RODIRECT device. 7347 */ 7348 switch (devp->sd_inq->inq_dtype) { 7349 case DTYPE_RODIRECT: 7350 un->un_node_type = DDI_NT_CD_CHAN; 7351 un->un_ctype = CTYPE_CDROM; 7352 break; 7353 case DTYPE_OPTICAL: 7354 un->un_node_type = DDI_NT_BLOCK_CHAN; 7355 un->un_ctype = CTYPE_ROD; 7356 break; 7357 default: 7358 un->un_node_type = DDI_NT_BLOCK_CHAN; 7359 un->un_ctype = CTYPE_CCS; 7360 break; 7361 } 7362 7363 /* 7364 * Try to read the interconnect type from the HBA. 7365 * 7366 * Note: This driver is currently compiled as two binaries, a parallel 7367 * SCSI version (sd) and a fibre channel version (ssd). All functional 7368 * differences are determined at compile time. In the future a single 7369 * binary will be provided and the interconnect type will be used to 7370 * differentiate between fibre and parallel SCSI behaviors. At that time 7371 * it will be necessary for all fibre channel HBAs to support this 7372 * property. 7373 * 7374 * Set un_f_is_fibre to TRUE (default to fibre). 7375 */ 7376 un->un_f_is_fibre = TRUE; 7377 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7378 case INTERCONNECT_SSA: 7379 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7380 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7381 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7382 break; 7383 case INTERCONNECT_PARALLEL: 7384 un->un_f_is_fibre = FALSE; 7385 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7387 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7388 break; 7389 case INTERCONNECT_SAS: 7390 un->un_f_is_fibre = FALSE; 7391 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7392 un->un_node_type = DDI_NT_BLOCK_SAS; 7393 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7394 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7395 break; 7396 case INTERCONNECT_SATA: 7397 un->un_f_is_fibre = FALSE; 7398 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7399 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7400 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7401 break; 7402 case INTERCONNECT_FIBRE: 7403 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7404 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7405 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7406 break; 7407 case INTERCONNECT_FABRIC: 7408 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7409 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7410 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7411 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7412 break; 7413 default: 7414 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7415 /* 7416 * The HBA does not support the "interconnect-type" property 7417 * (or did not provide a recognized type). 7418 * 7419 * Note: This will be obsoleted when a single fibre channel 7420 * and parallel SCSI driver is delivered.
In the meantime, the 7421 * interconnect type will be set to the platform default. If that 7422 * type is not parallel SCSI, it means that we should be 7423 * assuming "ssd" semantics. However, here this also means that 7424 * the FC HBA is not supporting the "interconnect-type" property 7425 * like we expect it to, so log this occurrence. 7426 */ 7427 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7428 if (!SD_IS_PARALLEL_SCSI(un)) { 7429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7430 "sd_unit_attach: un:0x%p Assuming " 7431 "INTERCONNECT_FIBRE\n", un); 7432 } else { 7433 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7434 "sd_unit_attach: un:0x%p Assuming " 7435 "INTERCONNECT_PARALLEL\n", un); 7436 un->un_f_is_fibre = FALSE; 7437 } 7438 #else 7439 /* 7440 * Note: This branch will be used when a single fibre 7441 * channel and parallel SCSI driver is delivered. The default 7442 * will be to assume that if a device does not support the 7443 * "interconnect-type" property it is a parallel SCSI HBA, and 7444 * we will set the interconnect type for parallel SCSI. 7445 */ 7446 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7447 un->un_f_is_fibre = FALSE; 7448 #endif 7449 break; 7450 } 7451 7452 if (un->un_f_is_fibre == TRUE) { 7453 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7454 SCSI_VERSION_3) { 7455 switch (un->un_interconnect_type) { 7456 case SD_INTERCONNECT_FIBRE: 7457 case SD_INTERCONNECT_SSA: 7458 un->un_node_type = DDI_NT_BLOCK_WWN; 7459 break; 7460 default: 7461 break; 7462 } 7463 } 7464 } 7465 7466 /* 7467 * Initialize the Request Sense command for the target 7468 */ 7469 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7470 goto alloc_rqs_failed; 7471 } 7472 7473 /* 7474 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC, 7475 * which has separate binaries for sd and ssd. 7476 * 7477 * x86 has one binary, so un_retry_count is set based on the 7478 * interconnect type. The hardcoded values will go away when 7479 * SPARC also uses one binary for sd and ssd. These hardcoded 7480 * values need to match SD_RETRY_COUNT in sddef.h. 7481 * The value used is based on interconnect type: 7482 * fibre = 3, parallel = 5. 7483 */ 7484 #if defined(__i386) || defined(__amd64) 7485 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7486 #else 7487 un->un_retry_count = SD_RETRY_COUNT; 7488 #endif 7489 7490 /* 7491 * Set the per disk retry count to the default number of retries 7492 * for disks and CDROMs. This value can be overridden by the 7493 * disk property list or an entry in sd.conf. 7494 */ 7495 un->un_notready_retry_count = 7496 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7497 : DISK_NOT_READY_RETRY_COUNT(un); 7498 7499 /* 7500 * Set the busy retry count to the default value of un_retry_count. 7501 * This can be overridden by entries in sd.conf or the device 7502 * config table. 7503 */ 7504 un->un_busy_retry_count = un->un_retry_count; 7505 7506 /* 7507 * Init the reset threshold for retries. This number determines 7508 * how many retries must be performed before a reset can be issued 7509 * (for certain error conditions). This can be overridden by entries 7510 * in sd.conf or the device config table. 7511 */ 7512 un->un_reset_retry_count = (un->un_retry_count / 2); 7513 7514 /* 7515 * Set the victim_retry_count to twice the default un_retry_count. 7516 */ 7517 un->un_victim_retry_count = (2 * un->un_retry_count); 7518 7519 /* 7520 * Set the reservation release timeout to the default value of 7521 * 5 seconds. This can be overridden by entries in ssd.conf or the 7522 * device config table.
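* * As a worked example of the assignments above: with the parallel SCSI * default of un_retry_count == 5, un_reset_retry_count comes out to 2 * and un_victim_retry_count to 10; with the fibre default of 3, they * come out to 1 and 6 respectively.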
7523 */ 7524 un->un_reserve_release_time = 5; 7525 7526 /* 7527 * Set up the default maximum transfer size. Note that this may 7528 * get updated later in the attach, when setting up default wide 7529 * operations for disks. 7530 */ 7531 #if defined(__i386) || defined(__amd64) 7532 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7533 un->un_partial_dma_supported = 1; 7534 #else 7535 un->un_max_xfer_size = (uint_t)maxphys; 7536 #endif 7537 7538 /* 7539 * Get "allow bus device reset" property (defaults to "enabled" if 7540 * the property was not defined). This is to disable bus resets for 7541 * certain kinds of error recovery. Note: In the future when a run-time 7542 * fibre check is available the soft state flag should default to 7543 * enabled. 7544 */ 7545 if (un->un_f_is_fibre == TRUE) { 7546 un->un_f_allow_bus_device_reset = TRUE; 7547 } else { 7548 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7549 "allow-bus-device-reset", 1) != 0) { 7550 un->un_f_allow_bus_device_reset = TRUE; 7551 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7552 "sd_unit_attach: un:0x%p Bus device reset " 7553 "enabled\n", un); 7554 } else { 7555 un->un_f_allow_bus_device_reset = FALSE; 7556 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7557 "sd_unit_attach: un:0x%p Bus device reset " 7558 "disabled\n", un); 7559 } 7560 } 7561 7562 /* 7563 * Check if this is an ATAPI device. ATAPI devices use Group 1 7564 * Read/Write commands and Group 2 Mode Sense/Select commands. 7565 * 7566 * Note: The "obsolete" way of doing this is to check for the "atapi" 7567 * property. The new "variant" property with a value of "atapi" has been 7568 * introduced so that future 'variants' of standard SCSI behavior (like 7569 * atapi) could be specified by the underlying HBA drivers by supplying 7570 * a new value for the "variant" property, instead of having to define a 7571 * new property. 7572 */ 7573 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7574 un->un_f_cfg_is_atapi = TRUE; 7575 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7576 "sd_unit_attach: un:0x%p Atapi device\n", un); 7577 } 7578 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7579 &variantp) == DDI_PROP_SUCCESS) { 7580 if (strcmp(variantp, "atapi") == 0) { 7581 un->un_f_cfg_is_atapi = TRUE; 7582 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7583 "sd_unit_attach: un:0x%p Atapi device\n", un); 7584 } 7585 ddi_prop_free(variantp); 7586 } 7587 7588 un->un_cmd_timeout = SD_IO_TIME; 7589 7590 un->un_busy_timeout = SD_BSY_TIMEOUT; 7591 7592 /* Info on current states, statuses, etc. (Updated frequently) */ 7593 un->un_state = SD_STATE_NORMAL; 7594 un->un_last_state = SD_STATE_NORMAL; 7595 7596 /* Control & status info for command throttling */ 7597 un->un_throttle = sd_max_throttle; 7598 un->un_saved_throttle = sd_max_throttle; 7599 un->un_min_throttle = sd_min_throttle; 7600 7601 if (un->un_f_is_fibre == TRUE) { 7602 un->un_f_use_adaptive_throttle = TRUE; 7603 } else { 7604 un->un_f_use_adaptive_throttle = FALSE; 7605 } 7606 7607 /* Removable media support. */ 7608 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7609 un->un_mediastate = DKIO_NONE; 7610 un->un_specified_mediastate = DKIO_NONE; 7611 7612 /* CVs for suspend/resume (PM or DR) */ 7613 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7614 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7615 7616 /* Power management support. 
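un_power_level starts out below as SD_SPINDLE_UNINIT; power management proper is initialized later in attach by sd_setup_pm().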
*/ 7617 un->un_power_level = SD_SPINDLE_UNINIT; 7618 7619 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7620 un->un_f_wcc_inprog = 0; 7621 7622 /* 7623 * The open/close semaphore is used to serialize threads executing 7624 * in the driver's open & close entry point routines for a given 7625 * instance. 7626 */ 7627 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7628 7629 /* 7630 * The conf file entries and softstate variables below are forceful 7631 * overrides: a non-zero value must be entered to change the default. 7632 */ 7633 un->un_f_disksort_disabled = FALSE; 7634 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7635 un->un_f_enable_rmw = FALSE; 7636 7637 /* 7638 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7639 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property. 7640 */ 7641 un->un_f_mmc_gesn_polling = TRUE; 7642 7643 /* 7644 * The physical sector size currently defaults to DEV_BSIZE. We can 7645 * override this value via the driver configuration file, so we must 7646 * set it before calling sd_read_unit_properties(). 7647 */ 7648 un->un_phy_blocksize = DEV_BSIZE; 7649 7650 /* 7651 * Retrieve the properties from the static driver table or the driver 7652 * configuration file (.conf) for this unit and update the soft state 7653 * for the device as needed for the indicated properties. 7654 * Note: the property configuration needs to occur here as some of the 7655 * following routines may have dependencies on soft state flags set 7656 * as part of the driver property configuration. 7657 */ 7658 sd_read_unit_properties(un); 7659 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7660 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7661 7662 /* 7663 * A device is treated as hotpluggable only if it has the 7664 * "hotpluggable" property. Otherwise it is regarded as 7665 * non-hotpluggable. 7666 */ 7667 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7668 -1) != -1) { 7669 un->un_f_is_hotpluggable = TRUE; 7670 } 7671 7672 /* 7673 * Set the unit's attributes (flags) according to "hotpluggable" and 7674 * the RMB bit in the INQUIRY data. 7675 */ 7676 sd_set_unit_attributes(un, devi); 7677 7678 /* 7679 * By default, we mark the capacity, lbasize, and geometry 7680 * as invalid. Only if we successfully read a valid capacity 7681 * will we update the un_blockcount and un_tgt_blocksize with the 7682 * valid values (the geometry will be validated later). 7683 */ 7684 un->un_f_blockcount_is_valid = FALSE; 7685 un->un_f_tgt_blocksize_is_valid = FALSE; 7686 7687 /* 7688 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7689 * otherwise. 7690 */ 7691 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7692 un->un_blockcount = 0; 7693 7694 /* 7695 * Set up the per-instance info needed to determine the correct 7696 * CDBs and other info for issuing commands to the target. 7697 */ 7698 sd_init_cdb_limits(un); 7699 7700 /* 7701 * Set up the IO chains to use, based upon the target type.
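* Targets supporting non-DEV_BSIZE block sizes get the RMMEDIA buf * chain; all others get the DISK chain. (If PM turns out to be * disabled, these are switched to the *_NO_PM variants later in * attach.)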
7702 */ 7703 if (un->un_f_non_devbsize_supported) { 7704 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7705 } else { 7706 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7707 } 7708 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7709 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7710 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7711 7712 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7713 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7714 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7715 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7716 7717 7718 if (ISCD(un)) { 7719 un->un_additional_codes = sd_additional_codes; 7720 } else { 7721 un->un_additional_codes = NULL; 7722 } 7723 7724 /* 7725 * Create the kstats here so they can be available for attach-time 7726 * routines that send commands to the unit (either polled or via 7727 * sd_send_scsi_cmd). 7728 * 7729 * Note: This is a critical sequence that needs to be maintained: 7730 * 1) Instantiate the kstats here, before any routines using the 7731 * iopath (i.e. sd_send_scsi_cmd). 7732 * 2) Instantiate and initialize the partition stats 7733 * (sd_set_pstats). 7734 * 3) Initialize the error stats (sd_set_errstats), following 7735 * sd_validate_geometry(),sd_register_devid(), 7736 * and sd_cache_control(). 7737 */ 7738 7739 un->un_stats = kstat_create(sd_label, instance, 7740 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7741 if (un->un_stats != NULL) { 7742 un->un_stats->ks_lock = SD_MUTEX(un); 7743 kstat_install(un->un_stats); 7744 } 7745 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7746 "sd_unit_attach: un:0x%p un_stats created\n", un); 7747 7748 sd_create_errstats(un, instance); 7749 if (un->un_errstats == NULL) { 7750 goto create_errstats_failed; 7751 } 7752 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7753 "sd_unit_attach: un:0x%p errstats created\n", un); 7754 7755 /* 7756 * The following if/else code was relocated here from below as part 7757 * of the fix for bug (4430280). However with the default setup added 7758 * on entry to this routine, it's no longer absolutely necessary for 7759 * this to be before the call to sd_spin_up_unit. 7760 */ 7761 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7762 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7763 (devp->sd_inq->inq_ansi == 5)) && 7764 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7765 7766 /* 7767 * If tagged queueing is supported by the target 7768 * and by the host adapter then we will enable it 7769 */ 7770 un->un_tagflags = 0; 7771 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7772 (un->un_f_arq_enabled == TRUE)) { 7773 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7774 1, 1) == 1) { 7775 un->un_tagflags = FLAG_STAG; 7776 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7777 "sd_unit_attach: un:0x%p tag queueing " 7778 "enabled\n", un); 7779 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7780 "untagged-qing", 0) == 1) { 7781 un->un_f_opt_queueing = TRUE; 7782 un->un_saved_throttle = un->un_throttle = 7783 min(un->un_throttle, 3); 7784 } else { 7785 un->un_f_opt_queueing = FALSE; 7786 un->un_saved_throttle = un->un_throttle = 1; 7787 } 7788 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7789 == 1) && (un->un_f_arq_enabled == TRUE)) { 7790 /* The Host Adapter supports internal queueing. 
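In that case the throttle below is likewise clamped to min(un_throttle, 3), matching the similar case above.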
*/ 7791 un->un_f_opt_queueing = TRUE; 7792 un->un_saved_throttle = un->un_throttle = 7793 min(un->un_throttle, 3); 7794 } else { 7795 un->un_f_opt_queueing = FALSE; 7796 un->un_saved_throttle = un->un_throttle = 1; 7797 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7798 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7799 } 7800 7801 /* 7802 * Enable large transfers for SATA/SAS drives 7803 */ 7804 if (SD_IS_SERIAL(un)) { 7805 un->un_max_xfer_size = 7806 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7807 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7808 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7809 "sd_unit_attach: un:0x%p max transfer " 7810 "size=0x%x\n", un, un->un_max_xfer_size); 7811 7812 } 7813 7814 /* Set up or tear down default wide operations for disks */ 7815 7816 /* 7817 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7818 * and "ssd_max_xfer_size" to exist simultaneously on the same 7819 * system and be set to different values. In the future this 7820 * code may need to be updated when the ssd module is 7821 * obsoleted and removed from the system. (4299588) 7822 */ 7823 if (SD_IS_PARALLEL_SCSI(un) && 7824 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7825 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7826 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7827 1, 1) == 1) { 7828 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7829 "sd_unit_attach: un:0x%p Wide Transfer " 7830 "enabled\n", un); 7831 } 7832 7833 /* 7834 * If tagged queuing has also been enabled, then 7835 * enable large xfers 7836 */ 7837 if (un->un_saved_throttle == sd_max_throttle) { 7838 un->un_max_xfer_size = 7839 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7840 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7841 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7842 "sd_unit_attach: un:0x%p max transfer " 7843 "size=0x%x\n", un, un->un_max_xfer_size); 7844 } 7845 } else { 7846 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7847 0, 1) == 1) { 7848 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7849 "sd_unit_attach: un:0x%p " 7850 "Wide Transfer disabled\n", un); 7851 } 7852 } 7853 } else { 7854 un->un_tagflags = FLAG_STAG; 7855 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7856 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7857 } 7858 7859 /* 7860 * If this target supports LUN reset, try to enable it. 7861 */ 7862 if (un->un_f_lun_reset_enabled) { 7863 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7864 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7865 "un:0x%p lun_reset capability set\n", un); 7866 } else { 7867 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7868 "un:0x%p lun-reset capability not set\n", un); 7869 } 7870 } 7871 7872 /* 7873 * Adjust the maximum transfer size. This is to fix 7874 * the problem of partial DMA support on SPARC. Some 7875 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7876 * size, which requires partial DMA support on SPARC. 7877 * In the future the SPARC pci nexus driver may solve 7878 * the problem instead of this fix.
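* Note that the adjustment below only ever shrinks un_max_xfer_size * toward the HBA's "dma-max" capability; it never raises it.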
7879 */ 7880 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7881 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7882 /* We need DMA partial even on SPARC to ensure sddump() works */ 7883 un->un_max_xfer_size = max_xfer_size; 7884 if (un->un_partial_dma_supported == 0) 7885 un->un_partial_dma_supported = 1; 7886 } 7887 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7888 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7889 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7890 un->un_max_xfer_size) == 1) { 7891 un->un_buf_breakup_supported = 1; 7892 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7893 "un:0x%p Buf breakup enabled\n", un); 7894 } 7895 } 7896 7897 /* 7898 * Set PKT_DMA_PARTIAL flag. 7899 */ 7900 if (un->un_partial_dma_supported == 1) { 7901 un->un_pkt_flags = PKT_DMA_PARTIAL; 7902 } else { 7903 un->un_pkt_flags = 0; 7904 } 7905 7906 /* Initialize sd_ssc_t for internal uscsi commands */ 7907 ssc = sd_ssc_init(un); 7908 scsi_fm_init(devp); 7909 7910 /* 7911 * Allocate memory for the SCSI FMA state. 7912 */ 7913 un->un_fm_private = 7914 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7915 sfip = (struct sd_fm_internal *)un->un_fm_private; 7916 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7917 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7918 sfip->fm_ssc.ssc_un = un; 7919 7920 if (ISCD(un) || 7921 un->un_f_has_removable_media || 7922 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7923 /* 7924 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices; 7925 * their logging is unchanged. 7926 */ 7927 sfip->fm_log_level = SD_FM_LOG_NSUP; 7928 } else { 7929 /* 7930 * If we get here, this is a non-CDROM, FM-capable device, 7931 * and it will not keep the old scsi_log behavior as before 7932 * in /var/adm/messages. Instead, the property 7933 * "fm-scsi-log" controls whether the FM telemetry will 7934 * be logged in /var/adm/messages. 7935 */ 7936 int fm_scsi_log; 7937 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7938 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7939 7940 if (fm_scsi_log) 7941 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7942 else 7943 sfip->fm_log_level = SD_FM_LOG_SILENT; 7944 } 7945 7946 /* 7947 * At this point in the attach, we have enough info in the 7948 * soft state to be able to issue commands to the target. 7949 * 7950 * All command paths used below MUST issue their commands as 7951 * SD_PATH_DIRECT. This is important as intermediate layers 7952 * are not all initialized yet (such as PM). 7953 */ 7954 7955 /* 7956 * Send a TEST UNIT READY command to the device. This should clear 7957 * any outstanding UNIT ATTENTION that may be present. 7958 * 7959 * Note: Don't check for success; just track if there is a reservation. 7960 * This is a throw-away command issued to clear any unit attentions. 7961 * 7962 * Note: This MUST be the first command issued to the target during 7963 * attach to ensure power on UNIT ATTENTIONS are cleared. 7964 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7965 * with attempts at spinning up a device with no media. 7966 */ 7967 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7968 if (status != 0) { 7969 if (status == EACCES) 7970 reservation_flag = SD_TARGET_IS_RESERVED; 7971 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7972 } 7973 7974 /* 7975 * If the device is NOT a removable media device, attempt to spin 7976 * it up (using the START_STOP_UNIT command) and read its capacity 7977 * (using the READ CAPACITY command).
Note, however, that either 7978 * of these could fail and in some cases we would continue with 7979 * the attach despite the failure (see below). 7980 */ 7981 if (un->un_f_descr_format_supported) { 7982 7983 switch (sd_spin_up_unit(ssc)) { 7984 case 0: 7985 /* 7986 * Spin-up was successful; now try to read the 7987 * capacity. If successful then save the results 7988 * and mark the capacity & lbasize as valid. 7989 */ 7990 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7991 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7992 7993 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7994 &lbasize, SD_PATH_DIRECT); 7995 7996 switch (status) { 7997 case 0: { 7998 if (capacity > DK_MAX_BLOCKS) { 7999 #ifdef _LP64 8000 if ((capacity + 1) > 8001 SD_GROUP1_MAX_ADDRESS) { 8002 /* 8003 * Enable descriptor format 8004 * sense data so that we can 8005 * get 64 bit sense data 8006 * fields. 8007 */ 8008 sd_enable_descr_sense(ssc); 8009 } 8010 #else 8011 /* 32-bit kernels can't handle this */ 8012 scsi_log(SD_DEVINFO(un), 8013 sd_label, CE_WARN, 8014 "disk has %llu blocks, which " 8015 "is too large for a 32-bit " 8016 "kernel", capacity); 8017 8018 #if defined(__i386) || defined(__amd64) 8019 /* 8020 * A 1TB disk was treated as (1TB - 512)B 8021 * in the past, so it might have a 8022 * valid VTOC and Solaris partitions; 8023 * we have to allow it to continue to 8024 * work. 8025 */ 8026 if (capacity - 1 > DK_MAX_BLOCKS) 8027 #endif 8028 goto spinup_failed; 8029 #endif 8030 } 8031 8032 /* 8033 * It is not necessary to check here whether 8034 * the capacity of the device is bigger than 8035 * what the max HBA CDB can support, because 8036 * sd_send_scsi_READ_CAPACITY retrieves 8037 * the capacity with a USCSI command, which 8038 * is itself constrained by the max HBA CDB. 8039 * sd_send_scsi_READ_CAPACITY returns 8040 * EINVAL when a bigger CDB than the HBA 8041 * supports would be required; that case is 8042 * handled in "case EINVAL" below. 8043 */ 8044 8045 /* 8046 * The following relies on 8047 * sd_send_scsi_READ_CAPACITY never 8048 * returning 0 for capacity and/or lbasize. 8049 */ 8050 sd_update_block_info(un, lbasize, capacity); 8051 8052 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8053 "sd_unit_attach: un:0x%p capacity = %ld " 8054 "blocks; lbasize= %ld.\n", un, 8055 un->un_blockcount, un->un_tgt_blocksize); 8056 8057 break; 8058 } 8059 case EINVAL: 8060 /* 8061 * In the case where the max-cdb-length property 8062 * is smaller than the required CDB length for 8063 * a SCSI device, a target driver can fail to 8064 * attach to that device. 8065 */ 8066 scsi_log(SD_DEVINFO(un), 8067 sd_label, CE_WARN, 8068 "disk capacity is too large " 8069 "for current cdb length"); 8070 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8071 8072 goto spinup_failed; 8073 case EACCES: 8074 /* 8075 * Should never get here if the spin-up 8076 * succeeded, but code it in anyway. 8077 * From here, just continue with the attach... 8078 */ 8079 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8080 "sd_unit_attach: un:0x%p " 8081 "sd_send_scsi_READ_CAPACITY " 8082 "returned reservation conflict\n", un); 8083 reservation_flag = SD_TARGET_IS_RESERVED; 8084 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8085 break; 8086 default: 8087 /* 8088 * Likewise, should never get here if the 8089 * spin-up succeeded. Just continue with 8090 * the attach...
8091 */ 8092 if (status == EIO) 8093 sd_ssc_assessment(ssc, 8094 SD_FMT_STATUS_CHECK); 8095 else 8096 sd_ssc_assessment(ssc, 8097 SD_FMT_IGNORE); 8098 break; 8099 } 8100 break; 8101 case EACCES: 8102 /* 8103 * Device is reserved by another host. In this case 8104 * we could not spin it up or read the capacity, but 8105 * we continue with the attach anyway. 8106 */ 8107 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8108 "sd_unit_attach: un:0x%p spin-up reservation " 8109 "conflict.\n", un); 8110 reservation_flag = SD_TARGET_IS_RESERVED; 8111 break; 8112 default: 8113 /* Fail the attach if the spin-up failed. */ 8114 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8115 "sd_unit_attach: un:0x%p spin-up failed.", un); 8116 goto spinup_failed; 8117 } 8118 8119 } 8120 8121 /* 8122 * Check to see if this is an MMC drive 8123 */ 8124 if (ISCD(un)) { 8125 sd_set_mmc_caps(ssc); 8126 } 8127 8128 /* 8129 * Add a zero-length attribute to tell the world we support 8130 * kernel ioctls (for layered drivers) 8131 */ 8132 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8133 DDI_KERNEL_IOCTL, NULL, 0); 8134 8135 /* 8136 * Add a boolean property to tell the world we support 8137 * the B_FAILFAST flag (for layered drivers) 8138 */ 8139 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8140 "ddi-failfast-supported", NULL, 0); 8141 8142 /* 8143 * Initialize power management 8144 */ 8145 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8146 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8147 sd_setup_pm(ssc, devi); 8148 if (un->un_f_pm_is_enabled == FALSE) { 8149 /* 8150 * For performance, point to a jump table that does 8151 * not include pm. 8152 * The direct and priority chains don't change with PM. 8153 * 8154 * Note: this is currently done based on individual device 8155 * capabilities. When an interface for determining system 8156 * power enabled state becomes available, or when additional 8157 * layers are added to the command chain, these values will 8158 * have to be re-evaluated for correctness. 8159 */ 8160 if (un->un_f_non_devbsize_supported) { 8161 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8162 } else { 8163 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8164 } 8165 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8166 } 8167 8168 /* 8169 * This property is set to 0 by HA software to avoid retries 8170 * on a reserved disk. (The preferred property name is 8171 * "retry-on-reservation-conflict".) (1189689) 8172 * 8173 * Note: The use of a global here can have unintended consequences. A 8174 * per-instance variable would be preferable, to match the capabilities 8175 * of different underlying HBAs. (4402600) 8176 */ 8177 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8178 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8179 sd_retry_on_reservation_conflict); 8180 if (sd_retry_on_reservation_conflict != 0) { 8181 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8182 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8183 sd_retry_on_reservation_conflict); 8184 } 8185 8186 /* Set up options for QFULL handling.
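For illustration only (the values here are hypothetical, not shipped defaults): a driver.conf fragment such as qfull-retries=10; qfull-retry-interval=100; would be picked up by the ddi_getprop() calls below and handed to the HBA via scsi_ifsetcap().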
*/ 8187 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8188 "qfull-retries", -1)) != -1) { 8189 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8190 rval, 1); 8191 } 8192 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8193 "qfull-retry-interval", -1)) != -1) { 8194 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8195 rval, 1); 8196 } 8197 8198 /* 8199 * This just prints a message that announces the existence of the 8200 * device. The message is always printed in the system logfile, but 8201 * only appears on the console if the system is booted with the 8202 * -v (verbose) argument. 8203 */ 8204 ddi_report_dev(devi); 8205 8206 un->un_mediastate = DKIO_NONE; 8207 8208 /* 8209 * Check if this is an SSD (Solid State Drive). 8210 */ 8211 sd_check_solid_state(ssc); 8212 8213 /* 8214 * Check whether the drive is in emulation mode. 8215 */ 8216 sd_check_emulation_mode(ssc); 8217 8218 cmlb_alloc_handle(&un->un_cmlbhandle); 8219 8220 #if defined(__i386) || defined(__amd64) 8221 /* 8222 * On x86, compensate for off-by-1 legacy error 8223 */ 8224 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8225 (lbasize == un->un_sys_blocksize)) 8226 offbyone = CMLB_OFF_BY_ONE; 8227 #endif 8228 8229 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8230 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8231 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8232 un->un_node_type, offbyone, un->un_cmlbhandle, 8233 (void *)SD_PATH_DIRECT) != 0) { 8234 goto cmlb_attach_failed; 8235 } 8236 8237 8238 /* 8239 * Read and validate the device's geometry (i.e., disk label). 8240 * A new unformatted drive will not have a valid geometry, but 8241 * the driver needs to successfully attach to this device so 8242 * the drive can be formatted via ioctls. 8243 */ 8244 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8245 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8246 8247 mutex_enter(SD_MUTEX(un)); 8248 8249 /* 8250 * Read and initialize the devid for the unit. 8251 */ 8252 if (un->un_f_devid_supported) { 8253 sd_register_devid(ssc, devi, reservation_flag); 8254 } 8255 mutex_exit(SD_MUTEX(un)); 8256 8257 #if (defined(__fibre)) 8258 /* 8259 * Register callbacks for fibre only. You can't do this solely 8260 * on the basis of the devid_type because this is HBA-specific. 8261 * We need to query our HBA capabilities to find out whether to 8262 * register or not. 8263 */ 8264 if (un->un_f_is_fibre) { 8265 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8266 sd_init_event_callbacks(un); 8267 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8268 "sd_unit_attach: un:0x%p event callbacks inserted", 8269 un); 8270 } 8271 } 8272 #endif 8273 8274 if (un->un_f_opt_disable_cache == TRUE) { 8275 /* 8276 * Disable both read cache and write cache. This is 8277 * the historic behavior of the keywords in the config file. 8278 */ 8279 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8280 0) { 8281 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8282 "sd_unit_attach: un:0x%p Could not disable " 8283 "caching", un); 8284 goto devid_failed; 8285 } 8286 } 8287 8288 /* 8289 * Check the value of the WCE bit now and 8290 * set un_f_write_cache_enabled accordingly.
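* (Background, as an aside not spelled out here: the WCE bit lives in * the device's Caching mode page, and sd_get_write_cache_enabled() is * expected to fetch it via MODE SENSE.)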
8291 */ 8292 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8293 mutex_enter(SD_MUTEX(un)); 8294 un->un_f_write_cache_enabled = (wc_enabled != 0); 8295 mutex_exit(SD_MUTEX(un)); 8296 8297 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8298 un->un_tgt_blocksize != DEV_BSIZE) || 8299 un->un_f_enable_rmw) { 8300 if (!(un->un_wm_cache)) { 8301 (void) snprintf(name_str, sizeof (name_str), 8302 "%s%d_cache", 8303 ddi_driver_name(SD_DEVINFO(un)), 8304 ddi_get_instance(SD_DEVINFO(un))); 8305 un->un_wm_cache = kmem_cache_create( 8306 name_str, sizeof (struct sd_w_map), 8307 8, sd_wm_cache_constructor, 8308 sd_wm_cache_destructor, NULL, 8309 (void *)un, NULL, 0); 8310 if (!(un->un_wm_cache)) { 8311 goto wm_cache_failed; 8312 } 8313 } 8314 } 8315 8316 /* 8317 * Check the value of the NV_SUP bit and set 8318 * un_f_suppress_cache_flush accordingly. 8319 */ 8320 sd_get_nv_sup(ssc); 8321 8322 /* 8323 * Find out what type of reservation this disk supports. 8324 */ 8325 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8326 8327 switch (status) { 8328 case 0: 8329 /* 8330 * SCSI-3 reservations are supported. 8331 */ 8332 un->un_reservation_type = SD_SCSI3_RESERVATION; 8333 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8334 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8335 break; 8336 case ENOTSUP: 8337 /* 8338 * The PERSISTENT RESERVE IN command would not be recognized by 8339 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8340 */ 8341 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8342 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8343 un->un_reservation_type = SD_SCSI2_RESERVATION; 8344 8345 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8346 break; 8347 default: 8348 /* 8349 * Default to SCSI-3 reservations. 8350 */ 8351 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8352 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8353 un->un_reservation_type = SD_SCSI3_RESERVATION; 8354 8355 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8356 break; 8357 } 8358 8359 /* 8360 * Set the pstat and error stat values here, so that data obtained 8361 * during the preceding attach-time routines is available. 8362 * 8363 * Note: This is a critical sequence that needs to be maintained: 8364 * 1) Instantiate the kstats before any routines using the iopath 8365 * (i.e. sd_send_scsi_cmd). 8366 * 2) Initialize the error stats (sd_set_errstats) and partition 8367 * stats (sd_set_pstats) here, following 8368 * cmlb_validate_geometry(), sd_register_devid(), and 8369 * sd_cache_control(). 8370 */ 8371 8372 if (un->un_f_pkstats_enabled && geom_label_valid) { 8373 sd_set_pstats(un); 8374 SD_TRACE(SD_LOG_IO_PARTITION, un, 8375 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8376 } 8377 8378 sd_set_errstats(un); 8379 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8380 "sd_unit_attach: un:0x%p errstats set\n", un); 8381 8382 8383 /* 8384 * After successfully attaching an instance, we record the information 8385 * of how many LUNs have been attached on the relevant target and 8386 * controller for parallel SCSI. This information is used when sd tries 8387 * to set the tagged queuing capability in the HBA.
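* The count is maintained only for parallel SCSI targets whose target * number lies in [0, NTARGETS_WIDE), as the check below enforces.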
8388 */ 8389 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8390 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8391 } 8392 8393 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8394 "sd_unit_attach: un:0x%p exit success\n", un); 8395 8396 /* Uninitialize sd_ssc_t pointer */ 8397 sd_ssc_fini(ssc); 8398 8399 return (DDI_SUCCESS); 8400 8401 /* 8402 * An error occurred during the attach; clean up & return failure. 8403 */ 8404 wm_cache_failed: 8405 devid_failed: 8406 8407 setup_pm_failed: 8408 ddi_remove_minor_node(devi, NULL); 8409 8410 cmlb_attach_failed: 8411 /* 8412 * Cleanup from the scsi_ifsetcap() calls (437868) 8413 */ 8414 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8415 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8416 8417 /* 8418 * Refer to the comments of setting tagged-qing in the beginning of 8419 * sd_unit_attach. We can only disable tagged queuing when there is 8420 * no lun attached on the target. 8421 */ 8422 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8423 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8424 } 8425 8426 if (un->un_f_is_fibre == FALSE) { 8427 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8428 } 8429 8430 spinup_failed: 8431 8432 /* Uninitialize sd_ssc_t pointer */ 8433 sd_ssc_fini(ssc); 8434 8435 mutex_enter(SD_MUTEX(un)); 8436 8437 /* Deallocate SCSI FMA memory spaces */ 8438 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8439 8440 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8441 if (un->un_direct_priority_timeid != NULL) { 8442 timeout_id_t temp_id = un->un_direct_priority_timeid; 8443 un->un_direct_priority_timeid = NULL; 8444 mutex_exit(SD_MUTEX(un)); 8445 (void) untimeout(temp_id); 8446 mutex_enter(SD_MUTEX(un)); 8447 } 8448 8449 /* Cancel any pending start/stop timeouts */ 8450 if (un->un_startstop_timeid != NULL) { 8451 timeout_id_t temp_id = un->un_startstop_timeid; 8452 un->un_startstop_timeid = NULL; 8453 mutex_exit(SD_MUTEX(un)); 8454 (void) untimeout(temp_id); 8455 mutex_enter(SD_MUTEX(un)); 8456 } 8457 8458 /* Cancel any pending reset-throttle timeouts */ 8459 if (un->un_reset_throttle_timeid != NULL) { 8460 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8461 un->un_reset_throttle_timeid = NULL; 8462 mutex_exit(SD_MUTEX(un)); 8463 (void) untimeout(temp_id); 8464 mutex_enter(SD_MUTEX(un)); 8465 } 8466 8467 /* Cancel rmw warning message timeouts */ 8468 if (un->un_rmw_msg_timeid != NULL) { 8469 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8470 un->un_rmw_msg_timeid = NULL; 8471 mutex_exit(SD_MUTEX(un)); 8472 (void) untimeout(temp_id); 8473 mutex_enter(SD_MUTEX(un)); 8474 } 8475 8476 /* Cancel any pending retry timeouts */ 8477 if (un->un_retry_timeid != NULL) { 8478 timeout_id_t temp_id = un->un_retry_timeid; 8479 un->un_retry_timeid = NULL; 8480 mutex_exit(SD_MUTEX(un)); 8481 (void) untimeout(temp_id); 8482 mutex_enter(SD_MUTEX(un)); 8483 } 8484 8485 /* Cancel any pending delayed cv broadcast timeouts */ 8486 if (un->un_dcvb_timeid != NULL) { 8487 timeout_id_t temp_id = un->un_dcvb_timeid; 8488 un->un_dcvb_timeid = NULL; 8489 mutex_exit(SD_MUTEX(un)); 8490 (void) untimeout(temp_id); 8491 mutex_enter(SD_MUTEX(un)); 8492 } 8493 8494 mutex_exit(SD_MUTEX(un)); 8495 8496 /* There should not be any in-progress I/O so ASSERT this check */ 8497 ASSERT(un->un_ncmds_in_transport == 0); 8498 ASSERT(un->un_ncmds_in_driver == 0); 8499 8500 /* Do not free the softstate if the callback routine is active */ 8501 sd_sync_with_callback(un); 
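/* From here the teardown proceeds in reverse order of allocation: error and I/O kstats, the xbuf attribute, driver properties, the open/close semaphore and state CV, the request sense resources, and finally the soft state itself. */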
8502 8503 /* 8504 * Partition stats apparently are not used with removables. These would 8505 * not have been created during attach, so no need to clean them up... 8506 */ 8507 if (un->un_errstats != NULL) { 8508 kstat_delete(un->un_errstats); 8509 un->un_errstats = NULL; 8510 } 8511 8512 create_errstats_failed: 8513 8514 if (un->un_stats != NULL) { 8515 kstat_delete(un->un_stats); 8516 un->un_stats = NULL; 8517 } 8518 8519 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8520 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8521 8522 ddi_prop_remove_all(devi); 8523 sema_destroy(&un->un_semoclose); 8524 cv_destroy(&un->un_state_cv); 8525 8526 getrbuf_failed: 8527 8528 sd_free_rqs(un); 8529 8530 alloc_rqs_failed: 8531 8532 devp->sd_private = NULL; 8533 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8534 8535 get_softstate_failed: 8536 /* 8537 * Note: the man pages are unclear as to whether or not doing a 8538 * ddi_soft_state_free(sd_state, instance) is the right way to 8539 * clean up after the ddi_soft_state_zalloc() if the subsequent 8540 * ddi_get_soft_state() fails. The implication seems to be 8541 * that the get_soft_state cannot fail if the zalloc succeeds. 8542 */ 8543 #ifndef XPV_HVM_DRIVER 8544 ddi_soft_state_free(sd_state, instance); 8545 #endif /* !XPV_HVM_DRIVER */ 8546 8547 probe_failed: 8548 scsi_unprobe(devp); 8549 8550 return (DDI_FAILURE); 8551 } 8552 8553 8554 /* 8555 * Function: sd_unit_detach 8556 * 8557 * Description: Performs DDI_DETACH processing for sddetach(). 8558 * 8559 * Return Code: DDI_SUCCESS 8560 * DDI_FAILURE 8561 * 8562 * Context: Kernel thread context 8563 */ 8564 8565 static int 8566 sd_unit_detach(dev_info_t *devi) 8567 { 8568 struct scsi_device *devp; 8569 struct sd_lun *un; 8570 int i; 8571 int tgt; 8572 dev_t dev; 8573 dev_info_t *pdip = ddi_get_parent(devi); 8574 #ifndef XPV_HVM_DRIVER 8575 int instance = ddi_get_instance(devi); 8576 #endif /* !XPV_HVM_DRIVER */ 8577 8578 mutex_enter(&sd_detach_mutex); 8579 8580 /* 8581 * Fail the detach for any of the following: 8582 * - Unable to get the sd_lun struct for the instance 8583 * - A layered driver has an outstanding open on the instance 8584 * - Another thread is already detaching this instance 8585 * - Another thread is currently performing an open 8586 */ 8587 devp = ddi_get_driver_private(devi); 8588 if ((devp == NULL) || 8589 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8590 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8591 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8592 mutex_exit(&sd_detach_mutex); 8593 return (DDI_FAILURE); 8594 } 8595 8596 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8597 8598 /* 8599 * Mark this instance as currently in a detach, to inhibit any 8600 * opens from a layered driver. 8601 */ 8602 un->un_detach_count++; 8603 mutex_exit(&sd_detach_mutex); 8604 8605 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8606 SCSI_ADDR_PROP_TARGET, -1); 8607 8608 dev = sd_make_device(SD_DEVINFO(un)); 8609 8610 #ifndef lint 8611 _NOTE(COMPETING_THREADS_NOW); 8612 #endif 8613 8614 mutex_enter(SD_MUTEX(un)); 8615 8616 /* 8617 * Fail the detach if there are any outstanding layered 8618 * opens on this device. 8619 */ 8620 for (i = 0; i < NDKMAP; i++) { 8621 if (un->un_ocmap.lyropen[i] != 0) { 8622 goto err_notclosed; 8623 } 8624 } 8625 8626 /* 8627 * Verify there are NO outstanding commands issued to this device. 8628 * ie, un_ncmds_in_transport == 0. 
8629 * It's possible to have outstanding commands through the physio 8630 * code path, even though everything's closed. 8631 */ 8632 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8633 (un->un_direct_priority_timeid != NULL) || 8634 (un->un_state == SD_STATE_RWAIT)) { 8635 mutex_exit(SD_MUTEX(un)); 8636 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8637 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8638 goto err_stillbusy; 8639 } 8640 8641 /* 8642 * If we have the device reserved, release the reservation. 8643 */ 8644 if ((un->un_resvd_status & SD_RESERVE) && 8645 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8646 mutex_exit(SD_MUTEX(un)); 8647 /* 8648 * Note: sd_reserve_release sends a command to the device 8649 * via the sd_ioctlcmd() path, and can sleep. 8650 */ 8651 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8652 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8653 "sd_dr_detach: Cannot release reservation \n"); 8654 } 8655 } else { 8656 mutex_exit(SD_MUTEX(un)); 8657 } 8658 8659 /* 8660 * Untimeout any reserve recover, throttle reset, restart unit 8661 * and delayed broadcast timeout threads. Protect the timeout pointer 8662 * from getting nulled by their callback functions. 8663 */ 8664 mutex_enter(SD_MUTEX(un)); 8665 if (un->un_resvd_timeid != NULL) { 8666 timeout_id_t temp_id = un->un_resvd_timeid; 8667 un->un_resvd_timeid = NULL; 8668 mutex_exit(SD_MUTEX(un)); 8669 (void) untimeout(temp_id); 8670 mutex_enter(SD_MUTEX(un)); 8671 } 8672 8673 if (un->un_reset_throttle_timeid != NULL) { 8674 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8675 un->un_reset_throttle_timeid = NULL; 8676 mutex_exit(SD_MUTEX(un)); 8677 (void) untimeout(temp_id); 8678 mutex_enter(SD_MUTEX(un)); 8679 } 8680 8681 if (un->un_startstop_timeid != NULL) { 8682 timeout_id_t temp_id = un->un_startstop_timeid; 8683 un->un_startstop_timeid = NULL; 8684 mutex_exit(SD_MUTEX(un)); 8685 (void) untimeout(temp_id); 8686 mutex_enter(SD_MUTEX(un)); 8687 } 8688 8689 if (un->un_rmw_msg_timeid != NULL) { 8690 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8691 un->un_rmw_msg_timeid = NULL; 8692 mutex_exit(SD_MUTEX(un)); 8693 (void) untimeout(temp_id); 8694 mutex_enter(SD_MUTEX(un)); 8695 } 8696 8697 if (un->un_dcvb_timeid != NULL) { 8698 timeout_id_t temp_id = un->un_dcvb_timeid; 8699 un->un_dcvb_timeid = NULL; 8700 mutex_exit(SD_MUTEX(un)); 8701 (void) untimeout(temp_id); 8702 } else { 8703 mutex_exit(SD_MUTEX(un)); 8704 } 8705 8706 /* Remove any pending reservation reclaim requests for this device */ 8707 sd_rmv_resv_reclaim_req(dev); 8708 8709 mutex_enter(SD_MUTEX(un)); 8710 8711 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8712 if (un->un_direct_priority_timeid != NULL) { 8713 timeout_id_t temp_id = un->un_direct_priority_timeid; 8714 un->un_direct_priority_timeid = NULL; 8715 mutex_exit(SD_MUTEX(un)); 8716 (void) untimeout(temp_id); 8717 mutex_enter(SD_MUTEX(un)); 8718 } 8719 8720 /* Cancel any active multi-host disk watch thread requests */ 8721 if (un->un_mhd_token != NULL) { 8722 mutex_exit(SD_MUTEX(un)); 8723 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8724 if (scsi_watch_request_terminate(un->un_mhd_token, 8725 SCSI_WATCH_TERMINATE_NOWAIT)) { 8726 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8727 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8728 /* 8729 * Note: We are returning here after having removed 8730 * some driver timeouts above. 
This is consistent with 8731 * the legacy implementation, but perhaps the watch 8732 * terminate call should be made with the wait flag set. 8733 */ 8734 goto err_stillbusy; 8735 } 8736 mutex_enter(SD_MUTEX(un)); 8737 un->un_mhd_token = NULL; 8738 } 8739 8740 if (un->un_swr_token != NULL) { 8741 mutex_exit(SD_MUTEX(un)); 8742 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8743 if (scsi_watch_request_terminate(un->un_swr_token, 8744 SCSI_WATCH_TERMINATE_NOWAIT)) { 8745 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8746 "sd_dr_detach: Cannot cancel swr watch request\n"); 8747 /* 8748 * Note: We are returning here after having removed 8749 * some driver timeouts above. This is consistent with 8750 * the legacy implementation, but perhaps the watch 8751 * terminate call should be made with the wait flag set. 8752 */ 8753 goto err_stillbusy; 8754 } 8755 mutex_enter(SD_MUTEX(un)); 8756 un->un_swr_token = NULL; 8757 } 8758 8759 mutex_exit(SD_MUTEX(un)); 8760 8761 /* 8762 * Clear any scsi_reset_notifies. We clear the reset notifies 8763 * if we have not registered one. 8764 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8765 */ 8766 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8767 sd_mhd_reset_notify_cb, (caddr_t)un); 8768 8769 /* 8770 * Protect the timeout pointers from getting nulled by 8771 * their callback functions during the cancellation process. 8772 * In such a scenario untimeout can be invoked with a null value. 8773 */ 8774 _NOTE(NO_COMPETING_THREADS_NOW); 8775 8776 mutex_enter(&un->un_pm_mutex); 8777 if (un->un_pm_idle_timeid != NULL) { 8778 timeout_id_t temp_id = un->un_pm_idle_timeid; 8779 un->un_pm_idle_timeid = NULL; 8780 mutex_exit(&un->un_pm_mutex); 8781 8782 /* 8783 * Timeout is active; cancel it. 8784 * Note that it'll never be active on a device 8785 * that does not support PM; therefore we don't 8786 * have to check before calling pm_idle_component. 8787 */ 8788 (void) untimeout(temp_id); 8789 (void) pm_idle_component(SD_DEVINFO(un), 0); 8790 mutex_enter(&un->un_pm_mutex); 8791 } 8792 8793 /* 8794 * Check whether there is already a timeout scheduled for power 8795 * management. If so, don't lower the power here; that's 8796 * the timeout handler's job. 8797 */ 8798 if (un->un_pm_timeid != NULL) { 8799 timeout_id_t temp_id = un->un_pm_timeid; 8800 un->un_pm_timeid = NULL; 8801 mutex_exit(&un->un_pm_mutex); 8802 /* 8803 * Timeout is active; cancel it. 8804 * Note that it'll never be active on a device 8805 * that does not support PM; therefore we don't 8806 * have to check before calling pm_idle_component. 8807 */ 8808 (void) untimeout(temp_id); 8809 (void) pm_idle_component(SD_DEVINFO(un), 0); 8810 8811 } else { 8812 mutex_exit(&un->un_pm_mutex); 8813 if ((un->un_f_pm_is_enabled == TRUE) && 8814 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) 8815 != DDI_SUCCESS)) { 8816 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8817 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8818 /* 8819 * Fix for bug: 4297749, item # 13 8820 * The above test now includes a check to see if PM is 8821 * supported by this device before calling 8822 * pm_lower_power(). 8823 * Note, the following is not dead code. The call to 8824 * pm_lower_power above will generate a call back into 8825 * our sdpower routine which might result in a timeout 8826 * handler getting activated. Therefore the following 8827 * code is valid and necessary.
8828 */ 8829 mutex_enter(&un->un_pm_mutex); 8830 if (un->un_pm_timeid != NULL) { 8831 timeout_id_t temp_id = un->un_pm_timeid; 8832 un->un_pm_timeid = NULL; 8833 mutex_exit(&un->un_pm_mutex); 8834 (void) untimeout(temp_id); 8835 (void) pm_idle_component(SD_DEVINFO(un), 0); 8836 } else { 8837 mutex_exit(&un->un_pm_mutex); 8838 } 8839 } 8840 } 8841 8842 /* 8843 * Clean up from the scsi_ifsetcap() calls (437868). 8844 * Relocated here from above to come after the call to 8845 * pm_lower_power(), which was getting errors. 8846 */ 8847 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8848 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8849 8850 /* 8851 * Currently, tagged queuing is supported per target by the HBA. 8852 * Setting it for a LUN instance actually sets the capability 8853 * for the whole target, which affects those LUNs already 8854 * attached on the same target. So during detach, we can only disable 8855 * this capability when this is the only LUN left on the target. 8856 * By doing this, we assume a target has the same tagged queuing 8857 * capability for every LUN. This restriction can be removed when 8858 * HBAs support per-LUN tagged queuing. 8859 */ 8860 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8861 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8862 } 8863 8864 if (un->un_f_is_fibre == FALSE) { 8865 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8866 } 8867 8868 /* 8869 * Remove any event callbacks, fibre only. 8870 */ 8871 if (un->un_f_is_fibre == TRUE) { 8872 if ((un->un_insert_event != NULL) && 8873 (ddi_remove_event_handler(un->un_insert_cb_id) != 8874 DDI_SUCCESS)) { 8875 /* 8876 * Note: We are returning here after having done 8877 * substantial cleanup above. This is consistent 8878 * with the legacy implementation, but it may not 8879 * be the right thing to do. 8880 */ 8881 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8882 "sd_dr_detach: Cannot cancel insert event\n"); 8883 goto err_remove_event; 8884 } 8885 un->un_insert_event = NULL; 8886 8887 if ((un->un_remove_event != NULL) && 8888 (ddi_remove_event_handler(un->un_remove_cb_id) != 8889 DDI_SUCCESS)) { 8890 /* 8891 * Note: We are returning here after having done 8892 * substantial cleanup above. This is consistent 8893 * with the legacy implementation, but it may not 8894 * be the right thing to do. 8895 */ 8896 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8897 "sd_dr_detach: Cannot cancel remove event\n"); 8898 goto err_remove_event; 8899 } 8900 un->un_remove_event = NULL; 8901 } 8902 8903 /* Do not free the softstate if the callback routine is active */ 8904 sd_sync_with_callback(un); 8905 8906 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8907 cmlb_free_handle(&un->un_cmlbhandle); 8908 8909 /* 8910 * Hold the detach mutex here, to make sure that no other threads ever 8911 * can access a (partially) freed soft state structure. 8912 */ 8913 mutex_enter(&sd_detach_mutex); 8914 8915 /* 8916 * Clean up the soft state struct. 8917 * Cleanup is done in reverse order of allocs/inits. 8918 * At this point there should be no competing threads anymore. 8919 */ 8920 8921 scsi_fm_fini(devp); 8922 8923 /* 8924 * Deallocate memory for SCSI FMA. 8925 */ 8926 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8927 8928 /* 8929 * Unregister and free the device id if it was not registered 8930 * by the transport.
8931 */ 8932 if (un->un_f_devid_transport_defined == FALSE) 8933 ddi_devid_unregister(devi); 8934 8935 /* 8936 * Free the devid structure if allocated before (by ddi_devid_init() 8937 * or ddi_devid_get()). 8938 */ 8939 if (un->un_devid) { 8940 ddi_devid_free(un->un_devid); 8941 un->un_devid = NULL; 8942 } 8943 8944 /* 8945 * Destroy the wmap cache if it exists. 8946 */ 8947 if (un->un_wm_cache != NULL) { 8948 kmem_cache_destroy(un->un_wm_cache); 8949 un->un_wm_cache = NULL; 8950 } 8951 8952 /* 8953 * kstat cleanup is done in detach for all device types (4363169). 8954 * We do not want to fail detach if the device kstats are not deleted 8955 * since there is confusion about the devo_refcnt for the device. 8956 * We just delete the kstats and let detach complete successfully. 8957 */ 8958 if (un->un_stats != NULL) { 8959 kstat_delete(un->un_stats); 8960 un->un_stats = NULL; 8961 } 8962 if (un->un_errstats != NULL) { 8963 kstat_delete(un->un_errstats); 8964 un->un_errstats = NULL; 8965 } 8966 8967 /* Remove partition stats */ 8968 if (un->un_f_pkstats_enabled) { 8969 for (i = 0; i < NSDMAP; i++) { 8970 if (un->un_pstats[i] != NULL) { 8971 kstat_delete(un->un_pstats[i]); 8972 un->un_pstats[i] = NULL; 8973 } 8974 } 8975 } 8976 8977 /* Remove xbuf registration */ 8978 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8979 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8980 8981 /* Remove driver properties */ 8982 ddi_prop_remove_all(devi); 8983 8984 mutex_destroy(&un->un_pm_mutex); 8985 cv_destroy(&un->un_pm_busy_cv); 8986 8987 cv_destroy(&un->un_wcc_cv); 8988 8989 /* Open/close semaphore */ 8990 sema_destroy(&un->un_semoclose); 8991 8992 /* Removable media condvar. */ 8993 cv_destroy(&un->un_state_cv); 8994 8995 /* Suspend/resume condvar. */ 8996 cv_destroy(&un->un_suspend_cv); 8997 cv_destroy(&un->un_disk_busy_cv); 8998 8999 sd_free_rqs(un); 9000 9001 /* Free up soft state */ 9002 devp->sd_private = NULL; 9003 9004 bzero(un, sizeof (struct sd_lun)); 9005 #ifndef XPV_HVM_DRIVER 9006 ddi_soft_state_free(sd_state, instance); 9007 #endif /* !XPV_HVM_DRIVER */ 9008 9009 mutex_exit(&sd_detach_mutex); 9010 9011 /* This frees up the INQUIRY data associated with the device. */ 9012 scsi_unprobe(devp); 9013 9014 /* 9015 * After successfully detaching an instance, we update the information 9016 * about how many luns are attached on the relevant target and 9017 * controller for parallel SCSI. This information is used when sd tries 9018 * to set the tagged queuing capability in the HBA. 9019 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 9020 * check if the device is parallel SCSI. However, we don't need to 9021 * check here because we've already checked during attach. No device 9022 * that is not parallel SCSI is in the chain. 9023 */ 9024 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 9025 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 9026 } 9027 9028 return (DDI_SUCCESS); 9029 9030 err_notclosed: 9031 mutex_exit(SD_MUTEX(un)); 9032 9033 err_stillbusy: 9034 _NOTE(NO_COMPETING_THREADS_NOW); 9035 9036 err_remove_event: 9037 mutex_enter(&sd_detach_mutex); 9038 un->un_detach_count--; 9039 mutex_exit(&sd_detach_mutex); 9040 9041 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9042 return (DDI_FAILURE); 9043 } 9044 9045 9046 /* 9047 * Function: sd_create_errstats 9048 * 9049 * Description: This routine instantiates the device error stats.
9050 * 9051 * Note: During attach the stats are instantiated first so they are 9052 * available for attach-time routines that utilize the driver 9053 * iopath to send commands to the device. The stats are initialized 9054 * separately so data obtained during some attach-time routines is 9055 * available. (4362483) 9056 * 9057 * Arguments: un - driver soft state (unit) structure 9058 * instance - driver instance 9059 * 9060 * Context: Kernel thread context 9061 */ 9062 9063 static void 9064 sd_create_errstats(struct sd_lun *un, int instance) 9065 { 9066 struct sd_errstats *stp; 9067 char kstatmodule_err[KSTAT_STRLEN]; 9068 char kstatname[KSTAT_STRLEN]; 9069 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9070 9071 ASSERT(un != NULL); 9072 9073 if (un->un_errstats != NULL) { 9074 return; 9075 } 9076 9077 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9078 "%serr", sd_label); 9079 (void) snprintf(kstatname, sizeof (kstatname), 9080 "%s%d,err", sd_label, instance); 9081 9082 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9083 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9084 9085 if (un->un_errstats == NULL) { 9086 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9087 "sd_create_errstats: Failed kstat_create\n"); 9088 return; 9089 } 9090 9091 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9092 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9093 KSTAT_DATA_UINT32); 9094 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9095 KSTAT_DATA_UINT32); 9096 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9097 KSTAT_DATA_UINT32); 9098 kstat_named_init(&stp->sd_vid, "Vendor", 9099 KSTAT_DATA_CHAR); 9100 kstat_named_init(&stp->sd_pid, "Product", 9101 KSTAT_DATA_CHAR); 9102 kstat_named_init(&stp->sd_revision, "Revision", 9103 KSTAT_DATA_CHAR); 9104 kstat_named_init(&stp->sd_serial, "Serial No", 9105 KSTAT_DATA_CHAR); 9106 kstat_named_init(&stp->sd_capacity, "Size", 9107 KSTAT_DATA_ULONGLONG); 9108 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9109 KSTAT_DATA_UINT32); 9110 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9111 KSTAT_DATA_UINT32); 9112 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9113 KSTAT_DATA_UINT32); 9114 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9115 KSTAT_DATA_UINT32); 9116 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9117 KSTAT_DATA_UINT32); 9118 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9119 KSTAT_DATA_UINT32); 9120 9121 un->un_errstats->ks_private = un; 9122 un->un_errstats->ks_update = nulldev; 9123 9124 kstat_install(un->un_errstats); 9125 } 9126 9127 9128 /* 9129 * Function: sd_set_errstats 9130 * 9131 * Description: This routine sets the value of the vendor id, product id, 9132 * revision, serial number, and capacity device error stats. 9133 * 9134 * Note: During attach the stats are instantiated first so they are 9135 * available for attach-time routines that utilize the driver 9136 * iopath to send commands to the device. The stats are initialized 9137 * separately so data obtained during some attach-time routines is 9138 * available. 
(4362483) 9139 * 9140 * Arguments: un - driver soft state (unit) structure 9141 * 9142 * Context: Kernel thread context 9143 */ 9144 9145 static void 9146 sd_set_errstats(struct sd_lun *un) 9147 { 9148 struct sd_errstats *stp; 9149 char *sn; 9150 9151 ASSERT(un != NULL); 9152 ASSERT(un->un_errstats != NULL); 9153 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9154 ASSERT(stp != NULL); 9155 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9156 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9157 (void) strncpy(stp->sd_revision.value.c, 9158 un->un_sd->sd_inq->inq_revision, 4); 9159 9160 /* 9161 * All the errstats are persistent across detach/attach, 9162 * so reset them here to handle hot replacement of 9163 * disk drives, except for Sun-qualified drives that 9164 * have not changed. 9165 */ 9166 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 9167 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9168 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 9169 stp->sd_softerrs.value.ui32 = 0; 9170 stp->sd_harderrs.value.ui32 = 0; 9171 stp->sd_transerrs.value.ui32 = 0; 9172 stp->sd_rq_media_err.value.ui32 = 0; 9173 stp->sd_rq_ntrdy_err.value.ui32 = 0; 9174 stp->sd_rq_nodev_err.value.ui32 = 0; 9175 stp->sd_rq_recov_err.value.ui32 = 0; 9176 stp->sd_rq_illrq_err.value.ui32 = 0; 9177 stp->sd_rq_pfa_err.value.ui32 = 0; 9178 } 9179 9180 /* 9181 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9182 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9183 * (4376302)) 9184 */ 9185 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9186 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9187 sizeof (SD_INQUIRY(un)->inq_serial)); 9188 } else { 9189 /* 9190 * Set the "Serial No" kstat for non-Sun qualified drives 9191 */ 9192 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un), 9193 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 9194 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) { 9195 (void) strlcpy(stp->sd_serial.value.c, sn, 9196 sizeof (stp->sd_serial.value.c)); 9197 ddi_prop_free(sn); 9198 } 9199 } 9200 9201 if (un->un_f_blockcount_is_valid != TRUE) { 9202 /* 9203 * Set capacity error stat to 0 for no media. This ensures 9204 * a valid capacity is displayed in response to 'iostat -E' 9205 * when no media is present in the device. 9206 */ 9207 stp->sd_capacity.value.ui64 = 0; 9208 } else { 9209 /* 9210 * Multiply un_blockcount by un->un_sys_blocksize to get 9211 * capacity. 9212 * 9213 * Note: for non-512 blocksize devices "un_blockcount" has been 9214 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9215 * (un_tgt_blocksize / un->un_sys_blocksize). 9216 */ 9217 stp->sd_capacity.value.ui64 = (uint64_t) 9218 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9219 } 9220 } 9221 9222 9223 /* 9224 * Function: sd_set_pstats 9225 * 9226 * Description: This routine instantiates and initializes the partition 9227 * stats for each partition with more than zero blocks. 9228 * (4363169) 9229 * 9230 * Arguments: un - driver soft state (unit) structure 9231 * 9232 * Context: Kernel thread context 9233 */ 9234 9235 static void 9236 sd_set_pstats(struct sd_lun *un) 9237 { 9238 char kstatname[KSTAT_STRLEN]; 9239 int instance; 9240 int i; 9241 diskaddr_t nblks = 0; 9242 char *partname = NULL; 9243 9244 ASSERT(un != NULL); 9245 9246 instance = ddi_get_instance(SD_DEVINFO(un)); 9247 9248 /* Note:x86: is this a VTOC8/VTOC16 difference?
*/ 9249 for (i = 0; i < NSDMAP; i++) { 9250 9251 if (cmlb_partinfo(un->un_cmlbhandle, i, 9252 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 9253 continue; 9254 mutex_enter(SD_MUTEX(un)); 9255 9256 if ((un->un_pstats[i] == NULL) && 9257 (nblks != 0)) { 9258 9259 (void) snprintf(kstatname, sizeof (kstatname), 9260 "%s%d,%s", sd_label, instance, 9261 partname); 9262 9263 un->un_pstats[i] = kstat_create(sd_label, 9264 instance, kstatname, "partition", KSTAT_TYPE_IO, 9265 1, KSTAT_FLAG_PERSISTENT); 9266 if (un->un_pstats[i] != NULL) { 9267 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9268 kstat_install(un->un_pstats[i]); 9269 } 9270 } 9271 mutex_exit(SD_MUTEX(un)); 9272 } 9273 } 9274 9275 9276 #if (defined(__fibre)) 9277 /* 9278 * Function: sd_init_event_callbacks 9279 * 9280 * Description: This routine initializes the insertion and removal event 9281 * callbacks. (fibre only) 9282 * 9283 * Arguments: un - driver soft state (unit) structure 9284 * 9285 * Context: Kernel thread context 9286 */ 9287 9288 static void 9289 sd_init_event_callbacks(struct sd_lun *un) 9290 { 9291 ASSERT(un != NULL); 9292 9293 if ((un->un_insert_event == NULL) && 9294 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9295 &un->un_insert_event) == DDI_SUCCESS)) { 9296 /* 9297 * Add the callback for an insertion event 9298 */ 9299 (void) ddi_add_event_handler(SD_DEVINFO(un), 9300 un->un_insert_event, sd_event_callback, (void *)un, 9301 &(un->un_insert_cb_id)); 9302 } 9303 9304 if ((un->un_remove_event == NULL) && 9305 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9306 &un->un_remove_event) == DDI_SUCCESS)) { 9307 /* 9308 * Add the callback for a removal event 9309 */ 9310 (void) ddi_add_event_handler(SD_DEVINFO(un), 9311 un->un_remove_event, sd_event_callback, (void *)un, 9312 &(un->un_remove_cb_id)); 9313 } 9314 } 9315 9316 9317 /* 9318 * Function: sd_event_callback 9319 * 9320 * Description: This routine handles insert/remove events (photon). The 9321 * state is changed to OFFLINE, which can be used to suppress 9322 * error msgs. (fibre only) 9323 * 9324 * Arguments: un - driver soft state (unit) structure 9325 * 9326 * Context: Callout thread context 9327 */ 9328 /* ARGSUSED */ 9329 static void 9330 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9331 void *bus_impldata) 9332 { 9333 struct sd_lun *un = (struct sd_lun *)arg; 9334 9335 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9336 if (event == un->un_insert_event) { 9337 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9338 mutex_enter(SD_MUTEX(un)); 9339 if (un->un_state == SD_STATE_OFFLINE) { 9340 if (un->un_last_state != SD_STATE_SUSPENDED) { 9341 un->un_state = un->un_last_state; 9342 } else { 9343 /* 9344 * We have gone through SUSPEND/RESUME while 9345 * we were offline. Restore the last state. 9346 */ 9347 un->un_state = un->un_save_state; 9348 } 9349 } 9350 mutex_exit(SD_MUTEX(un)); 9351 9352 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9353 } else if (event == un->un_remove_event) { 9354 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9355 mutex_enter(SD_MUTEX(un)); 9356 /* 9357 * We need to handle an event callback that occurs during 9358 * the suspend operation, since we don't prevent it.
9359 */ 9360 if (un->un_state != SD_STATE_OFFLINE) { 9361 if (un->un_state != SD_STATE_SUSPENDED) { 9362 New_state(un, SD_STATE_OFFLINE); 9363 } else { 9364 un->un_last_state = SD_STATE_OFFLINE; 9365 } 9366 } 9367 mutex_exit(SD_MUTEX(un)); 9368 } else { 9369 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9370 "!Unknown event\n"); 9371 } 9372 9373 } 9374 #endif 9375 9376 /* 9377 * Function: sd_cache_control() 9378 * 9379 * Description: This routine is the driver entry point for setting 9380 * read and write caching by modifying the WCE (write cache 9381 * enable) and RCD (read cache disable) bits of mode 9382 * page 8 (MODEPAGE_CACHING). 9383 * 9384 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9385 * structure for this target. 9386 * rcd_flag - flag for controlling the read cache 9387 * wce_flag - flag for controlling the write cache 9388 * 9389 * Return Code: EIO 9390 * code returned by sd_send_scsi_MODE_SENSE and 9391 * sd_send_scsi_MODE_SELECT 9392 * 9393 * Context: Kernel Thread 9394 */ 9395 9396 static int 9397 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9398 { 9399 struct mode_caching *mode_caching_page; 9400 uchar_t *header; 9401 size_t buflen; 9402 int hdrlen; 9403 int bd_len; 9404 int rval = 0; 9405 struct mode_header_grp2 *mhp; 9406 struct sd_lun *un; 9407 int status; 9408 9409 ASSERT(ssc != NULL); 9410 un = ssc->ssc_un; 9411 ASSERT(un != NULL); 9412 9413 /* 9414 * Do a test unit ready; otherwise a mode sense may not work if this 9415 * is the first command sent to the device after boot. 9416 */ 9417 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9418 if (status != 0) 9419 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9420 9421 if (un->un_f_cfg_is_atapi == TRUE) { 9422 hdrlen = MODE_HEADER_LENGTH_GRP2; 9423 } else { 9424 hdrlen = MODE_HEADER_LENGTH; 9425 } 9426 9427 /* 9428 * Allocate memory for the retrieved mode page and its headers. Set 9429 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 9430 * we get all of the mode sense data; otherwise, the mode select 9431 * will fail. mode_cache_scsi3 is a superset of mode_caching. 9432 */ 9433 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 9434 sizeof (struct mode_cache_scsi3); 9435 9436 header = kmem_zalloc(buflen, KM_SLEEP); 9437 9438 /* Get the information from the device. */ 9439 if (un->un_f_cfg_is_atapi == TRUE) { 9440 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9441 MODEPAGE_CACHING, SD_PATH_DIRECT); 9442 } else { 9443 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9444 MODEPAGE_CACHING, SD_PATH_DIRECT); 9445 } 9446 9447 if (rval != 0) { 9448 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9449 "sd_cache_control: Mode Sense Failed\n"); 9450 goto mode_sense_failed; 9451 } 9452 9453 /* 9454 * Determine the size of the Block Descriptors in order to locate 9455 * the mode page data. ATAPI devices return 0; SCSI devices 9456 * should return MODE_BLK_DESC_LENGTH.
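 * For example (an illustrative layout, assuming a single 8-byte block descriptor): the MODE SENSE data above is the mode header (hdrlen bytes), followed by the block descriptor (bd_len bytes), followed by the caching page itself, which is why the page is located below at (header + hdrlen + bd_len).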
9457 */ 9458 if (un->un_f_cfg_is_atapi == TRUE) { 9459 mhp = (struct mode_header_grp2 *)header; 9460 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9461 } else { 9462 bd_len = ((struct mode_header *)header)->bdesc_length; 9463 } 9464 9465 if (bd_len > MODE_BLK_DESC_LENGTH) { 9466 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9467 "sd_cache_control: Mode Sense returned invalid block " 9468 "descriptor length\n"); 9469 rval = EIO; 9470 goto mode_sense_failed; 9471 } 9472 9473 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9474 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9475 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9476 "sd_cache_control: Mode Sense caching page code mismatch " 9477 "%d\n", mode_caching_page->mode_page.code); 9478 rval = EIO; 9479 goto mode_sense_failed; 9480 } 9481 9482 /* Check the relevant bits on successful mode sense. */ 9483 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9484 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9485 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9486 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9487 9488 size_t sbuflen; 9489 uchar_t save_pg; 9490 9491 /* 9492 * Construct select buffer length based on the 9493 * length of the sense data returned. 9494 */ 9495 sbuflen = hdrlen + bd_len + 9496 sizeof (struct mode_page) + 9497 (int)mode_caching_page->mode_page.length; 9498 9499 /* 9500 * Set the caching bits as requested. 9501 */ 9502 if (rcd_flag == SD_CACHE_ENABLE) 9503 mode_caching_page->rcd = 0; 9504 else if (rcd_flag == SD_CACHE_DISABLE) 9505 mode_caching_page->rcd = 1; 9506 9507 if (wce_flag == SD_CACHE_ENABLE) 9508 mode_caching_page->wce = 1; 9509 else if (wce_flag == SD_CACHE_DISABLE) 9510 mode_caching_page->wce = 0; 9511 9512 /* 9513 * Save the page if the mode sense says the 9514 * drive supports it. 9515 */ 9516 save_pg = mode_caching_page->mode_page.ps ? 9517 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9518 9519 /* Clear reserved bits before mode select. */ 9520 mode_caching_page->mode_page.ps = 0; 9521 9522 /* 9523 * Clear out mode header for mode select. 9524 * The rest of the retrieved page will be reused. 9525 */ 9526 bzero(header, hdrlen); 9527 9528 if (un->un_f_cfg_is_atapi == TRUE) { 9529 mhp = (struct mode_header_grp2 *)header; 9530 mhp->bdesc_length_hi = bd_len >> 8; 9531 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9532 } else { 9533 ((struct mode_header *)header)->bdesc_length = bd_len; 9534 } 9535 9536 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9537 9538 /* Issue mode select to change the cache settings */ 9539 if (un->un_f_cfg_is_atapi == TRUE) { 9540 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9541 sbuflen, save_pg, SD_PATH_DIRECT); 9542 } else { 9543 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9544 sbuflen, save_pg, SD_PATH_DIRECT); 9545 } 9546 9547 } 9548 9549 9550 mode_sense_failed: 9551 9552 kmem_free(header, buflen); 9553 9554 if (rval != 0) { 9555 if (rval == EIO) 9556 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9557 else 9558 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9559 } 9560 return (rval); 9561 } 9562 9563 9564 /* 9565 * Function: sd_get_write_cache_enabled() 9566 * 9567 * Description: This routine is the driver entry point for determining if 9568 * write caching is enabled. It examines the WCE (write cache 9569 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
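 * (For reference, in the caching mode page the WCE bit is bit 2 and the RCD bit is bit 0 of byte 2; sd accesses them through the bit fields of struct mode_caching.)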
9570 * 9571 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9572 * structure for this target. 9573 * is_enabled - pointer to int where write cache enabled state 9574 * is returned (non-zero -> write cache enabled) 9575 * 9576 * 9577 * Return Code: EIO 9578 * code returned by sd_send_scsi_MODE_SENSE 9579 * 9580 * Context: Kernel Thread 9581 * 9582 * NOTE: If an ioctl is added to disable the write cache, this sequence 9583 * should be followed so that no locking is required for accesses to 9584 * un->un_f_write_cache_enabled: 9585 * do mode select to clear wce 9586 * do synchronize cache to flush cache 9587 * set un->un_f_write_cache_enabled = FALSE 9588 * 9589 * Conversely, an ioctl to enable the write cache should be done 9590 * in this order: 9591 * set un->un_f_write_cache_enabled = TRUE 9592 * do mode select to set wce 9593 */ 9594 9595 static int 9596 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9597 { 9598 struct mode_caching *mode_caching_page; 9599 uchar_t *header; 9600 size_t buflen; 9601 int hdrlen; 9602 int bd_len; 9603 int rval = 0; 9604 struct sd_lun *un; 9605 int status; 9606 9607 ASSERT(ssc != NULL); 9608 un = ssc->ssc_un; 9609 ASSERT(un != NULL); 9610 ASSERT(is_enabled != NULL); 9611 9612 /* in case of error, flag as enabled */ 9613 *is_enabled = TRUE; 9614 9615 /* 9616 * Do a test unit ready; otherwise a mode sense may not work if this 9617 * is the first command sent to the device after boot. 9618 */ 9619 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9620 9621 if (status != 0) 9622 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9623 9624 if (un->un_f_cfg_is_atapi == TRUE) { 9625 hdrlen = MODE_HEADER_LENGTH_GRP2; 9626 } else { 9627 hdrlen = MODE_HEADER_LENGTH; 9628 } 9629 9630 /* 9631 * Allocate memory for the retrieved mode page and its headers. Set 9632 * a pointer to the page itself. 9633 */ 9634 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9635 header = kmem_zalloc(buflen, KM_SLEEP); 9636 9637 /* Get the information from the device. */ 9638 if (un->un_f_cfg_is_atapi == TRUE) { 9639 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9640 MODEPAGE_CACHING, SD_PATH_DIRECT); 9641 } else { 9642 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9643 MODEPAGE_CACHING, SD_PATH_DIRECT); 9644 } 9645 9646 if (rval != 0) { 9647 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9648 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9649 goto mode_sense_failed; 9650 } 9651 9652 /* 9653 * Determine the size of the Block Descriptors in order to locate 9654 * the mode page data. ATAPI devices return 0; SCSI devices 9655 * should return MODE_BLK_DESC_LENGTH.
9656 */ 9657 if (un->un_f_cfg_is_atapi == TRUE) { 9658 struct mode_header_grp2 *mhp; 9659 mhp = (struct mode_header_grp2 *)header; 9660 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9661 } else { 9662 bd_len = ((struct mode_header *)header)->bdesc_length; 9663 } 9664 9665 if (bd_len > MODE_BLK_DESC_LENGTH) { 9666 /* FMA should make upset complain here */ 9667 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9668 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9669 "block descriptor length\n"); 9670 rval = EIO; 9671 goto mode_sense_failed; 9672 } 9673 9674 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9675 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9676 /* FMA could make upset complain here */ 9677 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9678 "sd_get_write_cache_enabled: Mode Sense caching page " 9679 "code mismatch %d\n", mode_caching_page->mode_page.code); 9680 rval = EIO; 9681 goto mode_sense_failed; 9682 } 9683 *is_enabled = mode_caching_page->wce; 9684 9685 mode_sense_failed: 9686 if (rval == 0) { 9687 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9688 } else if (rval == EIO) { 9689 /* 9690 * Some disks do not support MODE SENSE(6); we 9691 * should ignore this kind of error (sense key is 9692 * 0x5 - illegal request). 9693 */ 9694 uint8_t *sensep; 9695 int senlen; 9696 9697 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9698 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9699 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9700 9701 if (senlen > 0 && 9702 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9703 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9704 } else { 9705 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9706 } 9707 } else { 9708 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9709 } 9710 kmem_free(header, buflen); 9711 return (rval); 9712 } 9713 9714 /* 9715 * Function: sd_get_nv_sup() 9716 * 9717 * Description: This routine is the driver entry point for 9718 * determining whether a non-volatile cache is supported. This 9719 * determination process works as follows: 9720 * 9721 * 1. sd first queries sd.conf to see whether the 9722 * suppress_cache_flush bit is set for this device. 9723 * 9724 * 2. if it is not set there, sd then queries the internal disk table. 9725 * 9726 * 3. if either sd.conf or the internal disk table specifies 9727 * that the cache flush be suppressed, we don't bother checking 9728 * the NV_SUP bit. 9729 * 9730 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9731 * the optional INQUIRY VPD page 0x86. If the device 9732 * supports VPD page 0x86, sd examines the NV_SUP 9733 * (non-volatile cache support) bit in the INQUIRY VPD page 9734 * 0x86: 9735 * o If the NV_SUP bit is set, sd assumes the device has a 9736 * non-volatile cache and sets 9737 * un_f_sync_nv_supported to TRUE. 9738 * o Otherwise the cache is not non-volatile, and 9739 * un_f_sync_nv_supported is set to FALSE. 9740 * 9741 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9742 * structure for this target 9743 * 9744 * 9745 * Context: Kernel Thread 9746 */ 9747 9748 static void 9749 sd_get_nv_sup(sd_ssc_t *ssc) 9750 { 9751 int rval = 0; 9752 uchar_t *inq86 = NULL; 9753 size_t inq86_len = MAX_INQUIRY_SIZE; 9754 size_t inq86_resid = 0; 9755 struct dk_callback *dkc; 9756 struct sd_lun *un; 9757 9758 ASSERT(ssc != NULL); 9759 un = ssc->ssc_un; 9760 ASSERT(un != NULL); 9761 9762 mutex_enter(SD_MUTEX(un)); 9763 9764 /* 9765 * Be conservative about the device's support of the 9766 * SYNC_NV bit: un_f_sync_nv_supported is 9767 * initialized to be false.
9768 */ 9769 un->un_f_sync_nv_supported = FALSE; 9770 9771 /* 9772 * If either sd.conf or the internal disk table 9773 * specifies that the cache flush be suppressed, then 9774 * we don't bother checking the NV_SUP bit. 9775 */ 9776 if (un->un_f_suppress_cache_flush == TRUE) { 9777 mutex_exit(SD_MUTEX(un)); 9778 return; 9779 } 9780 9781 if (sd_check_vpd_page_support(ssc) == 0 && 9782 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9783 mutex_exit(SD_MUTEX(un)); 9784 /* collect page 86 data if available */ 9785 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9786 9787 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9788 0x01, 0x86, &inq86_resid); 9789 9790 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9791 SD_TRACE(SD_LOG_COMMON, un, 9792 "sd_get_nv_sup: \ 9793 successfully get VPD page: %x \ 9794 PAGE LENGTH: %x BYTE 6: %x\n", 9795 inq86[1], inq86[3], inq86[6]); 9796 9797 mutex_enter(SD_MUTEX(un)); 9798 /* 9799 * Check the value of the NV_SUP bit: only if the device 9800 * reports the NV_SUP bit as 1 will 9801 * un_f_sync_nv_supported be set to TRUE. 9802 */ 9803 if (inq86[6] & SD_VPD_NV_SUP) { 9804 un->un_f_sync_nv_supported = TRUE; 9805 } 9806 mutex_exit(SD_MUTEX(un)); 9807 } else if (rval != 0) { 9808 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9809 } 9810 9811 kmem_free(inq86, inq86_len); 9812 } else { 9813 mutex_exit(SD_MUTEX(un)); 9814 } 9815 9816 /* 9817 * Send a SYNC CACHE command to check whether the 9818 * SYNC_NV bit is supported. By this point 9819 * un_f_sync_nv_supported holds the correct value. 9820 */ 9821 mutex_enter(SD_MUTEX(un)); 9822 if (un->un_f_sync_nv_supported) { 9823 mutex_exit(SD_MUTEX(un)); 9824 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9825 dkc->dkc_flag = FLUSH_VOLATILE; 9826 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9827 9828 /* 9829 * Send a TEST UNIT READY command to the device. This should 9830 * clear any outstanding UNIT ATTENTION that may be present. 9831 */ 9832 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9833 if (rval != 0) 9834 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9835 9836 kmem_free(dkc, sizeof (struct dk_callback)); 9837 } else { 9838 mutex_exit(SD_MUTEX(un)); 9839 } 9840 9841 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9842 un_f_suppress_cache_flush is set to %d\n", 9843 un->un_f_suppress_cache_flush); 9844 } 9845 9846 /* 9847 * Function: sd_make_device 9848 * 9849 * Description: Utility routine to return the Solaris device number from 9850 * the data in the device's dev_info structure. 9851 * 9852 * Return Code: The Solaris device number 9853 * 9854 * Context: Any 9855 */ 9856 9857 static dev_t 9858 sd_make_device(dev_info_t *devi) 9859 { 9860 return (makedevice(ddi_driver_major(devi), 9861 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9862 } 9863 9864 9865 /* 9866 * Function: sd_pm_entry 9867 * 9868 * Description: Called at the start of a new command to manage power 9869 * and busy status of a device. This includes determining whether 9870 * the current power state of the device is sufficient for 9871 * performing the command or whether it must be changed. 9872 * The PM framework is notified appropriately. 9873 * Only with a return status of DDI_SUCCESS will the 9874 * component be marked busy to the framework. 9875 * 9876 * All callers of sd_pm_entry must check the return status 9877 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9878 * of DDI_FAILURE indicates the device failed to power up. 9879 * In this case un_pm_count has been adjusted so the result 9880 * on exit is still powered down, i.e.
count is less than 0. 9881 * Calling sd_pm_exit with this count value hits an ASSERT. 9882 * 9883 * Return Code: DDI_SUCCESS or DDI_FAILURE 9884 * 9885 * Context: Kernel thread context. 9886 */ 9887 9888 static int 9889 sd_pm_entry(struct sd_lun *un) 9890 { 9891 int return_status = DDI_SUCCESS; 9892 9893 ASSERT(!mutex_owned(SD_MUTEX(un))); 9894 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9895 9896 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9897 9898 if (un->un_f_pm_is_enabled == FALSE) { 9899 SD_TRACE(SD_LOG_IO_PM, un, 9900 "sd_pm_entry: exiting, PM not enabled\n"); 9901 return (return_status); 9902 } 9903 9904 /* 9905 * Just increment a counter if PM is enabled. On the transition from 9906 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9907 * the count with each IO and mark the device as idle when the count 9908 * hits 0. 9909 * 9910 * If the count is less than 0 the device is powered down. If a powered 9911 * down device is successfully powered up then the count must be 9912 * incremented to reflect the power up. Note that it'll get incremented 9913 * a second time to become busy. 9914 * 9915 * Because the following has the potential to change the device state 9916 * and must release the un_pm_mutex to do so, only one thread can be 9917 * allowed through at a time. 9918 */ 9919 9920 mutex_enter(&un->un_pm_mutex); 9921 while (un->un_pm_busy == TRUE) { 9922 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9923 } 9924 un->un_pm_busy = TRUE; 9925 9926 if (un->un_pm_count < 1) { 9927 9928 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9929 9930 /* 9931 * Indicate we are now busy so the framework won't attempt to 9932 * power down the device. This call will only fail if either 9933 * we passed a bad component number or the device has no 9934 * components. Neither of these should ever happen. 9935 */ 9936 mutex_exit(&un->un_pm_mutex); 9937 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9938 ASSERT(return_status == DDI_SUCCESS); 9939 9940 mutex_enter(&un->un_pm_mutex); 9941 9942 if (un->un_pm_count < 0) { 9943 mutex_exit(&un->un_pm_mutex); 9944 9945 SD_TRACE(SD_LOG_IO_PM, un, 9946 "sd_pm_entry: power up component\n"); 9947 9948 /* 9949 * pm_raise_power will cause sdpower to be called, 9950 * which brings the device power level to the 9951 * desired state. If successful, un_pm_count and 9952 * un_power_level will be updated appropriately. 9953 */ 9954 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9955 SD_PM_STATE_ACTIVE(un)); 9956 9957 mutex_enter(&un->un_pm_mutex); 9958 9959 if (return_status != DDI_SUCCESS) { 9960 /* 9961 * Power up failed. 9962 * Idle the device and adjust the count 9963 * so the result on exit is that we're 9964 * still powered down, i.e. count is less than 0. 9965 */ 9966 SD_TRACE(SD_LOG_IO_PM, un, 9967 "sd_pm_entry: power up failed," 9968 " idle the component\n"); 9969 9970 (void) pm_idle_component(SD_DEVINFO(un), 0); 9971 un->un_pm_count--; 9972 } else { 9973 /* 9974 * Device is powered up; verify the 9975 * count is non-negative. 9976 * This is debug only. 9977 */ 9978 ASSERT(un->un_pm_count == 0); 9979 } 9980 } 9981 9982 if (return_status == DDI_SUCCESS) { 9983 /* 9984 * For performance, now that the device has been tagged 9985 * as busy, and it's known to be powered up, update the 9986 * chain types to use jump tables that do not include 9987 * pm. This significantly lowers the overhead and 9988 * therefore improves performance.
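 * (For example, for a fixed disk this switches un_buf_chain_type to SD_CHAIN_INFO_DISK_NO_PM and un_uscsi_chain_type to SD_CHAIN_INFO_USCSI_CMD_NO_PM below, so the iostart/iodone chains skip the PM checks entirely.)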
9989 */ 9990 9991 mutex_exit(&un->un_pm_mutex); 9992 mutex_enter(SD_MUTEX(un)); 9993 SD_TRACE(SD_LOG_IO_PM, un, 9994 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9995 un->un_uscsi_chain_type); 9996 9997 if (un->un_f_non_devbsize_supported) { 9998 un->un_buf_chain_type = 9999 SD_CHAIN_INFO_RMMEDIA_NO_PM; 10000 } else { 10001 un->un_buf_chain_type = 10002 SD_CHAIN_INFO_DISK_NO_PM; 10003 } 10004 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 10005 10006 SD_TRACE(SD_LOG_IO_PM, un, 10007 " changed uscsi_chain_type to %d\n", 10008 un->un_uscsi_chain_type); 10009 mutex_exit(SD_MUTEX(un)); 10010 mutex_enter(&un->un_pm_mutex); 10011 10012 if (un->un_pm_idle_timeid == NULL) { 10013 /* 300 ms. */ 10014 un->un_pm_idle_timeid = 10015 timeout(sd_pm_idletimeout_handler, un, 10016 (drv_usectohz((clock_t)300000))); 10017 /* 10018 * Include an extra call to busy, which keeps the 10019 * device busy with respect to the PM layer 10020 * until the timer fires, at which time it'll 10021 * get the extra idle call. 10022 */ 10023 (void) pm_busy_component(SD_DEVINFO(un), 0); 10024 } 10025 } 10026 } 10027 un->un_pm_busy = FALSE; 10028 /* Next... */ 10029 cv_signal(&un->un_pm_busy_cv); 10030 10031 un->un_pm_count++; 10032 10033 SD_TRACE(SD_LOG_IO_PM, un, 10034 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 10035 10036 mutex_exit(&un->un_pm_mutex); 10037 10038 return (return_status); 10039 } 10040 10041 10042 /* 10043 * Function: sd_pm_exit 10044 * 10045 * Description: Called at the completion of a command to manage busy 10046 * status for the device. If the device becomes idle, the 10047 * PM framework is notified. 10048 * 10049 * Context: Kernel thread context 10050 */ 10051 10052 static void 10053 sd_pm_exit(struct sd_lun *un) 10054 { 10055 ASSERT(!mutex_owned(SD_MUTEX(un))); 10056 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10057 10058 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10059 10060 /* 10061 * After attach, the following flag is only read, so don't 10062 * take the penalty of acquiring a mutex for it. 10063 */ 10064 if (un->un_f_pm_is_enabled == TRUE) { 10065 10066 mutex_enter(&un->un_pm_mutex); 10067 un->un_pm_count--; 10068 10069 SD_TRACE(SD_LOG_IO_PM, un, 10070 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10071 10072 ASSERT(un->un_pm_count >= 0); 10073 if (un->un_pm_count == 0) { 10074 mutex_exit(&un->un_pm_mutex); 10075 10076 SD_TRACE(SD_LOG_IO_PM, un, 10077 "sd_pm_exit: idle component\n"); 10078 10079 (void) pm_idle_component(SD_DEVINFO(un), 0); 10080 10081 } else { 10082 mutex_exit(&un->un_pm_mutex); 10083 } 10084 } 10085 10086 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10087 } 10088 10089 10090 /* 10091 * Function: sdopen 10092 * 10093 * Description: Driver's open(9e) entry point function.
10094 * 10095 * Arguments: dev_p - pointer to device number 10096 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 10097 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10098 * cred_p - user credential pointer 10099 * 10100 * Return Code: EINVAL 10101 * ENXIO 10102 * EIO 10103 * EROFS 10104 * EBUSY 10105 * 10106 * Context: Kernel thread context 10107 */ 10108 /* ARGSUSED */ 10109 static int 10110 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 10111 { 10112 struct sd_lun *un; 10113 int nodelay; 10114 int part; 10115 uint64_t partmask; 10116 int instance; 10117 dev_t dev; 10118 int rval = EIO; 10119 diskaddr_t nblks = 0; 10120 diskaddr_t label_cap; 10121 10122 /* Validate the open type */ 10123 if (otyp >= OTYPCNT) { 10124 return (EINVAL); 10125 } 10126 10127 dev = *dev_p; 10128 instance = SDUNIT(dev); 10129 mutex_enter(&sd_detach_mutex); 10130 10131 /* 10132 * Fail the open if there is no softstate for the instance, or 10133 * if another thread somewhere is trying to detach the instance. 10134 */ 10135 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 10136 (un->un_detach_count != 0)) { 10137 mutex_exit(&sd_detach_mutex); 10138 /* 10139 * The probe cache only needs to be cleared when open(9e) fails 10140 * with ENXIO (4238046). 10141 */ 10142 /* 10143 * Unconditionally clearing the probe cache is OK with 10144 * separate sd/ssd binaries; on the x86 platform it can be 10145 * an issue, since both parallel 10146 * and fibre support are in one binary. 10147 */ 10148 sd_scsi_clear_probe_cache(); 10149 return (ENXIO); 10150 } 10151 10152 /* 10153 * The un_layer_count is to prevent another thread in specfs from 10154 * trying to detach the instance, which can happen when we are 10155 * called from a higher-layer driver instead of through specfs. 10156 * This will not be needed when DDI provides a layered driver 10157 * interface that allows specfs to know that an instance is in 10158 * use by a layered driver & should not be detached. 10159 * 10160 * Note: the semantics for layered driver opens are exactly one 10161 * close for every open. 10162 */ 10163 if (otyp == OTYP_LYR) { 10164 un->un_layer_count++; 10165 } 10166 10167 /* 10168 * Keep a count of the current # of opens in progress. This is because 10169 * some layered drivers try to call us as a regular open. This can 10170 * cause problems that we cannot prevent; however, by keeping this count 10171 * we can at least keep our open and detach routines from racing against 10172 * each other under such conditions. 10173 */ 10174 un->un_opens_in_progress++; 10175 mutex_exit(&sd_detach_mutex); 10176 10177 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10178 part = SDPART(dev); 10179 partmask = 1 << part; 10180 10181 /* 10182 * We use a semaphore here in order to serialize 10183 * open and close requests on the device. 10184 */ 10185 sema_p(&un->un_semoclose); 10186 10187 mutex_enter(SD_MUTEX(un)); 10188 10189 /* 10190 * All device accesses go through sdstrategy(), where we check 10191 * the suspend status; but there could be a scsi_poll command, 10192 * which bypasses sdstrategy(), so we need to check pm 10193 * status.
10194 */ 10195 10196 if (!nodelay) { 10197 while ((un->un_state == SD_STATE_SUSPENDED) || 10198 (un->un_state == SD_STATE_PM_CHANGING)) { 10199 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10200 } 10201 10202 mutex_exit(SD_MUTEX(un)); 10203 if (sd_pm_entry(un) != DDI_SUCCESS) { 10204 rval = EIO; 10205 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10206 "sdopen: sd_pm_entry failed\n"); 10207 goto open_failed_with_pm; 10208 } 10209 mutex_enter(SD_MUTEX(un)); 10210 } 10211 10212 /* check for previous exclusive open */ 10213 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10214 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10215 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10216 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10217 10218 if (un->un_exclopen & (partmask)) { 10219 goto excl_open_fail; 10220 } 10221 10222 if (flag & FEXCL) { 10223 int i; 10224 if (un->un_ocmap.lyropen[part]) { 10225 goto excl_open_fail; 10226 } 10227 for (i = 0; i < (OTYPCNT - 1); i++) { 10228 if (un->un_ocmap.regopen[i] & (partmask)) { 10229 goto excl_open_fail; 10230 } 10231 } 10232 } 10233 10234 /* 10235 * Check the write permission if this is a removable media device, 10236 * NDELAY has not been set, and write permission is requested. 10237 * 10238 * Note: If NDELAY was set and this is write-protected media, the WRITE 10239 * attempt will fail with EIO as part of the I/O processing. This is a 10240 * more permissive implementation that allows the open to succeed and 10241 * WRITE attempts to fail when appropriate. 10242 */ 10243 if (un->un_f_chk_wp_open) { 10244 if ((flag & FWRITE) && (!nodelay)) { 10245 mutex_exit(SD_MUTEX(un)); 10246 /* 10247 * Defer the write permission check for a writable 10248 * DVD drive until sdstrategy(), and do not fail the open 10249 * even if FWRITE is set, as the device can be writable 10250 * depending upon the media, and the media can change 10251 * after the call to open(). 10252 */ 10253 if (un->un_f_dvdram_writable_device == FALSE) { 10254 if (ISCD(un) || sr_check_wp(dev)) { 10255 rval = EROFS; 10256 mutex_enter(SD_MUTEX(un)); 10257 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10258 "write to cd or write protected media\n"); 10259 goto open_fail; 10260 } 10261 } 10262 mutex_enter(SD_MUTEX(un)); 10263 } 10264 } 10265 10266 /* 10267 * If opening in NDELAY/NONBLOCK mode, just return. 10268 * Check if disk is ready and has a valid geometry later. 10269 */ 10270 if (!nodelay) { 10271 sd_ssc_t *ssc; 10272 10273 mutex_exit(SD_MUTEX(un)); 10274 ssc = sd_ssc_init(un); 10275 rval = sd_ready_and_valid(ssc, part); 10276 sd_ssc_fini(ssc); 10277 mutex_enter(SD_MUTEX(un)); 10278 /* 10279 * Fail if device is not ready or if the number of disk 10280 * blocks is zero or negative for non-CD devices. 10281 */ 10282 10283 nblks = 0; 10284 10285 if (rval == SD_READY_VALID && (!ISCD(un))) { 10286 /* if cmlb_partinfo fails, nblks remains 0 */ 10287 mutex_exit(SD_MUTEX(un)); 10288 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 10289 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 10290 mutex_enter(SD_MUTEX(un)); 10291 } 10292 10293 if ((rval != SD_READY_VALID) || 10294 (!ISCD(un) && nblks <= 0)) { 10295 rval = un->un_f_has_removable_media ? ENXIO : EIO; 10296 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10297 "device not ready or invalid disk block value\n"); 10298 goto open_fail; 10299 } 10300 #if defined(__i386) || defined(__amd64) 10301 } else { 10302 uchar_t *cp; 10303 /* 10304 * x86 requires special nodelay handling, so that p0 is 10305 * always defined and accessible.
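 * (p0 is the whole-disk minor node on x86, e.g. /dev/rdsk/c0t0d0p0, so it must remain usable even when the disk carries no valid label.)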
10306 * Invalidate geometry only if device is not already open. 10307 */ 10308 cp = &un->un_ocmap.chkd[0]; 10309 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10310 if (*cp != (uchar_t)0) { 10311 break; 10312 } 10313 cp++; 10314 } 10315 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10316 mutex_exit(SD_MUTEX(un)); 10317 cmlb_invalidate(un->un_cmlbhandle, 10318 (void *)SD_PATH_DIRECT); 10319 mutex_enter(SD_MUTEX(un)); 10320 } 10321 10322 #endif 10323 } 10324 10325 if (otyp == OTYP_LYR) { 10326 un->un_ocmap.lyropen[part]++; 10327 } else { 10328 un->un_ocmap.regopen[otyp] |= partmask; 10329 } 10330 10331 /* Set up open and exclusive open flags */ 10332 if (flag & FEXCL) { 10333 un->un_exclopen |= (partmask); 10334 } 10335 10336 /* 10337 * If the lun is EFI labeled and lun capacity is greater than the 10338 * capacity contained in the label, log a sys-event to notify the 10339 * interested module. 10340 * To avoid an infinite loop of logging sys-event, we only log the 10341 * event when the lun is not opened in NDELAY mode. The event handler 10342 * should open the lun in NDELAY mode. 10343 */ 10344 if (!nodelay) { 10345 mutex_exit(SD_MUTEX(un)); 10346 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10347 (void*)SD_PATH_DIRECT) == 0) { 10348 mutex_enter(SD_MUTEX(un)); 10349 if (un->un_f_blockcount_is_valid && 10350 un->un_blockcount > label_cap && 10351 un->un_f_expnevent == B_FALSE) { 10352 un->un_f_expnevent = B_TRUE; 10353 mutex_exit(SD_MUTEX(un)); 10354 sd_log_lun_expansion_event(un, 10355 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10356 mutex_enter(SD_MUTEX(un)); 10357 } 10358 } else { 10359 mutex_enter(SD_MUTEX(un)); 10360 } 10361 } 10362 10363 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10364 "open of part %d type %d\n", part, otyp); 10365 10366 mutex_exit(SD_MUTEX(un)); 10367 if (!nodelay) { 10368 sd_pm_exit(un); 10369 } 10370 10371 sema_v(&un->un_semoclose); 10372 10373 mutex_enter(&sd_detach_mutex); 10374 un->un_opens_in_progress--; 10375 mutex_exit(&sd_detach_mutex); 10376 10377 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10378 return (DDI_SUCCESS); 10379 10380 excl_open_fail: 10381 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10382 rval = EBUSY; 10383 10384 open_fail: 10385 mutex_exit(SD_MUTEX(un)); 10386 10387 /* 10388 * On a failed open we must exit the pm management. 10389 */ 10390 if (!nodelay) { 10391 sd_pm_exit(un); 10392 } 10393 open_failed_with_pm: 10394 sema_v(&un->un_semoclose); 10395 10396 mutex_enter(&sd_detach_mutex); 10397 un->un_opens_in_progress--; 10398 if (otyp == OTYP_LYR) { 10399 un->un_layer_count--; 10400 } 10401 mutex_exit(&sd_detach_mutex); 10402 10403 return (rval); 10404 } 10405 10406 10407 /* 10408 * Function: sdclose 10409 * 10410 * Description: Driver's close(9e) entry point function. 
10411 * 10412 * Arguments: dev - device number 10413 * flag - file status flag, informational only 10414 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10415 * cred_p - user credential pointer 10416 * 10417 * Return Code: ENXIO 10418 * 10419 * Context: Kernel thread context 10420 */ 10421 /* ARGSUSED */ 10422 static int 10423 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10424 { 10425 struct sd_lun *un; 10426 uchar_t *cp; 10427 int part; 10428 int nodelay; 10429 int rval = 0; 10430 10431 /* Validate the open type */ 10432 if (otyp >= OTYPCNT) { 10433 return (ENXIO); 10434 } 10435 10436 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10437 return (ENXIO); 10438 } 10439 10440 part = SDPART(dev); 10441 nodelay = flag & (FNDELAY | FNONBLOCK); 10442 10443 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10444 "sdclose: close of part %d type %d\n", part, otyp); 10445 10446 /* 10447 * We use a semaphore here in order to serialize 10448 * open and close requests on the device. 10449 */ 10450 sema_p(&un->un_semoclose); 10451 10452 mutex_enter(SD_MUTEX(un)); 10453 10454 /* Don't proceed if power is being changed. */ 10455 while (un->un_state == SD_STATE_PM_CHANGING) { 10456 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10457 } 10458 10459 if (un->un_exclopen & (1 << part)) { 10460 un->un_exclopen &= ~(1 << part); 10461 } 10462 10463 /* Update the open partition map */ 10464 if (otyp == OTYP_LYR) { 10465 un->un_ocmap.lyropen[part] -= 1; 10466 } else { 10467 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10468 } 10469 10470 cp = &un->un_ocmap.chkd[0]; 10471 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10472 if (*cp != NULL) { 10473 break; 10474 } 10475 cp++; 10476 } 10477 10478 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10479 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10480 10481 /* 10482 * We avoid persistence upon the last close, and set 10483 * the throttle back to the maximum. 10484 */ 10485 un->un_throttle = un->un_saved_throttle; 10486 10487 if (un->un_state == SD_STATE_OFFLINE) { 10488 if (un->un_f_is_fibre == FALSE) { 10489 scsi_log(SD_DEVINFO(un), sd_label, 10490 CE_WARN, "offline\n"); 10491 } 10492 mutex_exit(SD_MUTEX(un)); 10493 cmlb_invalidate(un->un_cmlbhandle, 10494 (void *)SD_PATH_DIRECT); 10495 mutex_enter(SD_MUTEX(un)); 10496 10497 } else { 10498 /* 10499 * Flush any outstanding writes in the NVRAM cache. 10500 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10501 * cmd; it may not work for non-Pluto devices. 10502 * SYNCHRONIZE CACHE is not required for removables, 10503 * except DVD-RAM drives. 10504 * 10505 * Also note: because SYNCHRONIZE CACHE is currently 10506 * the only command issued here that requires the 10507 * drive be powered up, only do the power up before 10508 * sending the Sync Cache command. If additional 10509 * commands are added which require a powered-up 10510 * drive, the following sequence may have to change. 10511 * 10512 * And finally, note that parallel SCSI on SPARC 10513 * only issues a Sync Cache to DVD-RAM, a newly 10514 * supported device.
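 * (That difference is visible in the #if below: on x86 the flush is also issued when un_f_sync_cache_supported and un_f_sync_cache_required are both set, while elsewhere only DVD-RAM devices are flushed.)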
10515 */ 10516 #if defined(__i386) || defined(__amd64) 10517 if ((un->un_f_sync_cache_supported && 10518 un->un_f_sync_cache_required) || 10519 un->un_f_dvdram_writable_device == TRUE) { 10520 #else 10521 if (un->un_f_dvdram_writable_device == TRUE) { 10522 #endif 10523 mutex_exit(SD_MUTEX(un)); 10524 if (sd_pm_entry(un) == DDI_SUCCESS) { 10525 rval = 10526 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10527 NULL); 10528 /* ignore error if not supported */ 10529 if (rval == ENOTSUP) { 10530 rval = 0; 10531 } else if (rval != 0) { 10532 rval = EIO; 10533 } 10534 sd_pm_exit(un); 10535 } else { 10536 rval = EIO; 10537 } 10538 mutex_enter(SD_MUTEX(un)); 10539 } 10540 10541 /* 10542 * For devices which support DOOR_LOCK, send an ALLOW 10543 * MEDIA REMOVAL command, but don't get upset if it 10544 * fails. We need to raise the power of the drive before 10545 * we can call sd_send_scsi_DOORLOCK(). 10546 */ 10547 if (un->un_f_doorlock_supported) { 10548 mutex_exit(SD_MUTEX(un)); 10549 if (sd_pm_entry(un) == DDI_SUCCESS) { 10550 sd_ssc_t *ssc; 10551 10552 ssc = sd_ssc_init(un); 10553 rval = sd_send_scsi_DOORLOCK(ssc, 10554 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10555 if (rval != 0) 10556 sd_ssc_assessment(ssc, 10557 SD_FMT_IGNORE); 10558 sd_ssc_fini(ssc); 10559 10560 sd_pm_exit(un); 10561 if (ISCD(un) && (rval != 0) && 10562 (nodelay != 0)) { 10563 rval = ENXIO; 10564 } 10565 } else { 10566 rval = EIO; 10567 } 10568 mutex_enter(SD_MUTEX(un)); 10569 } 10570 10571 /* 10572 * If a device has removable media, invalidate all 10573 * parameters related to media, such as geometry, 10574 * blocksize, and blockcount. 10575 */ 10576 if (un->un_f_has_removable_media) { 10577 sr_ejected(un); 10578 } 10579 10580 /* 10581 * Destroy the cache (if it exists) which was 10582 * allocated for the write maps, since this is 10583 * the last close for this media. 10584 */ 10585 if (un->un_wm_cache) { 10586 /* 10587 * Check if there are pending commands; 10588 * if there are, give a warning and 10589 * do not destroy the cache. 10590 */ 10591 if (un->un_ncmds_in_driver > 0) { 10592 scsi_log(SD_DEVINFO(un), 10593 sd_label, CE_WARN, 10594 "Unable to clean up memory " 10595 "because of pending I/O\n"); 10596 } else { 10597 kmem_cache_destroy( 10598 un->un_wm_cache); 10599 un->un_wm_cache = NULL; 10600 } 10601 } 10602 } 10603 } 10604 10605 mutex_exit(SD_MUTEX(un)); 10606 sema_v(&un->un_semoclose); 10607 10608 if (otyp == OTYP_LYR) { 10609 mutex_enter(&sd_detach_mutex); 10610 /* 10611 * The detach routine may run when the layer count 10612 * drops to zero. 10613 */ 10614 un->un_layer_count--; 10615 mutex_exit(&sd_detach_mutex); 10616 } 10617 10618 return (rval); 10619 } 10620 10621 10622 /* 10623 * Function: sd_ready_and_valid 10624 * 10625 * Description: Test if device is ready and has a valid geometry. 10626 * 10627 * Arguments: ssc - sd_ssc_t will contain un 10628 * part - partition number to check 10629 * 10630 * Return Code: SD_READY_VALID ready and valid label 10631 * SD_NOT_READY_VALID not ready, no label 10632 * SD_RESERVED_BY_OTHERS reservation conflict 10633 * 10634 * Context: Never called at interrupt context.
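 * For example, sdopen() (in the non-NDELAY case) calls this routine and fails the open with EIO or ENXIO unless SD_READY_VALID is returned.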
10635 */ 10636 10637 static int 10638 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10639 { 10640 struct sd_errstats *stp; 10641 uint64_t capacity; 10642 uint_t lbasize; 10643 int rval = SD_READY_VALID; 10644 char name_str[48]; 10645 boolean_t is_valid; 10646 struct sd_lun *un; 10647 int status; 10648 10649 ASSERT(ssc != NULL); 10650 un = ssc->ssc_un; 10651 ASSERT(un != NULL); 10652 ASSERT(!mutex_owned(SD_MUTEX(un))); 10653 10654 mutex_enter(SD_MUTEX(un)); 10655 /* 10656 * If a device has removable media, we must check if media is 10657 * ready when checking if this device is ready and valid. 10658 */ 10659 if (un->un_f_has_removable_media) { 10660 mutex_exit(SD_MUTEX(un)); 10661 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10662 10663 if (status != 0) { 10664 rval = SD_NOT_READY_VALID; 10665 mutex_enter(SD_MUTEX(un)); 10666 10667 /* Ignore all failed status for removable media */ 10668 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10669 10670 goto done; 10671 } 10672 10673 is_valid = SD_IS_VALID_LABEL(un); 10674 mutex_enter(SD_MUTEX(un)); 10675 if (!is_valid || 10676 (un->un_f_blockcount_is_valid == FALSE) || 10677 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10678 10679 /* Capacity has to be read on every open. */ 10680 mutex_exit(SD_MUTEX(un)); 10681 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10682 &lbasize, SD_PATH_DIRECT); 10683 10684 if (status != 0) { 10685 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10686 10687 cmlb_invalidate(un->un_cmlbhandle, 10688 (void *)SD_PATH_DIRECT); 10689 mutex_enter(SD_MUTEX(un)); 10690 rval = SD_NOT_READY_VALID; 10691 10692 goto done; 10693 } else { 10694 mutex_enter(SD_MUTEX(un)); 10695 sd_update_block_info(un, lbasize, capacity); 10696 } 10697 } 10698 10699 /* 10700 * Check if the media in the device is writable or not. 10701 */ 10702 if (!is_valid && ISCD(un)) { 10703 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10704 } 10705 10706 } else { 10707 /* 10708 * Do a test unit ready to clear any unit attention from non-CD 10709 * devices. 10710 */ 10711 mutex_exit(SD_MUTEX(un)); 10712 10713 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10714 if (status != 0) { 10715 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10716 } 10717 10718 mutex_enter(SD_MUTEX(un)); 10719 } 10720 10721 10722 /* 10723 * If this is a non-512 block device, allocate space for 10724 * the wmap cache. This is done here since this routine is 10725 * called every time the media is changed, and the 10726 * block size is a function of the media rather than the device. 10727 */ 10728 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR || 10729 un->un_f_non_devbsize_supported) && 10730 un->un_tgt_blocksize != DEV_BSIZE) || 10731 un->un_f_enable_rmw) { 10732 if (!(un->un_wm_cache)) { 10733 (void) snprintf(name_str, sizeof (name_str), 10734 "%s%d_cache", 10735 ddi_driver_name(SD_DEVINFO(un)), 10736 ddi_get_instance(SD_DEVINFO(un))); 10737 un->un_wm_cache = kmem_cache_create( 10738 name_str, sizeof (struct sd_w_map), 10739 8, sd_wm_cache_constructor, 10740 sd_wm_cache_destructor, NULL, 10741 (void *)un, NULL, 0); 10742 if (!(un->un_wm_cache)) { 10743 rval = ENOMEM; 10744 goto done; 10745 } 10746 } 10747 } 10748 10749 if (un->un_state == SD_STATE_NORMAL) { 10750 /* 10751 * If the target is not yet ready here (defined by a TUR 10752 * failure), invalidate the geometry and print an 'offline' 10753 * message. This is a legacy message, as the state of the 10754 * target is not actually changed to SD_STATE_OFFLINE.
10755 * 10756 * If the TUR fails for EACCES (Reservation Conflict), 10757 * SD_RESERVED_BY_OTHERS will be returned to indicate 10758 * reservation conflict. If the TUR fails for other 10759 * reasons, SD_NOT_READY_VALID will be returned. 10760 */ 10761 int err; 10762 10763 mutex_exit(SD_MUTEX(un)); 10764 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10765 mutex_enter(SD_MUTEX(un)); 10766 10767 if (err != 0) { 10768 mutex_exit(SD_MUTEX(un)); 10769 cmlb_invalidate(un->un_cmlbhandle, 10770 (void *)SD_PATH_DIRECT); 10771 mutex_enter(SD_MUTEX(un)); 10772 if (err == EACCES) { 10773 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10774 "reservation conflict\n"); 10775 rval = SD_RESERVED_BY_OTHERS; 10776 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10777 } else { 10778 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10779 "drive offline\n"); 10780 rval = SD_NOT_READY_VALID; 10781 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10782 } 10783 goto done; 10784 } 10785 } 10786 10787 if (un->un_f_format_in_progress == FALSE) { 10788 mutex_exit(SD_MUTEX(un)); 10789 10790 (void) cmlb_validate(un->un_cmlbhandle, 0, 10791 (void *)SD_PATH_DIRECT); 10792 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10793 NULL, (void *) SD_PATH_DIRECT) != 0) { 10794 rval = SD_NOT_READY_VALID; 10795 mutex_enter(SD_MUTEX(un)); 10796 10797 goto done; 10798 } 10799 if (un->un_f_pkstats_enabled) { 10800 sd_set_pstats(un); 10801 SD_TRACE(SD_LOG_IO_PARTITION, un, 10802 "sd_ready_and_valid: un:0x%p pstats created and " 10803 "set\n", un); 10804 } 10805 mutex_enter(SD_MUTEX(un)); 10806 } 10807 10808 /* 10809 * If this device supports the DOOR_LOCK command, try to send 10810 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10811 * if it fails. For a CD, however, a failure is an error. 10812 */ 10813 if (un->un_f_doorlock_supported) { 10814 mutex_exit(SD_MUTEX(un)); 10815 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10816 SD_PATH_DIRECT); 10817 10818 if ((status != 0) && ISCD(un)) { 10819 rval = SD_NOT_READY_VALID; 10820 mutex_enter(SD_MUTEX(un)); 10821 10822 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10823 10824 goto done; 10825 } else if (status != 0) 10826 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10827 mutex_enter(SD_MUTEX(un)); 10828 } 10829 10830 /* The state has changed; inform the media watch routines */ 10831 un->un_mediastate = DKIO_INSERTED; 10832 cv_broadcast(&un->un_state_cv); 10833 rval = SD_READY_VALID; 10834 10835 done: 10836 10837 /* 10838 * Initialize the capacity kstat value if there was no media 10839 * previously (capacity kstat is 0) and media has now been 10840 * inserted (un_blockcount > 0). 10841 */ 10842 if (un->un_errstats != NULL) { 10843 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10844 if ((stp->sd_capacity.value.ui64 == 0) && 10845 (un->un_f_blockcount_is_valid == TRUE)) { 10846 stp->sd_capacity.value.ui64 = 10847 (uint64_t)((uint64_t)un->un_blockcount * 10848 un->un_sys_blocksize); 10849 } 10850 } 10851 10852 mutex_exit(SD_MUTEX(un)); 10853 return (rval); 10854 } 10855 10856 10857 /* 10858 * Function: sdmin 10859 * 10860 * Description: Routine to limit the size of a data transfer. Used in 10861 * conjunction with physio(9F). 10862 * 10863 * Arguments: bp - pointer to the indicated buf(9S) struct. 10864 * 10865 * Context: Kernel thread context.
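 * For example, sdread() and sdwrite() pass sdmin to physio(9F), which invokes it before each transfer; unless buf breakup is enabled, this clamps b_bcount to un_max_xfer_size.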
10866 */ 10867 10868 static void 10869 sdmin(struct buf *bp) 10870 { 10871 struct sd_lun *un; 10872 int instance; 10873 10874 instance = SDUNIT(bp->b_edev); 10875 10876 un = ddi_get_soft_state(sd_state, instance); 10877 ASSERT(un != NULL); 10878 10879 /* 10880 * We depend on buf breakup to restrict 10881 * IO size if it is enabled. 10882 */ 10883 if (un->un_buf_breakup_supported) { 10884 return; 10885 } 10886 10887 if (bp->b_bcount > un->un_max_xfer_size) { 10888 bp->b_bcount = un->un_max_xfer_size; 10889 } 10890 } 10891 10892 10893 /* 10894 * Function: sdread 10895 * 10896 * Description: Driver's read(9e) entry point function. 10897 * 10898 * Arguments: dev - device number 10899 * uio - structure pointer describing where data is to be stored 10900 * in user's space 10901 * cred_p - user credential pointer 10902 * 10903 * Return Code: ENXIO 10904 * EIO 10905 * EINVAL 10906 * value returned by physio 10907 * 10908 * Context: Kernel thread context. 10909 */ 10910 /* ARGSUSED */ 10911 static int 10912 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10913 { 10914 struct sd_lun *un = NULL; 10915 int secmask; 10916 int err = 0; 10917 sd_ssc_t *ssc; 10918 10919 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10920 return (ENXIO); 10921 } 10922 10923 ASSERT(!mutex_owned(SD_MUTEX(un))); 10924 10925 10926 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10927 mutex_enter(SD_MUTEX(un)); 10928 /* 10929 * Because the call to sd_ready_and_valid will issue I/O we 10930 * must wait here if either the device is suspended or 10931 * if it's power level is changing. 10932 */ 10933 while ((un->un_state == SD_STATE_SUSPENDED) || 10934 (un->un_state == SD_STATE_PM_CHANGING)) { 10935 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10936 } 10937 un->un_ncmds_in_driver++; 10938 mutex_exit(SD_MUTEX(un)); 10939 10940 /* Initialize sd_ssc_t for internal uscsi commands */ 10941 ssc = sd_ssc_init(un); 10942 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10943 err = EIO; 10944 } else { 10945 err = 0; 10946 } 10947 sd_ssc_fini(ssc); 10948 10949 mutex_enter(SD_MUTEX(un)); 10950 un->un_ncmds_in_driver--; 10951 ASSERT(un->un_ncmds_in_driver >= 0); 10952 mutex_exit(SD_MUTEX(un)); 10953 if (err != 0) 10954 return (err); 10955 } 10956 10957 /* 10958 * Read requests are restricted to multiples of the system block size. 10959 */ 10960 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 10961 !un->un_f_enable_rmw) 10962 secmask = un->un_tgt_blocksize - 1; 10963 else 10964 secmask = DEV_BSIZE - 1; 10965 10966 if (uio->uio_loffset & ((offset_t)(secmask))) { 10967 SD_ERROR(SD_LOG_READ_WRITE, un, 10968 "sdread: file offset not modulo %d\n", 10969 secmask + 1); 10970 err = EINVAL; 10971 } else if (uio->uio_iov->iov_len & (secmask)) { 10972 SD_ERROR(SD_LOG_READ_WRITE, un, 10973 "sdread: transfer length not modulo %d\n", 10974 secmask + 1); 10975 err = EINVAL; 10976 } else { 10977 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10978 } 10979 10980 return (err); 10981 } 10982 10983 10984 /* 10985 * Function: sdwrite 10986 * 10987 * Description: Driver's write(9e) entry point function. 10988 * 10989 * Arguments: dev - device number 10990 * uio - structure pointer describing where data is stored in 10991 * user's space 10992 * cred_p - user credential pointer 10993 * 10994 * Return Code: ENXIO 10995 * EIO 10996 * EINVAL 10997 * value returned by physio 10998 * 10999 * Context: Kernel thread context. 
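 *
 * Note: as with sdread(), both the file offset and the transfer
 * length must be multiples of the block size in effect (see the
 * secmask logic below). As a worked example with a hypothetical
 * 4096-byte target block size and RMW disabled: secmask = 0xFFF,
 * so a request at uio_loffset 6144 fails with EINVAL because
 * (6144 & 0xFFF) == 2048.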
11000 */ 11001 /* ARGSUSED */ 11002 static int 11003 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 11004 { 11005 struct sd_lun *un = NULL; 11006 int secmask; 11007 int err = 0; 11008 sd_ssc_t *ssc; 11009 11010 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11011 return (ENXIO); 11012 } 11013 11014 ASSERT(!mutex_owned(SD_MUTEX(un))); 11015 11016 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11017 mutex_enter(SD_MUTEX(un)); 11018 /* 11019 * Because the call to sd_ready_and_valid will issue I/O we 11020 * must wait here if either the device is suspended or 11021 * if it's power level is changing. 11022 */ 11023 while ((un->un_state == SD_STATE_SUSPENDED) || 11024 (un->un_state == SD_STATE_PM_CHANGING)) { 11025 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11026 } 11027 un->un_ncmds_in_driver++; 11028 mutex_exit(SD_MUTEX(un)); 11029 11030 /* Initialize sd_ssc_t for internal uscsi commands */ 11031 ssc = sd_ssc_init(un); 11032 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11033 err = EIO; 11034 } else { 11035 err = 0; 11036 } 11037 sd_ssc_fini(ssc); 11038 11039 mutex_enter(SD_MUTEX(un)); 11040 un->un_ncmds_in_driver--; 11041 ASSERT(un->un_ncmds_in_driver >= 0); 11042 mutex_exit(SD_MUTEX(un)); 11043 if (err != 0) 11044 return (err); 11045 } 11046 11047 /* 11048 * Write requests are restricted to multiples of the system block size. 11049 */ 11050 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11051 !un->un_f_enable_rmw) 11052 secmask = un->un_tgt_blocksize - 1; 11053 else 11054 secmask = DEV_BSIZE - 1; 11055 11056 if (uio->uio_loffset & ((offset_t)(secmask))) { 11057 SD_ERROR(SD_LOG_READ_WRITE, un, 11058 "sdwrite: file offset not modulo %d\n", 11059 secmask + 1); 11060 err = EINVAL; 11061 } else if (uio->uio_iov->iov_len & (secmask)) { 11062 SD_ERROR(SD_LOG_READ_WRITE, un, 11063 "sdwrite: transfer length not modulo %d\n", 11064 secmask + 1); 11065 err = EINVAL; 11066 } else { 11067 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 11068 } 11069 11070 return (err); 11071 } 11072 11073 11074 /* 11075 * Function: sdaread 11076 * 11077 * Description: Driver's aread(9e) entry point function. 11078 * 11079 * Arguments: dev - device number 11080 * aio - structure pointer describing where data is to be stored 11081 * cred_p - user credential pointer 11082 * 11083 * Return Code: ENXIO 11084 * EIO 11085 * EINVAL 11086 * value returned by aphysio 11087 * 11088 * Context: Kernel thread context. 11089 */ 11090 /* ARGSUSED */ 11091 static int 11092 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11093 { 11094 struct sd_lun *un = NULL; 11095 struct uio *uio = aio->aio_uio; 11096 int secmask; 11097 int err = 0; 11098 sd_ssc_t *ssc; 11099 11100 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11101 return (ENXIO); 11102 } 11103 11104 ASSERT(!mutex_owned(SD_MUTEX(un))); 11105 11106 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11107 mutex_enter(SD_MUTEX(un)); 11108 /* 11109 * Because the call to sd_ready_and_valid will issue I/O we 11110 * must wait here if either the device is suspended or 11111 * if it's power level is changing. 
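		 * The state is then re-tested in a loop, since it may
		 * have changed again by the time cv_wait(9F) returns
		 * and this thread has reacquired SD_MUTEX(un).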
11112 */ 11113 while ((un->un_state == SD_STATE_SUSPENDED) || 11114 (un->un_state == SD_STATE_PM_CHANGING)) { 11115 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11116 } 11117 un->un_ncmds_in_driver++; 11118 mutex_exit(SD_MUTEX(un)); 11119 11120 /* Initialize sd_ssc_t for internal uscsi commands */ 11121 ssc = sd_ssc_init(un); 11122 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11123 err = EIO; 11124 } else { 11125 err = 0; 11126 } 11127 sd_ssc_fini(ssc); 11128 11129 mutex_enter(SD_MUTEX(un)); 11130 un->un_ncmds_in_driver--; 11131 ASSERT(un->un_ncmds_in_driver >= 0); 11132 mutex_exit(SD_MUTEX(un)); 11133 if (err != 0) 11134 return (err); 11135 } 11136 11137 /* 11138 * Read requests are restricted to multiples of the system block size. 11139 */ 11140 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11141 !un->un_f_enable_rmw) 11142 secmask = un->un_tgt_blocksize - 1; 11143 else 11144 secmask = DEV_BSIZE - 1; 11145 11146 if (uio->uio_loffset & ((offset_t)(secmask))) { 11147 SD_ERROR(SD_LOG_READ_WRITE, un, 11148 "sdaread: file offset not modulo %d\n", 11149 secmask + 1); 11150 err = EINVAL; 11151 } else if (uio->uio_iov->iov_len & (secmask)) { 11152 SD_ERROR(SD_LOG_READ_WRITE, un, 11153 "sdaread: transfer length not modulo %d\n", 11154 secmask + 1); 11155 err = EINVAL; 11156 } else { 11157 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 11158 } 11159 11160 return (err); 11161 } 11162 11163 11164 /* 11165 * Function: sdawrite 11166 * 11167 * Description: Driver's awrite(9e) entry point function. 11168 * 11169 * Arguments: dev - device number 11170 * aio - structure pointer describing where data is stored 11171 * cred_p - user credential pointer 11172 * 11173 * Return Code: ENXIO 11174 * EIO 11175 * EINVAL 11176 * value returned by aphysio 11177 * 11178 * Context: Kernel thread context. 11179 */ 11180 /* ARGSUSED */ 11181 static int 11182 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11183 { 11184 struct sd_lun *un = NULL; 11185 struct uio *uio = aio->aio_uio; 11186 int secmask; 11187 int err = 0; 11188 sd_ssc_t *ssc; 11189 11190 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11191 return (ENXIO); 11192 } 11193 11194 ASSERT(!mutex_owned(SD_MUTEX(un))); 11195 11196 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11197 mutex_enter(SD_MUTEX(un)); 11198 /* 11199 * Because the call to sd_ready_and_valid will issue I/O we 11200 * must wait here if either the device is suspended or 11201 * if it's power level is changing. 11202 */ 11203 while ((un->un_state == SD_STATE_SUSPENDED) || 11204 (un->un_state == SD_STATE_PM_CHANGING)) { 11205 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11206 } 11207 un->un_ncmds_in_driver++; 11208 mutex_exit(SD_MUTEX(un)); 11209 11210 /* Initialize sd_ssc_t for internal uscsi commands */ 11211 ssc = sd_ssc_init(un); 11212 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11213 err = EIO; 11214 } else { 11215 err = 0; 11216 } 11217 sd_ssc_fini(ssc); 11218 11219 mutex_enter(SD_MUTEX(un)); 11220 un->un_ncmds_in_driver--; 11221 ASSERT(un->un_ncmds_in_driver >= 0); 11222 mutex_exit(SD_MUTEX(un)); 11223 if (err != 0) 11224 return (err); 11225 } 11226 11227 /* 11228 * Write requests are restricted to multiples of the system block size. 
11229 */ 11230 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11231 !un->un_f_enable_rmw) 11232 secmask = un->un_tgt_blocksize - 1; 11233 else 11234 secmask = DEV_BSIZE - 1; 11235 11236 if (uio->uio_loffset & ((offset_t)(secmask))) { 11237 SD_ERROR(SD_LOG_READ_WRITE, un, 11238 "sdawrite: file offset not modulo %d\n", 11239 secmask + 1); 11240 err = EINVAL; 11241 } else if (uio->uio_iov->iov_len & (secmask)) { 11242 SD_ERROR(SD_LOG_READ_WRITE, un, 11243 "sdawrite: transfer length not modulo %d\n", 11244 secmask + 1); 11245 err = EINVAL; 11246 } else { 11247 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11248 } 11249 11250 return (err); 11251 } 11252 11253 11254 11255 11256 11257 /* 11258 * Driver IO processing follows the following sequence: 11259 * 11260 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11261 * | | ^ 11262 * v v | 11263 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11264 * | | | | 11265 * v | | | 11266 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11267 * | | ^ ^ 11268 * v v | | 11269 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11270 * | | | | 11271 * +---+ | +------------+ +-------+ 11272 * | | | | 11273 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11274 * | v | | 11275 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11276 * | | ^ | 11277 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11278 * | v | | 11279 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11280 * | | ^ | 11281 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11282 * | v | | 11283 * | sd_checksum_iostart() sd_checksum_iodone() | 11284 * | | ^ | 11285 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11286 * | v | | 11287 * | sd_pm_iostart() sd_pm_iodone() | 11288 * | | ^ | 11289 * | | | | 11290 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11291 * | ^ 11292 * v | 11293 * sd_core_iostart() | 11294 * | | 11295 * | +------>(*destroypkt)() 11296 * +-> sd_start_cmds() <-+ | | 11297 * | | | v 11298 * | | | scsi_destroy_pkt(9F) 11299 * | | | 11300 * +->(*initpkt)() +- sdintr() 11301 * | | | | 11302 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11303 * | +-> scsi_setup_cdb(9F) | 11304 * | | 11305 * +--> scsi_transport(9F) | 11306 * | | 11307 * +----> SCSA ---->+ 11308 * 11309 * 11310 * This code is based upon the following presumptions: 11311 * 11312 * - iostart and iodone functions operate on buf(9S) structures. These 11313 * functions perform the necessary operations on the buf(9S) and pass 11314 * them along to the next function in the chain by using the macros 11315 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11316 * (for iodone side functions). 11317 * 11318 * - The iostart side functions may sleep. The iodone side functions 11319 * are called under interrupt context and may NOT sleep. Therefore 11320 * iodone side functions also may not call iostart side functions. 11321 * (NOTE: iostart side functions should NOT sleep for memory, as 11322 * this could result in deadlock.) 11323 * 11324 * - An iostart side function may call its corresponding iodone side 11325 * function directly (if necessary). 11326 * 11327 * - In the event of an error, an iostart side function can return a buf(9S) 11328 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11329 * b_error in the usual way of course). 11330 * 11331 * - The taskq mechanism may be used by the iodone side functions to dispatch 11332 * requests to the iostart side functions. 
The iostart side functions in
11333  *   this case would be called under the context of a taskq thread, so it's
11334  *   OK for them to block/sleep/spin in this case.
11335  *
11336  * - iostart side functions may allocate "shadow" buf(9S) structs and
11337  *   pass them along to the next function in the chain. The corresponding
11338  *   iodone side functions must coalesce the "shadow" bufs and return
11339  *   the "original" buf to the next higher layer.
11340  *
11341  * - The b_private field of the buf(9S) struct holds a pointer to
11342  *   an sd_xbuf struct, which contains information needed to
11343  *   construct the scsi_pkt for the command.
11344  *
11345  * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11346  *   layer must acquire & release the SD_MUTEX(un) as needed.
11347  */
11348
11349
11350 /*
11351  * Create taskq for all targets in the system. This is created at
11352  * _init(9E) and destroyed at _fini(9E).
11353  *
11354  * Note: here we set the minalloc to a reasonably high number to ensure that
11355  * we will have an adequate supply of task entries available at interrupt time.
11356  * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11357  * sd_taskq_create(). Since we do not want to sleep for allocations at
11358  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11359  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11360  * requests at any one instant in time.
11361  */
11362 #define	SD_TASKQ_NUMTHREADS	8
11363 #define	SD_TASKQ_MINALLOC	256
11364 #define	SD_TASKQ_MAXALLOC	256
11365
11366 static taskq_t	*sd_tq = NULL;
11367 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11368
11369 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11370 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11371
11372 /*
11373  * The following task queue is created for the write part of
11374  * read-modify-write on non-512 block size devices.
11375  * Limit the number of threads to 1 for now; this was chosen because
11376  * the queue currently applies only to DVD-RAM and MO drives, for
11377  * which performance is not the main criterion at this stage.
11378  * Note: whether a single taskq could be used instead remains to be explored.
11379  */
11380 #define	SD_WMR_TASKQ_NUMTHREADS	1
11381 static taskq_t	*sd_wmr_tq = NULL;
11382 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11383
11384 /*
11385  * Function: sd_taskq_create
11386  *
11387  * Description: Create taskq thread(s) and preallocate task entries
11388  *
11389  * Return Code: None (the created taskqs are stored in sd_tq and sd_wmr_tq).
11390  *
11391  * Context: Can sleep. Requires blockable context.
11392  *
11393  * Notes: - The taskq() facility currently is NOT part of the DDI.
11394  *		(definitely NOT recommended for 3rd-party drivers!) :-)
11395  *	  - taskq_create() will block for memory, also it will panic
11396  *		if it cannot create the requested number of threads.
11397  *	  - Currently taskq_create() creates threads that cannot be
11398  *		swapped.
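 *	  - As an illustration (assumed usage, not code from this file):
 *		work would be queued with, e.g.,
 *		(void) taskq_dispatch(sd_tq, func, arg, TQ_NOSLEEP);
 *		a return of 0 means the dispatch failed because no
 *		preallocated task entry was available.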
11399 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11400 * supply of taskq entries at interrupt time (ie, so that we 11401 * do not have to sleep for memory) 11402 */ 11403 11404 static void 11405 sd_taskq_create(void) 11406 { 11407 char taskq_name[TASKQ_NAMELEN]; 11408 11409 ASSERT(sd_tq == NULL); 11410 ASSERT(sd_wmr_tq == NULL); 11411 11412 (void) snprintf(taskq_name, sizeof (taskq_name), 11413 "%s_drv_taskq", sd_label); 11414 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11415 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11416 TASKQ_PREPOPULATE)); 11417 11418 (void) snprintf(taskq_name, sizeof (taskq_name), 11419 "%s_rmw_taskq", sd_label); 11420 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11421 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11422 TASKQ_PREPOPULATE)); 11423 } 11424 11425 11426 /* 11427 * Function: sd_taskq_delete 11428 * 11429 * Description: Complementary cleanup routine for sd_taskq_create(). 11430 * 11431 * Context: Kernel thread context. 11432 */ 11433 11434 static void 11435 sd_taskq_delete(void) 11436 { 11437 ASSERT(sd_tq != NULL); 11438 ASSERT(sd_wmr_tq != NULL); 11439 taskq_destroy(sd_tq); 11440 taskq_destroy(sd_wmr_tq); 11441 sd_tq = NULL; 11442 sd_wmr_tq = NULL; 11443 } 11444 11445 11446 /* 11447 * Function: sdstrategy 11448 * 11449 * Description: Driver's strategy (9E) entry point function. 11450 * 11451 * Arguments: bp - pointer to buf(9S) 11452 * 11453 * Return Code: Always returns zero 11454 * 11455 * Context: Kernel thread context. 11456 */ 11457 11458 static int 11459 sdstrategy(struct buf *bp) 11460 { 11461 struct sd_lun *un; 11462 11463 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11464 if (un == NULL) { 11465 bioerror(bp, EIO); 11466 bp->b_resid = bp->b_bcount; 11467 biodone(bp); 11468 return (0); 11469 } 11470 11471 /* As was done in the past, fail new cmds. if state is dumping. */ 11472 if (un->un_state == SD_STATE_DUMPING) { 11473 bioerror(bp, ENXIO); 11474 bp->b_resid = bp->b_bcount; 11475 biodone(bp); 11476 return (0); 11477 } 11478 11479 ASSERT(!mutex_owned(SD_MUTEX(un))); 11480 11481 /* 11482 * Commands may sneak in while we released the mutex in 11483 * DDI_SUSPEND, we should block new commands. However, old 11484 * commands that are still in the driver at this point should 11485 * still be allowed to drain. 11486 */ 11487 mutex_enter(SD_MUTEX(un)); 11488 /* 11489 * Must wait here if either the device is suspended or 11490 * if it's power level is changing. 11491 */ 11492 while ((un->un_state == SD_STATE_SUSPENDED) || 11493 (un->un_state == SD_STATE_PM_CHANGING)) { 11494 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11495 } 11496 11497 un->un_ncmds_in_driver++; 11498 11499 /* 11500 * atapi: Since we are running the CD for now in PIO mode we need to 11501 * call bp_mapin here to avoid bp_mapin called interrupt context under 11502 * the HBA's init_pkt routine. 11503 */ 11504 if (un->un_f_cfg_is_atapi == TRUE) { 11505 mutex_exit(SD_MUTEX(un)); 11506 bp_mapin(bp); 11507 mutex_enter(SD_MUTEX(un)); 11508 } 11509 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11510 un->un_ncmds_in_driver); 11511 11512 if (bp->b_flags & B_WRITE) 11513 un->un_f_sync_cache_required = TRUE; 11514 11515 mutex_exit(SD_MUTEX(un)); 11516 11517 /* 11518 * This will (eventually) allocate the sd_xbuf area and 11519 * call sd_xbuf_strategy(). 
We just want to return the 11520 * result of ddi_xbuf_qstrategy so that we have an opt- 11521 * imized tail call which saves us a stack frame. 11522 */ 11523 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11524 } 11525 11526 11527 /* 11528 * Function: sd_xbuf_strategy 11529 * 11530 * Description: Function for initiating IO operations via the 11531 * ddi_xbuf_qstrategy() mechanism. 11532 * 11533 * Context: Kernel thread context. 11534 */ 11535 11536 static void 11537 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11538 { 11539 struct sd_lun *un = arg; 11540 11541 ASSERT(bp != NULL); 11542 ASSERT(xp != NULL); 11543 ASSERT(un != NULL); 11544 ASSERT(!mutex_owned(SD_MUTEX(un))); 11545 11546 /* 11547 * Initialize the fields in the xbuf and save a pointer to the 11548 * xbuf in bp->b_private. 11549 */ 11550 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11551 11552 /* Send the buf down the iostart chain */ 11553 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11554 } 11555 11556 11557 /* 11558 * Function: sd_xbuf_init 11559 * 11560 * Description: Prepare the given sd_xbuf struct for use. 11561 * 11562 * Arguments: un - ptr to softstate 11563 * bp - ptr to associated buf(9S) 11564 * xp - ptr to associated sd_xbuf 11565 * chain_type - IO chain type to use: 11566 * SD_CHAIN_NULL 11567 * SD_CHAIN_BUFIO 11568 * SD_CHAIN_USCSI 11569 * SD_CHAIN_DIRECT 11570 * SD_CHAIN_DIRECT_PRIORITY 11571 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11572 * initialization; may be NULL if none. 11573 * 11574 * Context: Kernel thread context 11575 */ 11576 11577 static void 11578 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11579 uchar_t chain_type, void *pktinfop) 11580 { 11581 int index; 11582 11583 ASSERT(un != NULL); 11584 ASSERT(bp != NULL); 11585 ASSERT(xp != NULL); 11586 11587 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11588 bp, chain_type); 11589 11590 xp->xb_un = un; 11591 xp->xb_pktp = NULL; 11592 xp->xb_pktinfo = pktinfop; 11593 xp->xb_private = bp->b_private; 11594 xp->xb_blkno = (daddr_t)bp->b_blkno; 11595 11596 /* 11597 * Set up the iostart and iodone chain indexes in the xbuf, based 11598 * upon the specified chain type to use. 11599 */ 11600 switch (chain_type) { 11601 case SD_CHAIN_NULL: 11602 /* 11603 * Fall thru to just use the values for the buf type, even 11604 * tho for the NULL chain these values will never be used. 
11605 */ 11606 /* FALLTHRU */ 11607 case SD_CHAIN_BUFIO: 11608 index = un->un_buf_chain_type; 11609 if ((!un->un_f_has_removable_media) && 11610 (un->un_tgt_blocksize != 0) && 11611 (un->un_tgt_blocksize != DEV_BSIZE || 11612 un->un_f_enable_rmw)) { 11613 int secmask = 0, blknomask = 0; 11614 if (un->un_f_enable_rmw) { 11615 blknomask = 11616 (un->un_phy_blocksize / DEV_BSIZE) - 1; 11617 secmask = un->un_phy_blocksize - 1; 11618 } else { 11619 blknomask = 11620 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11621 secmask = un->un_tgt_blocksize - 1; 11622 } 11623 11624 if ((bp->b_lblkno & (blknomask)) || 11625 (bp->b_bcount & (secmask))) { 11626 if ((un->un_f_rmw_type != 11627 SD_RMW_TYPE_RETURN_ERROR) || 11628 un->un_f_enable_rmw) { 11629 if (un->un_f_pm_is_enabled == FALSE) 11630 index = 11631 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11632 else 11633 index = 11634 SD_CHAIN_INFO_MSS_DISK; 11635 } 11636 } 11637 } 11638 break; 11639 case SD_CHAIN_USCSI: 11640 index = un->un_uscsi_chain_type; 11641 break; 11642 case SD_CHAIN_DIRECT: 11643 index = un->un_direct_chain_type; 11644 break; 11645 case SD_CHAIN_DIRECT_PRIORITY: 11646 index = un->un_priority_chain_type; 11647 break; 11648 default: 11649 /* We're really broken if we ever get here... */ 11650 panic("sd_xbuf_init: illegal chain type!"); 11651 /*NOTREACHED*/ 11652 } 11653 11654 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11655 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11656 11657 /* 11658 * It might be a bit easier to simply bzero the entire xbuf above, 11659 * but it turns out that since we init a fair number of members anyway, 11660 * we save a fair number cycles by doing explicit assignment of zero. 11661 */ 11662 xp->xb_pkt_flags = 0; 11663 xp->xb_dma_resid = 0; 11664 xp->xb_retry_count = 0; 11665 xp->xb_victim_retry_count = 0; 11666 xp->xb_ua_retry_count = 0; 11667 xp->xb_nr_retry_count = 0; 11668 xp->xb_sense_bp = NULL; 11669 xp->xb_sense_status = 0; 11670 xp->xb_sense_state = 0; 11671 xp->xb_sense_resid = 0; 11672 xp->xb_ena = 0; 11673 11674 bp->b_private = xp; 11675 bp->b_flags &= ~(B_DONE | B_ERROR); 11676 bp->b_resid = 0; 11677 bp->av_forw = NULL; 11678 bp->av_back = NULL; 11679 bioerror(bp, 0); 11680 11681 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11682 } 11683 11684 11685 /* 11686 * Function: sd_uscsi_strategy 11687 * 11688 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11689 * 11690 * Arguments: bp - buf struct ptr 11691 * 11692 * Return Code: Always returns 0 11693 * 11694 * Context: Kernel thread context 11695 */ 11696 11697 static int 11698 sd_uscsi_strategy(struct buf *bp) 11699 { 11700 struct sd_lun *un; 11701 struct sd_uscsi_info *uip; 11702 struct sd_xbuf *xp; 11703 uchar_t chain_type; 11704 uchar_t cmd; 11705 11706 ASSERT(bp != NULL); 11707 11708 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11709 if (un == NULL) { 11710 bioerror(bp, EIO); 11711 bp->b_resid = bp->b_bcount; 11712 biodone(bp); 11713 return (0); 11714 } 11715 11716 ASSERT(!mutex_owned(SD_MUTEX(un))); 11717 11718 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11719 11720 /* 11721 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11722 */ 11723 ASSERT(bp->b_private != NULL); 11724 uip = (struct sd_uscsi_info *)bp->b_private; 11725 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11726 11727 mutex_enter(SD_MUTEX(un)); 11728 /* 11729 * atapi: Since we are running the CD for now in PIO mode we need to 11730 * call bp_mapin here to 
avoid bp_mapin being called in interrupt context from
11731 	 * the HBA's init_pkt routine.
11732 	 */
11733 	if (un->un_f_cfg_is_atapi == TRUE) {
11734 		mutex_exit(SD_MUTEX(un));
11735 		bp_mapin(bp);
11736 		mutex_enter(SD_MUTEX(un));
11737 	}
11738 	un->un_ncmds_in_driver++;
11739 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11740 	    un->un_ncmds_in_driver);
11741
11742 	if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11743 	    (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11744 		un->un_f_sync_cache_required = TRUE;
11745
11746 	mutex_exit(SD_MUTEX(un));
11747
11748 	switch (uip->ui_flags) {
11749 	case SD_PATH_DIRECT:
11750 		chain_type = SD_CHAIN_DIRECT;
11751 		break;
11752 	case SD_PATH_DIRECT_PRIORITY:
11753 		chain_type = SD_CHAIN_DIRECT_PRIORITY;
11754 		break;
11755 	default:
11756 		chain_type = SD_CHAIN_USCSI;
11757 		break;
11758 	}
11759
11760 	/*
11761 	 * We may allocate an extra buf for external USCSI commands. If the
11762 	 * application asks for more than 20 bytes of sense data via USCSI,
11763 	 * the SCSA layer will allocate a 252-byte sense buf for that command.
11764 	 */
11765 	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11766 	    SENSE_LENGTH) {
11767 		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11768 		    MAX_SENSE_LENGTH, KM_SLEEP);
11769 	} else {
11770 		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11771 	}
11772
11773 	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11774
11775 	/* Use the index obtained within xbuf_init */
11776 	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11777
11778 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11779
11780 	return (0);
11781 }
11782
11783 /*
11784  * Function: sd_send_scsi_cmd
11785  *
11786  * Description: Runs a USCSI command for user (when called through sdioctl),
11787  *		or for the driver
11788  *
11789  * Arguments: dev - the dev_t for the device
11790  *	incmd - ptr to a valid uscsi_cmd struct
11791  *	flag - bit flag, indicating open settings, 32/64 bit type
11792  *	dataspace - UIO_USERSPACE or UIO_SYSSPACE
11793  *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11794  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11795  *		to use the USCSI "direct" chain and bypass the normal
11796  *		command waitq.
11797  *
11798  * Return Code: 0 - successful completion of the given command
11799  *	EIO - scsi_uscsi_handle_command() failed
11800  *	ENXIO - soft state not found for specified dev
11801  *	EINVAL
11802  *	EFAULT - copyin/copyout error
11803  *	return code of scsi_uscsi_handle_command():
11804  *		EIO
11805  *		ENXIO
11806  *		EACCES
11807  *
11808  * Context: Waits for command to complete. Can sleep.
11809  */
11810
11811 static int
11812 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11813 	enum uio_seg dataspace, int path_flag)
11814 {
11815 	struct sd_lun *un;
11816 	sd_ssc_t *ssc;
11817 	int rval;
11818
11819 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11820 	if (un == NULL) {
11821 		return (ENXIO);
11822 	}
11823
11824 	/*
11825 	 * Using sd_ssc_send to handle uscsi cmd
11826 	 */
11827 	ssc = sd_ssc_init(un);
11828 	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11829 	sd_ssc_fini(ssc);
11830
11831 	return (rval);
11832 }
11833
11834 /*
11835  * Function: sd_ssc_init
11836  *
11837  * Description: USCSI end users call this function to initialize the necessary
11838  *	fields, such as the uscsi_cmd and sd_uscsi_info structs.
11839  *
11840  *	The return value of sd_send_scsi_cmd will be treated as a
11841  *	fault in various conditions.
Even if it is not zero, some
11842  *	callers may ignore the return value. That is to say, we cannot
11843  *	make an accurate assessment in sdintr, since a
11844  *	command failing in sdintr does not mean that the caller of
11845  *	sd_send_scsi_cmd will treat it as a real failure.
11846  *
11847  *	To avoid printing too many error logs for a failed uscsi
11848  *	packet that the caller may not treat as a failure, the
11849  *	sd driver keeps silent while handling all uscsi commands.
11850  *
11851  *	During detach->attach and attach-open, for some types of
11852  *	problems, the driver should be providing information about
11853  *	the problem encountered. Devices use USCSI_SILENT, which
11854  *	suppresses all driver information. The result is that no
11855  *	information about the problem is available. Being
11856  *	completely silent during this time is inappropriate. The
11857  *	driver needs a more selective filter than USCSI_SILENT, so
11858  *	that information related to faults is provided.
11859  *
11860  *	To make an accurate assessment, the caller of
11861  *	sd_send_scsi_USCSI_CMD should take ownership and
11862  *	gather the information necessary to print error messages.
11863  *
11864  *	If we want to print the necessary info for a uscsi command, we
11865  *	need to keep the uscsi_cmd and sd_uscsi_info until we can make
11866  *	the assessment. We use sd_ssc_init to allocate the necessary
11867  *	structs for sending a uscsi command, and we are also
11868  *	responsible for freeing that memory by calling
11869  *	sd_ssc_fini.
11870  *
11871  *	The calling sequence will look like:
11872  *	sd_ssc_init->
11873  *
11874  *		...
11875  *
11876  *		sd_send_scsi_USCSI_CMD->
11877  *			sd_ssc_send->  - - -  sdintr
11878  *		...
11879  *
11880  *		if we think the return value should be treated as a
11881  *		failure, we make the assessment here and print out the
11882  *		necessary information by retrieving uscsi_cmd and sd_uscsi_info
11883  *
11884  *		...
11885  *
11886  *	sd_ssc_fini
11887  *
11888  *
11889  * Arguments: un - pointer to driver soft state (unit) structure for this
11890  *		target.
11891  *
11892  * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11893  *		uscsi_cmd and sd_uscsi_info.
11894  *		NULL - if memory cannot be allocated for the sd_ssc_t struct
11895  *
11896  * Context: Kernel Thread.
11897  */
11898 static sd_ssc_t *
11899 sd_ssc_init(struct sd_lun *un)
11900 {
11901 	sd_ssc_t *ssc;
11902 	struct uscsi_cmd *ucmdp;
11903 	struct sd_uscsi_info *uip;
11904
11905 	ASSERT(un != NULL);
11906 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11907
11908 	/*
11909 	 * Allocate sd_ssc_t structure
11910 	 */
11911 	ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11912
11913 	/*
11914 	 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11915 	 */
11916 	ucmdp = scsi_uscsi_alloc();
11917
11918 	/*
11919 	 * Allocate sd_uscsi_info structure
11920 	 */
11921 	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11922
11923 	ssc->ssc_uscsi_cmd = ucmdp;
11924 	ssc->ssc_uscsi_info = uip;
11925 	ssc->ssc_un = un;
11926
11927 	return (ssc);
11928 }
11929
11930 /*
11931  * Function: sd_ssc_fini
11932  *
11933  * Description: Free an sd_ssc_t and the structures hanging off it.
11934  *
11935  * Arguments: ssc - struct pointer of sd_ssc_t.
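 *
 * For example, pairing with sd_ssc_init as described above:
 *
 *	ssc = sd_ssc_init(un);
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
 *	...
 *	sd_ssc_fini(ssc);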
11936 */ 11937 static void 11938 sd_ssc_fini(sd_ssc_t *ssc) 11939 { 11940 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11941 11942 if (ssc->ssc_uscsi_info != NULL) { 11943 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11944 ssc->ssc_uscsi_info = NULL; 11945 } 11946 11947 kmem_free(ssc, sizeof (sd_ssc_t)); 11948 ssc = NULL; 11949 } 11950 11951 /* 11952 * Function: sd_ssc_send 11953 * 11954 * Description: Runs a USCSI command for user when called through sdioctl, 11955 * or for the driver. 11956 * 11957 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11958 * sd_uscsi_info in. 11959 * incmd - ptr to a valid uscsi_cmd struct 11960 * flag - bit flag, indicating open settings, 32/64 bit type 11961 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11962 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11963 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11964 * to use the USCSI "direct" chain and bypass the normal 11965 * command waitq. 11966 * 11967 * Return Code: 0 - successful completion of the given command 11968 * EIO - scsi_uscsi_handle_command() failed 11969 * ENXIO - soft state not found for specified dev 11970 * ECANCELED - command cancelled due to low power 11971 * EINVAL 11972 * EFAULT - copyin/copyout error 11973 * return code of scsi_uscsi_handle_command(): 11974 * EIO 11975 * ENXIO 11976 * EACCES 11977 * 11978 * Context: Kernel Thread; 11979 * Waits for command to complete. Can sleep. 11980 */ 11981 static int 11982 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11983 enum uio_seg dataspace, int path_flag) 11984 { 11985 struct sd_uscsi_info *uip; 11986 struct uscsi_cmd *uscmd; 11987 struct sd_lun *un; 11988 dev_t dev; 11989 11990 int format = 0; 11991 int rval; 11992 11993 ASSERT(ssc != NULL); 11994 un = ssc->ssc_un; 11995 ASSERT(un != NULL); 11996 uscmd = ssc->ssc_uscsi_cmd; 11997 ASSERT(uscmd != NULL); 11998 ASSERT(!mutex_owned(SD_MUTEX(un))); 11999 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 12000 /* 12001 * If enter here, it indicates that the previous uscsi 12002 * command has not been processed by sd_ssc_assessment. 12003 * This is violating our rules of FMA telemetry processing. 12004 * We should print out this message and the last undisposed 12005 * uscsi command. 12006 */ 12007 if (uscmd->uscsi_cdb != NULL) { 12008 SD_INFO(SD_LOG_SDTEST, un, 12009 "sd_ssc_send is missing the alternative " 12010 "sd_ssc_assessment when running command 0x%x.\n", 12011 uscmd->uscsi_cdb[0]); 12012 } 12013 /* 12014 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 12015 * the initial status. 12016 */ 12017 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12018 } 12019 12020 /* 12021 * We need to make sure sd_ssc_send will have sd_ssc_assessment 12022 * followed to avoid missing FMA telemetries. 12023 */ 12024 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 12025 12026 /* 12027 * if USCSI_PMFAILFAST is set and un is in low power, fail the 12028 * command immediately. 
12029 */ 12030 mutex_enter(SD_MUTEX(un)); 12031 mutex_enter(&un->un_pm_mutex); 12032 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 12033 SD_DEVICE_IS_IN_LOW_POWER(un)) { 12034 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 12035 "un:0x%p is in low power\n", un); 12036 mutex_exit(&un->un_pm_mutex); 12037 mutex_exit(SD_MUTEX(un)); 12038 return (ECANCELED); 12039 } 12040 mutex_exit(&un->un_pm_mutex); 12041 mutex_exit(SD_MUTEX(un)); 12042 12043 #ifdef SDDEBUG 12044 switch (dataspace) { 12045 case UIO_USERSPACE: 12046 SD_TRACE(SD_LOG_IO, un, 12047 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 12048 break; 12049 case UIO_SYSSPACE: 12050 SD_TRACE(SD_LOG_IO, un, 12051 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 12052 break; 12053 default: 12054 SD_TRACE(SD_LOG_IO, un, 12055 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 12056 break; 12057 } 12058 #endif 12059 12060 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 12061 SD_ADDRESS(un), &uscmd); 12062 if (rval != 0) { 12063 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 12064 "scsi_uscsi_alloc_and_copyin failed\n", un); 12065 return (rval); 12066 } 12067 12068 if ((uscmd->uscsi_cdb != NULL) && 12069 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 12070 mutex_enter(SD_MUTEX(un)); 12071 un->un_f_format_in_progress = TRUE; 12072 mutex_exit(SD_MUTEX(un)); 12073 format = 1; 12074 } 12075 12076 /* 12077 * Allocate an sd_uscsi_info struct and fill it with the info 12078 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 12079 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 12080 * since we allocate the buf here in this function, we do not 12081 * need to preserve the prior contents of b_private. 12082 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 12083 */ 12084 uip = ssc->ssc_uscsi_info; 12085 uip->ui_flags = path_flag; 12086 uip->ui_cmdp = uscmd; 12087 12088 /* 12089 * Commands sent with priority are intended for error recovery 12090 * situations, and do not have retries performed. 12091 */ 12092 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 12093 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 12094 } 12095 uscmd->uscsi_flags &= ~USCSI_NOINTR; 12096 12097 dev = SD_GET_DEV(un); 12098 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 12099 sd_uscsi_strategy, NULL, uip); 12100 12101 /* 12102 * mark ssc_flags right after handle_cmd to make sure 12103 * the uscsi has been sent 12104 */ 12105 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 12106 12107 #ifdef SDDEBUG 12108 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12109 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 12110 uscmd->uscsi_status, uscmd->uscsi_resid); 12111 if (uscmd->uscsi_bufaddr != NULL) { 12112 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12113 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 12114 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 12115 if (dataspace == UIO_SYSSPACE) { 12116 SD_DUMP_MEMORY(un, SD_LOG_IO, 12117 "data", (uchar_t *)uscmd->uscsi_bufaddr, 12118 uscmd->uscsi_buflen, SD_LOG_HEX); 12119 } 12120 } 12121 #endif 12122 12123 if (format == 1) { 12124 mutex_enter(SD_MUTEX(un)); 12125 un->un_f_format_in_progress = FALSE; 12126 mutex_exit(SD_MUTEX(un)); 12127 } 12128 12129 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 12130 12131 return (rval); 12132 } 12133 12134 /* 12135 * Function: sd_ssc_print 12136 * 12137 * Description: Print information available to the console. 12138 * 12139 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12140 * sd_uscsi_info in. 12141 * sd_severity - log level. 12142 * Context: Kernel thread or interrupt context. 
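 *
 * Note: the number of valid sense bytes is computed below as
 * uscsi_rqlen - uscsi_rqresid; e.g., with a hypothetical request for
 * 252 bytes of sense data and uscsi_rqresid == 234, only the first
 * 18 bytes of the sense buffer are valid.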
12143  */
12144 static void
12145 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12146 {
12147 	struct uscsi_cmd *ucmdp;
12148 	struct scsi_device *devp;
12149 	dev_info_t *devinfo;
12150 	uchar_t *sensep;
12151 	int senlen;
12152 	union scsi_cdb *cdbp;
12153 	uchar_t com;
12154 	extern struct scsi_key_strings scsi_cmds[];
12155
12156 	ASSERT(ssc != NULL);
12157 	ASSERT(ssc->ssc_un != NULL);
12158
12159 	if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12160 		return;
12161 	ucmdp = ssc->ssc_uscsi_cmd;
12162 	devp = SD_SCSI_DEVP(ssc->ssc_un);
12163 	devinfo = SD_DEVINFO(ssc->ssc_un);
12164 	ASSERT(ucmdp != NULL);
12165 	ASSERT(devp != NULL);
12166 	ASSERT(devinfo != NULL);
12167 	sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12168 	senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12169 	cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12170
12171 	/* In certain cases (like DOORLOCK), the cdb could be NULL. */
12172 	if (cdbp == NULL)
12173 		return;
12174 	/* We don't print a log if no sense data is available. */
12175 	if (senlen == 0)
12176 		sensep = NULL;
12177 	com = cdbp->scc_cmd;
12178 	scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12179 	    scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12180 }
12181
12182 /*
12183  * Function: sd_ssc_assessment
12184  *
12185  * Description: We use this function to make an assessment at the point
12186  *	where the SD driver may encounter a potential error.
12187  *
12188  * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12189  *	sd_uscsi_info in.
12190  *	tp_assess - a hint of strategy for ereport posting.
12191  *	Possible values of tp_assess include:
12192  *	SD_FMT_IGNORE - we don't post any ereport because we're
12193  *	sure that it is ok to ignore the underlying problems.
12194  *	SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now,
12195  *	but it might not be correct to ignore the underlying hardware
12196  *	error.
12197  *	SD_FMT_STATUS_CHECK - we will post an ereport with the
12198  *	payload driver-assessment of value "fail" or
12199  *	"fatal" (depending on what information we have here). This
12200  *	assessment value is usually set when the SD driver thinks a
12201  *	potential error has occurred (typically, when the return value
12202  *	of the SCSI command is EIO).
12203  *	SD_FMT_STANDARD - we will post an ereport with the payload
12204  *	driver-assessment of value "info". This assessment value is
12205  *	set when the SCSI command returned successfully and with
12206  *	sense data sent back.
12207  *
12208  * Context: Kernel thread.
12209  */
12210 static void
12211 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12212 {
12213 	int senlen = 0;
12214 	struct uscsi_cmd *ucmdp = NULL;
12215 	struct sd_lun *un;
12216
12217 	ASSERT(ssc != NULL);
12218 	un = ssc->ssc_un;
12219 	ASSERT(un != NULL);
12220 	ucmdp = ssc->ssc_uscsi_cmd;
12221 	ASSERT(ucmdp != NULL);
12222
12223 	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12224 		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12225 	} else {
12226 		/*
12227 		 * If we get here, it indicates that the calling sequence of
12228 		 * sd_ssc_send and sd_ssc_assessment is wrong; the two
12229 		 * should be called in pairs so that
12230 		 * FMA telemetry is not lost.
12231 		 */
12232 		if (ucmdp->uscsi_cdb != NULL) {
12233 			SD_INFO(SD_LOG_SDTEST, un,
12234 			    "sd_ssc_assessment is missing the "
12235 			    "alternative sd_ssc_send when running 0x%x, "
12236 			    "or there are superfluous sd_ssc_assessment for "
12237 			    "the same sd_ssc_send.\n",
12238 			    ucmdp->uscsi_cdb[0]);
12239 		}
12240 		/*
12241 		 * Set the ssc_flags to the initial value to avoid passing
12242 		 * down dirty flags to the following sd_ssc_send function.
12243 		 */
12244 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12245 		return;
12246 	}
12247
12248 	/*
12249 	 * Only handle an issued command which is waiting for assessment.
12250 	 * A command which is not issued will not have
12251 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12252 	 */
12253 	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12254 		sd_ssc_print(ssc, SCSI_ERR_INFO);
12255 		return;
12256 	} else {
12257 		/*
12258 		 * For an issued command, we should clear this flag in
12259 		 * order to allow the sd_ssc_t structure to be reused for
12260 		 * multiple uscsi commands.
12261 		 */
12262 		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12263 	}
12264
12265 	/*
12266 	 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
12267 	 * commands here. And we should clear the ssc_flags before return.
12268 	 */
12269 	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12270 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12271 		return;
12272 	}
12273
12274 	switch (tp_assess) {
12275 	case SD_FMT_IGNORE:
12276 	case SD_FMT_IGNORE_COMPROMISE:
12277 		break;
12278 	case SD_FMT_STATUS_CHECK:
12279 		/*
12280 		 * For a failed command (including a succeeded command
12281 		 * with invalid data sent back).
12282 		 */
12283 		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12284 		break;
12285 	case SD_FMT_STANDARD:
12286 		/*
12287 		 * Always for succeeded commands, possibly with sense
12288 		 * data sent back.
12289 		 * Limitation:
12290 		 *	We can only handle a succeeded command with sense
12291 		 *	data sent back when auto-request-sense is enabled.
12292 		 */
12293 		senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12294 		    ssc->ssc_uscsi_cmd->uscsi_rqresid;
12295 		if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12296 		    (un->un_f_arq_enabled == TRUE) &&
12297 		    senlen > 0 &&
12298 		    ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12299 			sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12300 		}
12301 		break;
12302 	default:
12303 		/*
12304 		 * Should not have other type of assessment.
12305 		 */
12306 		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12307 		    "sd_ssc_assessment got wrong "
12308 		    "sd_type_assessment %d.\n", tp_assess);
12309 		break;
12310 	}
12311 	/*
12312 	 * Clear up the ssc_flags before return.
12313 	 */
12314 	ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12315 }
12316
12317 /*
12318  * Function: sd_ssc_post
12319  *
12320  * Description: 1. read the driver property to get fm-scsi-log flag.
12321  *	2. print log if fm_log_capable is non-zero.
12322  *	3. call sd_ssc_ereport_post to post ereport if possible.
12323  *
12324  * Context: May be called from kernel thread or interrupt context.
12325  */
12326 static void
12327 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12328 {
12329 	struct sd_lun *un;
12330 	int sd_severity;
12331
12332 	ASSERT(ssc != NULL);
12333 	un = ssc->ssc_un;
12334 	ASSERT(un != NULL);
12335
12336 	/*
12337 	 * We may enter here from sd_ssc_assessment (for a USCSI command)
12338 	 * or directly from sdintr context.
12339 	 * We don't handle a non-disk drive (CD-ROM, removable media).
12340 	 * Clear the ssc_flags before return in case we've set
12341 	 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12342 	 * driver.
12343 */ 12344 if (ISCD(un) || un->un_f_has_removable_media) { 12345 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12346 return; 12347 } 12348 12349 switch (sd_assess) { 12350 case SD_FM_DRV_FATAL: 12351 sd_severity = SCSI_ERR_FATAL; 12352 break; 12353 case SD_FM_DRV_RECOVERY: 12354 sd_severity = SCSI_ERR_RECOVERED; 12355 break; 12356 case SD_FM_DRV_RETRY: 12357 sd_severity = SCSI_ERR_RETRYABLE; 12358 break; 12359 case SD_FM_DRV_NOTICE: 12360 sd_severity = SCSI_ERR_INFO; 12361 break; 12362 default: 12363 sd_severity = SCSI_ERR_UNKNOWN; 12364 } 12365 /* print log */ 12366 sd_ssc_print(ssc, sd_severity); 12367 12368 /* always post ereport */ 12369 sd_ssc_ereport_post(ssc, sd_assess); 12370 } 12371 12372 /* 12373 * Function: sd_ssc_set_info 12374 * 12375 * Description: Mark ssc_flags and set ssc_info which would be the 12376 * payload of uderr ereport. This function will cause 12377 * sd_ssc_ereport_post to post uderr ereport only. 12378 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12379 * the function will also call SD_ERROR or scsi_log for a 12380 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12381 * 12382 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12383 * sd_uscsi_info in. 12384 * ssc_flags - indicate the sub-category of a uderr. 12385 * comp - this argument is meaningful only when 12386 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12387 * values include: 12388 * > 0, SD_ERROR is used with comp as the driver logging 12389 * component; 12390 * = 0, scsi-log is used to log error telemetries; 12391 * < 0, no log available for this telemetry. 12392 * 12393 * Context: Kernel thread or interrupt context 12394 */ 12395 static void 12396 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 12397 { 12398 va_list ap; 12399 12400 ASSERT(ssc != NULL); 12401 ASSERT(ssc->ssc_un != NULL); 12402 12403 ssc->ssc_flags |= ssc_flags; 12404 va_start(ap, fmt); 12405 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12406 va_end(ap); 12407 12408 /* 12409 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12410 * with invalid data sent back. For non-uscsi command, the 12411 * following code will be bypassed. 12412 */ 12413 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12414 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12415 /* 12416 * If the error belong to certain component and we 12417 * do not want it to show up on the console, we 12418 * will use SD_ERROR, otherwise scsi_log is 12419 * preferred. 12420 */ 12421 if (comp > 0) { 12422 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12423 } else if (comp == 0) { 12424 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12425 CE_WARN, ssc->ssc_info); 12426 } 12427 } 12428 } 12429 } 12430 12431 /* 12432 * Function: sd_buf_iodone 12433 * 12434 * Description: Frees the sd_xbuf & returns the buf to its originator. 12435 * 12436 * Context: May be called from interrupt context. 12437 */ 12438 /* ARGSUSED */ 12439 static void 12440 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12441 { 12442 struct sd_xbuf *xp; 12443 12444 ASSERT(un != NULL); 12445 ASSERT(bp != NULL); 12446 ASSERT(!mutex_owned(SD_MUTEX(un))); 12447 12448 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12449 12450 xp = SD_GET_XBUF(bp); 12451 ASSERT(xp != NULL); 12452 12453 /* xbuf is gone after this */ 12454 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12455 mutex_enter(SD_MUTEX(un)); 12456 12457 /* 12458 * Grab time when the cmd completed. 
12459 	 * This is used for determining if the system has been
12460 	 * idle long enough to report the device as idle to the
12461 	 * PM framework. This lowers overhead, and therefore improves
12462 	 * performance per I/O operation.
12463 	 */
12464 		un->un_pm_idle_time = ddi_get_time();
12465
12466 		un->un_ncmds_in_driver--;
12467 		ASSERT(un->un_ncmds_in_driver >= 0);
12468 		SD_INFO(SD_LOG_IO, un,
12469 		    "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12470 		    un->un_ncmds_in_driver);
12471
12472 		mutex_exit(SD_MUTEX(un));
12473 	}
12474
12475 	biodone(bp);			/* bp is gone after this */
12476
12477 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12478 }
12479
12480
12481 /*
12482  * Function: sd_uscsi_iodone
12483  *
12484  * Description: Frees the sd_xbuf & returns the buf to its originator.
12485  *
12486  * Context: May be called from interrupt context.
12487  */
12488 /* ARGSUSED */
12489 static void
12490 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12491 {
12492 	struct sd_xbuf *xp;
12493
12494 	ASSERT(un != NULL);
12495 	ASSERT(bp != NULL);
12496
12497 	xp = SD_GET_XBUF(bp);
12498 	ASSERT(xp != NULL);
12499 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12500
12501 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12502
12503 	bp->b_private = xp->xb_private;
12504
12505 	mutex_enter(SD_MUTEX(un));
12506
12507 	/*
12508 	 * Grab time when the cmd completed.
12509 	 * This is used for determining if the system has been
12510 	 * idle long enough to report the device as idle to the
12511 	 * PM framework. This lowers overhead, and therefore improves
12512 	 * performance per I/O operation.
12513 	 */
12514 	un->un_pm_idle_time = ddi_get_time();
12515
12516 	un->un_ncmds_in_driver--;
12517 	ASSERT(un->un_ncmds_in_driver >= 0);
12518 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12519 	    un->un_ncmds_in_driver);
12520
12521 	mutex_exit(SD_MUTEX(un));
12522
12523 	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12524 	    SENSE_LENGTH) {
12525 		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12526 		    MAX_SENSE_LENGTH);
12527 	} else {
12528 		kmem_free(xp, sizeof (struct sd_xbuf));
12529 	}
12530
12531 	biodone(bp);
12532
12533 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12534 }
12535
12536
12537 /*
12538  * Function: sd_mapblockaddr_iostart
12539  *
12540  * Description: Verify request lies within the partition limits for
12541  *	the indicated minor device. Issue "overrun" buf if
12542  *	request would exceed partition range. Converts
12543  *	partition-relative block address to absolute.
12544  *
12545  *	Upon exit of this function:
12546  *	1. I/O is aligned
12547  *	   xp->xb_blkno represents the absolute sector address
12548  *	2. I/O is misaligned
12549  *	   xp->xb_blkno represents the absolute logical block address
12550  *	   based on DEV_BSIZE. The logical block address will be
12551  *	   converted to a physical sector address in
12552  *	   sd_mapblocksize_iostart.
12553  *	3. I/O is misaligned but is aligned in the "overrun" buf
12554  *	   xp->xb_blkno represents the absolute logical block address
12555  *	   based on DEV_BSIZE. The logical block address will be
12556  *	   converted to a physical sector address in
12557  *	   sd_mapblocksize_iostart, but no RMW will be issued in this case.
12558  *
12559  * Context: Can sleep
12560  *
12561  * Issues: This follows what the old code did, in terms of accessing
12562  *	some of the partition info in the unit struct without holding
12563  *	the mutex. This is a general issue: if the partition info
12564  *	can be altered while IO is in progress...
as soon as we send
12565  *	a buf, its partitioning can be invalid before it gets to the
12566  *	device. Probably the right fix is to move partitioning out
12567  *	of the driver entirely.
12568  */
12569
12570 static void
12571 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12572 {
12573 	diskaddr_t nblocks;	/* #blocks in the given partition */
12574 	daddr_t blocknum;	/* Block number specified by the buf */
12575 	size_t requested_nblocks;
12576 	size_t available_nblocks;
12577 	int partition;
12578 	diskaddr_t partition_offset;
12579 	struct sd_xbuf *xp;
12580 	int secmask = 0, blknomask = 0;
12581 	ushort_t is_aligned = TRUE;
12582
12583 	ASSERT(un != NULL);
12584 	ASSERT(bp != NULL);
12585 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12586
12587 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12588 	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12589
12590 	xp = SD_GET_XBUF(bp);
12591 	ASSERT(xp != NULL);
12592
12593 	/*
12594 	 * If the geometry is not indicated as valid, attempt to access
12595 	 * the unit & verify the geometry/label. This can be the case for
12596 	 * removable-media devices, or if the device was opened in
12597 	 * NDELAY/NONBLOCK mode.
12598 	 */
12599 	partition = SDPART(bp->b_edev);
12600
12601 	if (!SD_IS_VALID_LABEL(un)) {
12602 		sd_ssc_t *ssc;
12603 		/*
12604 		 * Initialize sd_ssc_t for internal uscsi commands.
12605 		 * To avoid a potential performance issue, we allocate
12606 		 * this memory only when the label is invalid.
12607 		 */
12608 		ssc = sd_ssc_init(un);
12609
12610 		if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12611 			/*
12612 			 * For removable devices it is possible to start an
12613 			 * I/O without a media by opening the device in nodelay
12614 			 * mode. Also for writable CDs there can be many
12615 			 * scenarios where there is no geometry yet but the
12616 			 * volume manager is trying to issue a read() just
12617 			 * because it can see the TOC on the CD. So do not
12618 			 * print a message for removables.
12619 			 */
12620 			if (!un->un_f_has_removable_media) {
12621 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12622 				    "i/o to invalid geometry\n");
12623 			}
12624 			bioerror(bp, EIO);
12625 			bp->b_resid = bp->b_bcount;
12626 			SD_BEGIN_IODONE(index, un, bp);
12627
12628 			sd_ssc_fini(ssc);
12629 			return;
12630 		}
12631 		sd_ssc_fini(ssc);
12632 	}
12633
12634 	nblocks = 0;
12635 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
12636 	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12637
12638 	if (un->un_f_enable_rmw) {
12639 		blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12640 		secmask = un->un_phy_blocksize - 1;
12641 	} else {
12642 		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12643 		secmask = un->un_tgt_blocksize - 1;
12644 	}
12645
12646 	if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12647 		is_aligned = FALSE;
12648 	}
12649
12650 	if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12651 		/*
12652 		 * If the I/O is aligned, there is no need to involve RMW
12653 		 * (Read-Modify-Write); convert the logical block number
12654 		 * to the target's physical sector number.
12655 		 */
12656 		if (is_aligned) {
12657 			xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12658 		} else {
12659 			/*
12660 			 * There is no RMW if we're just reading, so don't
12661 			 * warn or error out because of it.
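			 *
			 * As a worked example with a hypothetical
			 * 4096-byte block size and DEV_BSIZE of 512:
			 * blknomask = (4096 / 512) - 1 = 7 and
			 * secmask = 4095, so a buf with b_lblkno == 3
			 * or with a b_bcount of 2048 fails the
			 * alignment test above and is handled through
			 * RMW (or rejected, depending on un_f_rmw_type).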
12662 */ 12663 if (bp->b_flags & B_READ) { 12664 /*EMPTY*/ 12665 } else if (!un->un_f_enable_rmw && 12666 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) { 12667 bp->b_flags |= B_ERROR; 12668 goto error_exit; 12669 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) { 12670 mutex_enter(SD_MUTEX(un)); 12671 if (!un->un_f_enable_rmw && 12672 un->un_rmw_msg_timeid == NULL) { 12673 scsi_log(SD_DEVINFO(un), sd_label, 12674 CE_WARN, "I/O request is not " 12675 "aligned with %d disk sector size. " 12676 "It is handled through Read Modify " 12677 "Write but the performance is " 12678 "very low.\n", 12679 un->un_tgt_blocksize); 12680 un->un_rmw_msg_timeid = 12681 timeout(sd_rmw_msg_print_handler, 12682 un, SD_RMW_MSG_PRINT_TIMEOUT); 12683 } else { 12684 un->un_rmw_incre_count ++; 12685 } 12686 mutex_exit(SD_MUTEX(un)); 12687 } 12688 12689 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12690 partition_offset = SD_TGT2SYSBLOCK(un, 12691 partition_offset); 12692 } 12693 } 12694 12695 /* 12696 * blocknum is the starting block number of the request. At this 12697 * point it is still relative to the start of the minor device. 12698 */ 12699 blocknum = xp->xb_blkno; 12700 12701 /* 12702 * Legacy: If the starting block number is one past the last block 12703 * in the partition, do not set B_ERROR in the buf. 12704 */ 12705 if (blocknum == nblocks) { 12706 goto error_exit; 12707 } 12708 12709 /* 12710 * Confirm that the first block of the request lies within the 12711 * partition limits. Also the requested number of bytes must be 12712 * a multiple of the system block size. 12713 */ 12714 if ((blocknum < 0) || (blocknum >= nblocks) || 12715 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12716 bp->b_flags |= B_ERROR; 12717 goto error_exit; 12718 } 12719 12720 /* 12721 * If the requsted # blocks exceeds the available # blocks, that 12722 * is an overrun of the partition. 12723 */ 12724 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12725 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12726 } else { 12727 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12728 } 12729 12730 available_nblocks = (size_t)(nblocks - blocknum); 12731 ASSERT(nblocks >= blocknum); 12732 12733 if (requested_nblocks > available_nblocks) { 12734 size_t resid; 12735 12736 /* 12737 * Allocate an "overrun" buf to allow the request to proceed 12738 * for the amount of space available in the partition. The 12739 * amount not transferred will be added into the b_resid 12740 * when the operation is complete. The overrun buf 12741 * replaces the original buf here, and the original buf 12742 * is saved inside the overrun buf, for later use. 12743 */ 12744 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12745 resid = SD_TGTBLOCKS2BYTES(un, 12746 (offset_t)(requested_nblocks - available_nblocks)); 12747 } else { 12748 resid = SD_SYSBLOCKS2BYTES( 12749 (offset_t)(requested_nblocks - available_nblocks)); 12750 } 12751 12752 size_t count = bp->b_bcount - resid; 12753 /* 12754 * Note: count is an unsigned entity thus it'll NEVER 12755 * be less than 0 so ASSERT the original values are 12756 * correct. 12757 */ 12758 ASSERT(bp->b_bcount >= resid); 12759 12760 bp = sd_bioclone_alloc(bp, count, blocknum, 12761 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12762 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12763 ASSERT(xp != NULL); 12764 } 12765 12766 /* At this point there should be no residual for this buf. */ 12767 ASSERT(bp->b_resid == 0); 12768 12769 /* Convert the block number to an absolute address. 
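 * As an illustration with hypothetical numbers: partition-relative
 * block 34 on a partition whose partition_offset is 2048 becomes
 * absolute block 2082.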
*/ 12770 xp->xb_blkno += partition_offset; 12771 12772 SD_NEXT_IOSTART(index, un, bp); 12773 12774 SD_TRACE(SD_LOG_IO_PARTITION, un, 12775 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12776 12777 return; 12778 12779 error_exit: 12780 bp->b_resid = bp->b_bcount; 12781 SD_BEGIN_IODONE(index, un, bp); 12782 SD_TRACE(SD_LOG_IO_PARTITION, un, 12783 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12784 } 12785 12786 12787 /* 12788 * Function: sd_mapblockaddr_iodone 12789 * 12790 * Description: Completion-side processing for partition management. 12791 * 12792 * Context: May be called under interrupt context 12793 */ 12794 12795 static void 12796 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12797 { 12798 /* int partition; */ /* Not used, see below. */ 12799 ASSERT(un != NULL); 12800 ASSERT(bp != NULL); 12801 ASSERT(!mutex_owned(SD_MUTEX(un))); 12802 12803 SD_TRACE(SD_LOG_IO_PARTITION, un, 12804 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12805 12806 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12807 /* 12808 * We have an "overrun" buf to deal with... 12809 */ 12810 struct sd_xbuf *xp; 12811 struct buf *obp; /* ptr to the original buf */ 12812 12813 xp = SD_GET_XBUF(bp); 12814 ASSERT(xp != NULL); 12815 12816 /* Retrieve the pointer to the original buf */ 12817 obp = (struct buf *)xp->xb_private; 12818 ASSERT(obp != NULL); 12819 12820 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12821 bioerror(obp, bp->b_error); 12822 12823 sd_bioclone_free(bp); 12824 12825 /* 12826 * Get back the original buf. 12827 * Note that since the restoration of xb_blkno below 12828 * was removed, the sd_xbuf is not needed. 12829 */ 12830 bp = obp; 12831 /* 12832 * xp = SD_GET_XBUF(bp); 12833 * ASSERT(xp != NULL); 12834 */ 12835 } 12836 12837 /* 12838 * Convert sd->xb_blkno back to a minor-device relative value. 12839 * Note: this has been commented out, as it is not needed in the 12840 * current implementation of the driver (ie, since this function 12841 * is at the top of the layering chains, so the info will be 12842 * discarded) and it is in the "hot" IO path. 12843 * 12844 * partition = getminor(bp->b_edev) & SDPART_MASK; 12845 * xp->xb_blkno -= un->un_offset[partition]; 12846 */ 12847 12848 SD_NEXT_IODONE(index, un, bp); 12849 12850 SD_TRACE(SD_LOG_IO_PARTITION, un, 12851 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12852 } 12853 12854 12855 /* 12856 * Function: sd_mapblocksize_iostart 12857 * 12858 * Description: Convert between system block size (un->un_sys_blocksize) 12859 * and target block size (un->un_tgt_blocksize). 12860 * 12861 * Context: Can sleep to allocate resources. 12862 * 12863 * Assumptions: A higher layer has already performed any partition validation, 12864 * and converted the xp->xb_blkno to an absolute value relative 12865 * to the start of the device. 12866 * 12867 * It is also assumed that the higher layer has implemented 12868 * an "overrun" mechanism for the case where the request would 12869 * read/write beyond the end of a partition. In this case we 12870 * assume (and ASSERT) that bp->b_resid == 0. 12871 * 12872 * Note: The implementation for this routine assumes the target 12873 * block size remains constant between allocation and transport. 
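 *
 *		Example (illustrative): a 1 KB write starting at system
 *		block 3 on a target with 2 KB sectors is unaligned, so a
 *		2 KB shadow READ is issued first; the caller's data is then
 *		merged into the returned sector and written back
 *		(read-modify-write).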
12874 */ 12875 12876 static void 12877 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12878 { 12879 struct sd_mapblocksize_info *bsp; 12880 struct sd_xbuf *xp; 12881 offset_t first_byte; 12882 daddr_t start_block, end_block; 12883 daddr_t request_bytes; 12884 ushort_t is_aligned = FALSE; 12885 12886 ASSERT(un != NULL); 12887 ASSERT(bp != NULL); 12888 ASSERT(!mutex_owned(SD_MUTEX(un))); 12889 ASSERT(bp->b_resid == 0); 12890 12891 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12892 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12893 12894 /* 12895 * For a non-writable CD, a write request is an error 12896 */ 12897 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12898 (un->un_f_mmc_writable_media == FALSE)) { 12899 bioerror(bp, EIO); 12900 bp->b_resid = bp->b_bcount; 12901 SD_BEGIN_IODONE(index, un, bp); 12902 return; 12903 } 12904 12905 /* 12906 * We do not need a shadow buf if the device is using 12907 * un->un_sys_blocksize as its block size or if bcount == 0. 12908 * In this case there is no layer-private data block allocated. 12909 */ 12910 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || 12911 (bp->b_bcount == 0)) { 12912 goto done; 12913 } 12914 12915 #if defined(__i386) || defined(__amd64) 12916 /* We do not support non-block-aligned transfers for ROD devices */ 12917 ASSERT(!ISROD(un)); 12918 #endif 12919 12920 xp = SD_GET_XBUF(bp); 12921 ASSERT(xp != NULL); 12922 12923 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12924 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12925 un->un_tgt_blocksize, DEV_BSIZE); 12926 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12927 "request start block:0x%x\n", xp->xb_blkno); 12928 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12929 "request len:0x%x\n", bp->b_bcount); 12930 12931 /* 12932 * Allocate the layer-private data area for the mapblocksize layer. 12933 * Layers are allowed to use the xp_private member of the sd_xbuf 12934 * struct to store the pointer to their layer-private data block, but 12935 * each layer also has the responsibility of restoring the prior 12936 * contents of xb_private before returning the buf/xbuf to the 12937 * higher layer that sent it. 12938 * 12939 * Here we save the prior contents of xp->xb_private into the 12940 * bsp->mbs_oprivate field of our layer-private data area. This value 12941 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12942 * the layer-private area and returning the buf/xbuf to the layer 12943 * that sent it. 12944 * 12945 * Note that here we use kmem_zalloc for the allocation as there are 12946 * parts of the mapblocksize code that expect certain fields to be 12947 * zero unless explicitly set to a required value. 12948 */ 12949 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12950 bsp->mbs_oprivate = xp->xb_private; 12951 xp->xb_private = bsp; 12952 12953 /* 12954 * This treats the data on the disk (target) as an array of bytes. 12955 * first_byte is the byte offset, from the beginning of the device, 12956 * to the location of the request. This is converted from a 12957 * un->un_sys_blocksize block address to a byte offset, and then back 12958 * to a block address based upon a un->un_tgt_blocksize block size. 12959 * 12960 * xp->xb_blkno should be absolute upon entry into this function, 12961 * but, but it is based upon partitions that use the "system" 12962 * block size. It must be adjusted to reflect the block size of 12963 * the target. 
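	 *
	 * Example (illustrative, DEV_BSIZE = 512, un_tgt_blocksize = 2048):
	 * a 1024-byte request at system block 3 gives first_byte = 1536,
	 * so start_block = 1536 / 2048 = 0 and end_block =
	 * (1536 + 1024 + 2047) / 2048 = 2, making request_bytes = 4096.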
12964 * 12965 * Note that end_block is actually the block that follows the last 12966 * block of the request, but that's what is needed for the computation. 12967 */ 12968 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 12969 if (un->un_f_enable_rmw) { 12970 start_block = xp->xb_blkno = 12971 (first_byte / un->un_phy_blocksize) * 12972 (un->un_phy_blocksize / DEV_BSIZE); 12973 end_block = ((first_byte + bp->b_bcount + 12974 un->un_phy_blocksize - 1) / un->un_phy_blocksize) * 12975 (un->un_phy_blocksize / DEV_BSIZE); 12976 } else { 12977 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12978 end_block = (first_byte + bp->b_bcount + 12979 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 12980 } 12981 12982 /* request_bytes is rounded up to a multiple of the target block size */ 12983 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12984 12985 /* 12986 * See if the starting address of the request and the request 12987 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12988 * then we do not need to allocate a shadow buf to handle the request. 12989 */ 12990 if (un->un_f_enable_rmw) { 12991 if (((first_byte % un->un_phy_blocksize) == 0) && 12992 ((bp->b_bcount % un->un_phy_blocksize) == 0)) { 12993 is_aligned = TRUE; 12994 } 12995 } else { 12996 if (((first_byte % un->un_tgt_blocksize) == 0) && 12997 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12998 is_aligned = TRUE; 12999 } 13000 } 13001 13002 if ((bp->b_flags & B_READ) == 0) { 13003 /* 13004 * Lock the range for a write operation. An aligned request is 13005 * considered a simple write; otherwise the request must be a 13006 * read-modify-write. 13007 */ 13008 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 13009 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 13010 } 13011 13012 /* 13013 * Alloc a shadow buf if the request is not aligned. Also, this is 13014 * where the READ command is generated for a read-modify-write. (The 13015 * write phase is deferred until after the read completes.) 13016 */ 13017 if (is_aligned == FALSE) { 13018 13019 struct sd_mapblocksize_info *shadow_bsp; 13020 struct sd_xbuf *shadow_xp; 13021 struct buf *shadow_bp; 13022 13023 /* 13024 * Allocate the shadow buf and it associated xbuf. Note that 13025 * after this call the xb_blkno value in both the original 13026 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 13027 * same: absolute relative to the start of the device, and 13028 * adjusted for the target block size. The b_blkno in the 13029 * shadow buf will also be set to this value. We should never 13030 * change b_blkno in the original bp however. 13031 * 13032 * Note also that the shadow buf will always need to be a 13033 * READ command, regardless of whether the incoming command 13034 * is a READ or a WRITE. 13035 */ 13036 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 13037 xp->xb_blkno, 13038 (int (*)(struct buf *)) sd_mapblocksize_iodone); 13039 13040 shadow_xp = SD_GET_XBUF(shadow_bp); 13041 13042 /* 13043 * Allocate the layer-private data for the shadow buf. 13044 * (No need to preserve xb_private in the shadow xbuf.) 
13045 */ 13046 shadow_xp->xb_private = shadow_bsp = 13047 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 13048 13049 /* 13050 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 13051 * to figure out where the start of the user data is (based upon 13052 * the system block size) in the data returned by the READ 13053 * command (which will be based upon the target blocksize). Note 13054 * that this is only really used if the request is unaligned. 13055 */ 13056 if (un->un_f_enable_rmw) { 13057 bsp->mbs_copy_offset = (ssize_t)(first_byte - 13058 ((offset_t)xp->xb_blkno * un->un_sys_blocksize)); 13059 ASSERT((bsp->mbs_copy_offset >= 0) && 13060 (bsp->mbs_copy_offset < un->un_phy_blocksize)); 13061 } else { 13062 bsp->mbs_copy_offset = (ssize_t)(first_byte - 13063 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 13064 ASSERT((bsp->mbs_copy_offset >= 0) && 13065 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 13066 } 13067 13068 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 13069 13070 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 13071 13072 /* Transfer the wmap (if any) to the shadow buf */ 13073 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 13074 bsp->mbs_wmp = NULL; 13075 13076 /* 13077 * The shadow buf goes on from here in place of the 13078 * original buf. 13079 */ 13080 shadow_bsp->mbs_orig_bp = bp; 13081 bp = shadow_bp; 13082 } 13083 13084 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13085 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 13086 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13087 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 13088 request_bytes); 13089 SD_INFO(SD_LOG_IO_RMMEDIA, un, 13090 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 13091 13092 done: 13093 SD_NEXT_IOSTART(index, un, bp); 13094 13095 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 13096 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 13097 } 13098 13099 13100 /* 13101 * Function: sd_mapblocksize_iodone 13102 * 13103 * Description: Completion side processing for block-size mapping. 13104 * 13105 * Context: May be called under interrupt context 13106 */ 13107 13108 static void 13109 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 13110 { 13111 struct sd_mapblocksize_info *bsp; 13112 struct sd_xbuf *xp; 13113 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 13114 struct buf *orig_bp; /* ptr to the original buf */ 13115 offset_t shadow_end; 13116 offset_t request_end; 13117 offset_t shadow_start; 13118 ssize_t copy_offset; 13119 size_t copy_length; 13120 size_t shortfall; 13121 uint_t is_write; /* TRUE if this bp is a WRITE */ 13122 uint_t has_wmap; /* TRUE is this bp has a wmap */ 13123 13124 ASSERT(un != NULL); 13125 ASSERT(bp != NULL); 13126 13127 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 13128 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 13129 13130 /* 13131 * There is no shadow buf or layer-private data if the target is 13132 * using un->un_sys_blocksize as its block size or if bcount == 0. 13133 */ 13134 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || 13135 (bp->b_bcount == 0)) { 13136 goto exit; 13137 } 13138 13139 xp = SD_GET_XBUF(bp); 13140 ASSERT(xp != NULL); 13141 13142 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 13143 bsp = xp->xb_private; 13144 13145 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 13146 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 13147 13148 if (is_write) { 13149 /* 13150 * For a WRITE request we must free up the block range that 13151 * we have locked up. 
This holds regardless of whether this is 13152 * an aligned write request or a read-modify-write request. 13153 */ 13154 sd_range_unlock(un, bsp->mbs_wmp); 13155 bsp->mbs_wmp = NULL; 13156 } 13157 13158 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 13159 /* 13160 * An aligned read or write command will have no shadow buf; 13161 * there is not much else to do with it. 13162 */ 13163 goto done; 13164 } 13165 13166 orig_bp = bsp->mbs_orig_bp; 13167 ASSERT(orig_bp != NULL); 13168 orig_xp = SD_GET_XBUF(orig_bp); 13169 ASSERT(orig_xp != NULL); 13170 ASSERT(!mutex_owned(SD_MUTEX(un))); 13171 13172 if (!is_write && has_wmap) { 13173 /* 13174 * A READ with a wmap means this is the READ phase of a 13175 * read-modify-write. If an error occurred on the READ then 13176 * we do not proceed with the WRITE phase or copy any data. 13177 * Just release the write maps and return with an error. 13178 */ 13179 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 13180 orig_bp->b_resid = orig_bp->b_bcount; 13181 bioerror(orig_bp, bp->b_error); 13182 sd_range_unlock(un, bsp->mbs_wmp); 13183 goto freebuf_done; 13184 } 13185 } 13186 13187 /* 13188 * Here is where we set up to copy the data from the shadow buf 13189 * into the space associated with the original buf. 13190 * 13191 * To deal with the conversion between block sizes, these 13192 * computations treat the data as an array of bytes, with the 13193 * first byte (byte 0) corresponding to the first byte in the 13194 * first block on the disk. 13195 */ 13196 13197 /* 13198 * shadow_start and shadow_len indicate the location and size of 13199 * the data returned with the shadow IO request. 13200 */ 13201 if (un->un_f_enable_rmw) { 13202 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 13203 } else { 13204 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 13205 } 13206 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 13207 13208 /* 13209 * copy_offset gives the offset (in bytes) from the start of the first 13210 * block of the READ request to the beginning of the data. We retrieve 13211 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved 13212 * there by sd_mapblockize_iostart(). copy_length gives the amount of 13213 * data to be copied (in bytes). 13214 */ 13215 copy_offset = bsp->mbs_copy_offset; 13216 if (un->un_f_enable_rmw) { 13217 ASSERT((copy_offset >= 0) && 13218 (copy_offset < un->un_phy_blocksize)); 13219 } else { 13220 ASSERT((copy_offset >= 0) && 13221 (copy_offset < un->un_tgt_blocksize)); 13222 } 13223 13224 copy_length = orig_bp->b_bcount; 13225 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 13226 13227 /* 13228 * Set up the resid and error fields of orig_bp as appropriate. 13229 */ 13230 if (shadow_end >= request_end) { 13231 /* We got all the requested data; set resid to zero */ 13232 orig_bp->b_resid = 0; 13233 } else { 13234 /* 13235 * We failed to get enough data to fully satisfy the original 13236 * request. Just copy back whatever data we got and set 13237 * up the residual and error code as required. 13238 * 13239 * 'shortfall' is the amount by which the data received with the 13240 * shadow buf has "fallen short" of the requested amount. 13241 */ 13242 shortfall = (size_t)(request_end - shadow_end); 13243 13244 if (shortfall > orig_bp->b_bcount) { 13245 /* 13246 * We did not get enough data to even partially 13247 * fulfill the original request. The residual is 13248 * equal to the amount requested. 
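			 *
			 * (Illustrative: with shadow_start = 0,
			 * copy_offset = 1536 and orig_bp->b_bcount = 1024,
			 * request_end = 2560; if the shadow READ moved only
			 * 1024 bytes, shortfall = 1536 > 1024, so none of
			 * the caller's data arrived.)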
13249 */ 13250 orig_bp->b_resid = orig_bp->b_bcount; 13251 } else { 13252 /* 13253 * We did not get all the data that we requested 13254 * from the device, but we will try to return what 13255 * portion we did get. 13256 */ 13257 orig_bp->b_resid = shortfall; 13258 } 13259 ASSERT(copy_length >= orig_bp->b_resid); 13260 copy_length -= orig_bp->b_resid; 13261 } 13262 13263 /* Propagate the error code from the shadow buf to the original buf */ 13264 bioerror(orig_bp, bp->b_error); 13265 13266 if (is_write) { 13267 goto freebuf_done; /* No data copying for a WRITE */ 13268 } 13269 13270 if (has_wmap) { 13271 /* 13272 * This is a READ command from the READ phase of a 13273 * read-modify-write request. We have to copy the data given 13274 * by the user OVER the data returned by the READ command, 13275 * then convert the command from a READ to a WRITE and send 13276 * it back to the target. 13277 */ 13278 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 13279 copy_length); 13280 13281 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 13282 13283 /* 13284 * Dispatch the WRITE command to the taskq thread, which 13285 * will in turn send the command to the target. When the 13286 * WRITE command completes, we (sd_mapblocksize_iodone()) 13287 * will get called again as part of the iodone chain 13288 * processing for it. Note that we will still be dealing 13289 * with the shadow buf at that point. 13290 */ 13291 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 13292 KM_NOSLEEP) != 0) { 13293 /* 13294 * Dispatch was successful so we are done. Return 13295 * without going any higher up the iodone chain. Do 13296 * not free up any layer-private data until after the 13297 * WRITE completes. 13298 */ 13299 return; 13300 } 13301 13302 /* 13303 * Dispatch of the WRITE command failed; set up the error 13304 * condition and send this IO back up the iodone chain. 13305 */ 13306 bioerror(orig_bp, EIO); 13307 orig_bp->b_resid = orig_bp->b_bcount; 13308 13309 } else { 13310 /* 13311 * This is a regular READ request (ie, not a RMW). Copy the 13312 * data from the shadow buf into the original buf. The 13313 * copy_offset compensates for any "misalignment" between the 13314 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13315 * original buf (with its un->un_sys_blocksize blocks). 13316 */ 13317 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13318 copy_length); 13319 } 13320 13321 freebuf_done: 13322 13323 /* 13324 * At this point we still have both the shadow buf AND the original 13325 * buf to deal with, as well as the layer-private data area in each. 13326 * Local variables are as follows: 13327 * 13328 * bp -- points to shadow buf 13329 * xp -- points to xbuf of shadow buf 13330 * bsp -- points to layer-private data area of shadow buf 13331 * orig_bp -- points to original buf 13332 * 13333 * First free the shadow buf and its associated xbuf, then free the 13334 * layer-private data area from the shadow buf. There is no need to 13335 * restore xb_private in the shadow xbuf. 13336 */ 13337 sd_shadow_buf_free(bp); 13338 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13339 13340 /* 13341 * Now update the local variables to point to the original buf, xbuf, 13342 * and layer-private area. 
13343 */ 13344 bp = orig_bp; 13345 xp = SD_GET_XBUF(bp); 13346 ASSERT(xp != NULL); 13347 ASSERT(xp == orig_xp); 13348 bsp = xp->xb_private; 13349 ASSERT(bsp != NULL); 13350 13351 done: 13352 /* 13353 * Restore xb_private to whatever it was set to by the next higher 13354 * layer in the chain, then free the layer-private data area. 13355 */ 13356 xp->xb_private = bsp->mbs_oprivate; 13357 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13358 13359 exit: 13360 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13361 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13362 13363 SD_NEXT_IODONE(index, un, bp); 13364 } 13365 13366 13367 /* 13368 * Function: sd_checksum_iostart 13369 * 13370 * Description: A stub function for a layer that's currently not used. 13371 * For now just a placeholder. 13372 * 13373 * Context: Kernel thread context 13374 */ 13375 13376 static void 13377 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13378 { 13379 ASSERT(un != NULL); 13380 ASSERT(bp != NULL); 13381 ASSERT(!mutex_owned(SD_MUTEX(un))); 13382 SD_NEXT_IOSTART(index, un, bp); 13383 } 13384 13385 13386 /* 13387 * Function: sd_checksum_iodone 13388 * 13389 * Description: A stub function for a layer that's currently not used. 13390 * For now just a placeholder. 13391 * 13392 * Context: May be called under interrupt context 13393 */ 13394 13395 static void 13396 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13397 { 13398 ASSERT(un != NULL); 13399 ASSERT(bp != NULL); 13400 ASSERT(!mutex_owned(SD_MUTEX(un))); 13401 SD_NEXT_IODONE(index, un, bp); 13402 } 13403 13404 13405 /* 13406 * Function: sd_checksum_uscsi_iostart 13407 * 13408 * Description: A stub function for a layer that's currently not used. 13409 * For now just a placeholder. 13410 * 13411 * Context: Kernel thread context 13412 */ 13413 13414 static void 13415 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13416 { 13417 ASSERT(un != NULL); 13418 ASSERT(bp != NULL); 13419 ASSERT(!mutex_owned(SD_MUTEX(un))); 13420 SD_NEXT_IOSTART(index, un, bp); 13421 } 13422 13423 13424 /* 13425 * Function: sd_checksum_uscsi_iodone 13426 * 13427 * Description: A stub function for a layer that's currently not used. 13428 * For now just a placeholder. 13429 * 13430 * Context: May be called under interrupt context 13431 */ 13432 13433 static void 13434 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13435 { 13436 ASSERT(un != NULL); 13437 ASSERT(bp != NULL); 13438 ASSERT(!mutex_owned(SD_MUTEX(un))); 13439 SD_NEXT_IODONE(index, un, bp); 13440 } 13441 13442 13443 /* 13444 * Function: sd_pm_iostart 13445 * 13446 * Description: iostart-side routine for Power mangement. 13447 * 13448 * Context: Kernel thread context 13449 */ 13450 13451 static void 13452 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13453 { 13454 ASSERT(un != NULL); 13455 ASSERT(bp != NULL); 13456 ASSERT(!mutex_owned(SD_MUTEX(un))); 13457 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13458 13459 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13460 13461 if (sd_pm_entry(un) != DDI_SUCCESS) { 13462 /* 13463 * Set up to return the failed buf back up the 'iodone' 13464 * side of the calling chain. 
13465 */ 13466 bioerror(bp, EIO); 13467 bp->b_resid = bp->b_bcount; 13468 13469 SD_BEGIN_IODONE(index, un, bp); 13470 13471 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13472 return; 13473 } 13474 13475 SD_NEXT_IOSTART(index, un, bp); 13476 13477 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13478 } 13479 13480 13481 /* 13482 * Function: sd_pm_iodone 13483 * 13484 * Description: iodone-side routine for power mangement. 13485 * 13486 * Context: may be called from interrupt context 13487 */ 13488 13489 static void 13490 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13491 { 13492 ASSERT(un != NULL); 13493 ASSERT(bp != NULL); 13494 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13495 13496 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13497 13498 /* 13499 * After attach the following flag is only read, so don't 13500 * take the penalty of acquiring a mutex for it. 13501 */ 13502 if (un->un_f_pm_is_enabled == TRUE) { 13503 sd_pm_exit(un); 13504 } 13505 13506 SD_NEXT_IODONE(index, un, bp); 13507 13508 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13509 } 13510 13511 13512 /* 13513 * Function: sd_core_iostart 13514 * 13515 * Description: Primary driver function for enqueuing buf(9S) structs from 13516 * the system and initiating IO to the target device 13517 * 13518 * Context: Kernel thread context. Can sleep. 13519 * 13520 * Assumptions: - The given xp->xb_blkno is absolute 13521 * (ie, relative to the start of the device). 13522 * - The IO is to be done using the native blocksize of 13523 * the device, as specified in un->un_tgt_blocksize. 13524 */ 13525 /* ARGSUSED */ 13526 static void 13527 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13528 { 13529 struct sd_xbuf *xp; 13530 13531 ASSERT(un != NULL); 13532 ASSERT(bp != NULL); 13533 ASSERT(!mutex_owned(SD_MUTEX(un))); 13534 ASSERT(bp->b_resid == 0); 13535 13536 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13537 13538 xp = SD_GET_XBUF(bp); 13539 ASSERT(xp != NULL); 13540 13541 mutex_enter(SD_MUTEX(un)); 13542 13543 /* 13544 * If we are currently in the failfast state, fail any new IO 13545 * that has B_FAILFAST set, then return. 13546 */ 13547 if ((bp->b_flags & B_FAILFAST) && 13548 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13549 mutex_exit(SD_MUTEX(un)); 13550 bioerror(bp, EIO); 13551 bp->b_resid = bp->b_bcount; 13552 SD_BEGIN_IODONE(index, un, bp); 13553 return; 13554 } 13555 13556 if (SD_IS_DIRECT_PRIORITY(xp)) { 13557 /* 13558 * Priority command -- transport it immediately. 13559 * 13560 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13561 * because all direct priority commands should be associated 13562 * with error recovery actions which we don't want to retry. 13563 */ 13564 sd_start_cmds(un, bp); 13565 } else { 13566 /* 13567 * Normal command -- add it to the wait queue, then start 13568 * transporting commands from the wait queue. 13569 */ 13570 sd_add_buf_to_waitq(un, bp); 13571 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13572 sd_start_cmds(un, NULL); 13573 } 13574 13575 mutex_exit(SD_MUTEX(un)); 13576 13577 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13578 } 13579 13580 13581 /* 13582 * Function: sd_init_cdb_limits 13583 * 13584 * Description: This is to handle scsi_pkt initialization differences 13585 * between the driver platforms. 
13586 * 13587 * Legacy behaviors: 13588 * 13589 * If the block number or the sector count exceeds the 13590 * capabilities of a Group 0 command, shift over to a 13591 * Group 1 command. We don't blindly use Group 1 13592 * commands because a) some drives (CDC Wren IVs) get a 13593 * bit confused, and b) there is probably a fair amount 13594 * of speed difference for a target to receive and decode 13595 * a 10 byte command instead of a 6 byte command. 13596 * 13597 * The xfer time difference of 6 vs 10 byte CDBs is 13598 * still significant so this code is still worthwhile. 13599 * 10 byte CDBs are very inefficient with the fas HBA driver 13600 * and older disks. Each CDB byte took 1 usec with some 13601 * popular disks. 13602 * 13603 * Context: Must be called at attach time 13604 */ 13605 13606 static void 13607 sd_init_cdb_limits(struct sd_lun *un) 13608 { 13609 int hba_cdb_limit; 13610 13611 /* 13612 * Use CDB_GROUP1 commands for most devices except for 13613 * parallel SCSI fixed drives in which case we get better 13614 * performance using CDB_GROUP0 commands (where applicable). 13615 */ 13616 un->un_mincdb = SD_CDB_GROUP1; 13617 #if !defined(__fibre) 13618 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13619 !un->un_f_has_removable_media) { 13620 un->un_mincdb = SD_CDB_GROUP0; 13621 } 13622 #endif 13623 13624 /* 13625 * Try to read the max-cdb-length supported by HBA. 13626 */ 13627 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13628 if (0 >= un->un_max_hba_cdb) { 13629 un->un_max_hba_cdb = CDB_GROUP4; 13630 hba_cdb_limit = SD_CDB_GROUP4; 13631 } else if (0 < un->un_max_hba_cdb && 13632 un->un_max_hba_cdb < CDB_GROUP1) { 13633 hba_cdb_limit = SD_CDB_GROUP0; 13634 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13635 un->un_max_hba_cdb < CDB_GROUP5) { 13636 hba_cdb_limit = SD_CDB_GROUP1; 13637 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13638 un->un_max_hba_cdb < CDB_GROUP4) { 13639 hba_cdb_limit = SD_CDB_GROUP5; 13640 } else { 13641 hba_cdb_limit = SD_CDB_GROUP4; 13642 } 13643 13644 /* 13645 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13646 * commands for fixed disks unless we are building for a 32 bit 13647 * kernel. 13648 */ 13649 #ifdef _LP64 13650 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13651 min(hba_cdb_limit, SD_CDB_GROUP4); 13652 #else 13653 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13654 min(hba_cdb_limit, SD_CDB_GROUP1); 13655 #endif 13656 13657 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13658 ? sizeof (struct scsi_arq_status) : 1); 13659 if (!ISCD(un)) 13660 un->un_cmd_timeout = (ushort_t)sd_io_time; 13661 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 13662 } 13663 13664 13665 /* 13666 * Function: sd_initpkt_for_buf 13667 * 13668 * Description: Allocate and initialize for transport a scsi_pkt struct, 13669 * based upon the info specified in the given buf struct. 13670 * 13671 * Assumes the xb_blkno in the request is absolute (ie, 13672 * relative to the start of the device (NOT partition!). 13673 * Also assumes that the request is using the native block 13674 * size of the device (as returned by the READ CAPACITY 13675 * command). 13676 * 13677 * Return Code: SD_PKT_ALLOC_SUCCESS 13678 * SD_PKT_ALLOC_FAILURE 13679 * SD_PKT_ALLOC_FAILURE_NO_DMA 13680 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13681 * 13682 * Context: Kernel thread and may be called from software interrupt context 13683 * as part of a sdrunout callback. 
This function may not block or
 *		call routines that block.
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this
		 * transfer, set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. 
exit bp:0x%p\n", bp); 13803 13804 if ((bp->b_flags & B_ERROR) != 0) { 13805 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13806 } 13807 return (SD_PKT_ALLOC_FAILURE); 13808 } else { 13809 /* 13810 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13811 * 13812 * This should never happen. Maybe someone messed with the 13813 * kernel's minphys? 13814 */ 13815 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13816 "Request rejected: too large for CDB: " 13817 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13818 SD_ERROR(SD_LOG_IO_CORE, un, 13819 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13820 mutex_enter(SD_MUTEX(un)); 13821 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13822 13823 } 13824 } 13825 13826 13827 /* 13828 * Function: sd_destroypkt_for_buf 13829 * 13830 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13831 * 13832 * Context: Kernel thread or interrupt context 13833 */ 13834 13835 static void 13836 sd_destroypkt_for_buf(struct buf *bp) 13837 { 13838 ASSERT(bp != NULL); 13839 ASSERT(SD_GET_UN(bp) != NULL); 13840 13841 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13842 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13843 13844 ASSERT(SD_GET_PKTP(bp) != NULL); 13845 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13846 13847 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13848 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13849 } 13850 13851 /* 13852 * Function: sd_setup_rw_pkt 13853 * 13854 * Description: Determines appropriate CDB group for the requested LBA 13855 * and transfer length, calls scsi_init_pkt, and builds 13856 * the CDB. Do not use for partial DMA transfers except 13857 * for the initial transfer since the CDB size must 13858 * remain constant. 13859 * 13860 * Context: Kernel thread and may be called from software interrupt 13861 * context as part of a sdrunout callback. This function may not 13862 * block or call routines that block 13863 */ 13864 13865 13866 int 13867 sd_setup_rw_pkt(struct sd_lun *un, 13868 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13869 int (*callback)(caddr_t), caddr_t callback_arg, 13870 diskaddr_t lba, uint32_t blockcount) 13871 { 13872 struct scsi_pkt *return_pktp; 13873 union scsi_cdb *cdbp; 13874 struct sd_cdbinfo *cp = NULL; 13875 int i; 13876 13877 /* 13878 * See which size CDB to use, based upon the request. 13879 */ 13880 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13881 13882 /* 13883 * Check lba and block count against sd_cdbtab limits. 13884 * In the partial DMA case, we have to use the same size 13885 * CDB for all the transfers. Check lba + blockcount 13886 * against the max LBA so we know that segment of the 13887 * transfer can use the CDB we select. 13888 */ 13889 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13890 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13891 13892 /* 13893 * The command will fit into the CDB type 13894 * specified by sd_cdbtab[i]. 13895 */ 13896 cp = sd_cdbtab + i; 13897 13898 /* 13899 * Call scsi_init_pkt so we can fill in the 13900 * CDB. 13901 */ 13902 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13903 bp, cp->sc_grpcode, un->un_status_len, 0, 13904 flags, callback, callback_arg); 13905 13906 if (return_pktp != NULL) { 13907 13908 /* 13909 * Return new value of pkt 13910 */ 13911 *pktpp = return_pktp; 13912 13913 /* 13914 * To be safe, zero the CDB insuring there is 13915 * no leftover data from a previous command. 
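				 *
				 * (For reference, per the SCSI spec: Group 0
				 * is a 6-byte CDB with a 21-bit LBA and
				 * 8-bit count; Group 1 is 10 bytes with a
				 * 32-bit LBA and 16-bit count; Group 5 is
				 * 12 bytes (32-bit LBA) and Group 4 is
				 * 16 bytes (64-bit LBA), both with 32-bit
				 * counts.)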
13916 */ 13917 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13918 13919 /* 13920 * Handle partial DMA mapping 13921 */ 13922 if (return_pktp->pkt_resid != 0) { 13923 13924 /* 13925 * Not going to xfer as many blocks as 13926 * originally expected 13927 */ 13928 blockcount -= 13929 SD_BYTES2TGTBLOCKS(un, 13930 return_pktp->pkt_resid); 13931 } 13932 13933 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13934 13935 /* 13936 * Set command byte based on the CDB 13937 * type we matched. 13938 */ 13939 cdbp->scc_cmd = cp->sc_grpmask | 13940 ((bp->b_flags & B_READ) ? 13941 SCMD_READ : SCMD_WRITE); 13942 13943 SD_FILL_SCSI1_LUN(un, return_pktp); 13944 13945 /* 13946 * Fill in LBA and length 13947 */ 13948 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13949 (cp->sc_grpcode == CDB_GROUP4) || 13950 (cp->sc_grpcode == CDB_GROUP0) || 13951 (cp->sc_grpcode == CDB_GROUP5)); 13952 13953 if (cp->sc_grpcode == CDB_GROUP1) { 13954 FORMG1ADDR(cdbp, lba); 13955 FORMG1COUNT(cdbp, blockcount); 13956 return (0); 13957 } else if (cp->sc_grpcode == CDB_GROUP4) { 13958 FORMG4LONGADDR(cdbp, lba); 13959 FORMG4COUNT(cdbp, blockcount); 13960 return (0); 13961 } else if (cp->sc_grpcode == CDB_GROUP0) { 13962 FORMG0ADDR(cdbp, lba); 13963 FORMG0COUNT(cdbp, blockcount); 13964 return (0); 13965 } else if (cp->sc_grpcode == CDB_GROUP5) { 13966 FORMG5ADDR(cdbp, lba); 13967 FORMG5COUNT(cdbp, blockcount); 13968 return (0); 13969 } 13970 13971 /* 13972 * It should be impossible to not match one 13973 * of the CDB types above, so we should never 13974 * reach this point. Set the CDB command byte 13975 * to test-unit-ready to avoid writing 13976 * to somewhere we don't intend. 13977 */ 13978 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13979 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13980 } else { 13981 /* 13982 * Couldn't get scsi_pkt 13983 */ 13984 return (SD_PKT_ALLOC_FAILURE); 13985 } 13986 } 13987 } 13988 13989 /* 13990 * None of the available CDB types were suitable. This really 13991 * should never happen: on a 64 bit system we support 13992 * READ16/WRITE16 which will hold an entire 64 bit disk address 13993 * and on a 32 bit system we will refuse to bind to a device 13994 * larger than 2TB so addresses will never be larger than 32 bits. 13995 */ 13996 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13997 } 13998 13999 /* 14000 * Function: sd_setup_next_rw_pkt 14001 * 14002 * Description: Setup packet for partial DMA transfers, except for the 14003 * initial transfer. sd_setup_rw_pkt should be used for 14004 * the initial transfer. 14005 * 14006 * Context: Kernel thread and may be called from interrupt context. 14007 */ 14008 14009 int 14010 sd_setup_next_rw_pkt(struct sd_lun *un, 14011 struct scsi_pkt *pktp, struct buf *bp, 14012 diskaddr_t lba, uint32_t blockcount) 14013 { 14014 uchar_t com; 14015 union scsi_cdb *cdbp; 14016 uchar_t cdb_group_id; 14017 14018 ASSERT(pktp != NULL); 14019 ASSERT(pktp->pkt_cdbp != NULL); 14020 14021 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 14022 com = cdbp->scc_cmd; 14023 cdb_group_id = CDB_GROUPID(com); 14024 14025 ASSERT((cdb_group_id == CDB_GROUPID_0) || 14026 (cdb_group_id == CDB_GROUPID_1) || 14027 (cdb_group_id == CDB_GROUPID_4) || 14028 (cdb_group_id == CDB_GROUPID_5)); 14029 14030 /* 14031 * Move pkt to the next portion of the xfer. 14032 * func is NULL_FUNC so we do not have to release 14033 * the disk mutex here. 14034 */ 14035 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 14036 NULL_FUNC, NULL) == pktp) { 14037 /* Success. 
Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 *    Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 *     Context: Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback. This function may not
 *		block or call routines that block.
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dmafree(). Since a uscsi command does
	 *	 not call scsi_dmafree() before retrying a failed command,
	 *	 it is necessary to make sure PKT_DMA_PARTIAL flag is NOT
	 *	 set such that scsi_vhci can use another available path for
	 *	 retry. Besides, a uscsi command does not allow DMA breakup,
	 *	 so there is no need to set PKT_DMA_PARTIAL flag.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? 
bp : NULL), uscmd->uscsi_cdblen, 14137 sizeof (struct scsi_arq_status), 0, 14138 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 14139 sdrunout, (caddr_t)un); 14140 } 14141 14142 if (pktp == NULL) { 14143 *pktpp = NULL; 14144 /* 14145 * Set the driver state to RWAIT to indicate the driver 14146 * is waiting on resource allocations. The driver will not 14147 * suspend, pm_suspend, or detatch while the state is RWAIT. 14148 */ 14149 New_state(un, SD_STATE_RWAIT); 14150 14151 SD_ERROR(SD_LOG_IO_CORE, un, 14152 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 14153 14154 if ((bp->b_flags & B_ERROR) != 0) { 14155 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 14156 } 14157 return (SD_PKT_ALLOC_FAILURE); 14158 } 14159 14160 /* 14161 * We do not do DMA breakup for USCSI commands, so return failure 14162 * here if all the needed DMA resources were not allocated. 14163 */ 14164 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 14165 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 14166 scsi_destroy_pkt(pktp); 14167 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 14168 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 14169 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 14170 } 14171 14172 /* Init the cdb from the given uscsi struct */ 14173 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 14174 uscmd->uscsi_cdb[0], 0, 0, 0); 14175 14176 SD_FILL_SCSI1_LUN(un, pktp); 14177 14178 /* 14179 * Set up the optional USCSI flags. See the uscsi (7I) man page 14180 * for listing of the supported flags. 14181 */ 14182 14183 if (uscmd->uscsi_flags & USCSI_SILENT) { 14184 flags |= FLAG_SILENT; 14185 } 14186 14187 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 14188 flags |= FLAG_DIAGNOSE; 14189 } 14190 14191 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 14192 flags |= FLAG_ISOLATE; 14193 } 14194 14195 if (un->un_f_is_fibre == FALSE) { 14196 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 14197 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 14198 } 14199 } 14200 14201 /* 14202 * Set the pkt flags here so we save time later. 14203 * Note: These flags are NOT in the uscsi man page!!! 14204 */ 14205 if (uscmd->uscsi_flags & USCSI_HEAD) { 14206 flags |= FLAG_HEAD; 14207 } 14208 14209 if (uscmd->uscsi_flags & USCSI_NOINTR) { 14210 flags |= FLAG_NOINTR; 14211 } 14212 14213 /* 14214 * For tagged queueing, things get a bit complicated. 14215 * Check first for head of queue and last for ordered queue. 14216 * If neither head nor order, use the default driver tag flags. 14217 */ 14218 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 14219 if (uscmd->uscsi_flags & USCSI_HTAG) { 14220 flags |= FLAG_HTAG; 14221 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 14222 flags |= FLAG_OTAG; 14223 } else { 14224 flags |= un->un_tagflags & FLAG_TAGMASK; 14225 } 14226 } 14227 14228 if (uscmd->uscsi_flags & USCSI_NODISCON) { 14229 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 14230 } 14231 14232 pktp->pkt_flags = flags; 14233 14234 /* Transfer uscsi information to scsi_pkt */ 14235 (void) scsi_uscsi_pktinit(uscmd, pktp); 14236 14237 /* Copy the caller's CDB into the pkt... 
*/ 14238 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 14239 14240 if (uscmd->uscsi_timeout == 0) { 14241 pktp->pkt_time = un->un_uscsi_timeout; 14242 } else { 14243 pktp->pkt_time = uscmd->uscsi_timeout; 14244 } 14245 14246 /* need it later to identify USCSI request in sdintr */ 14247 xp->xb_pkt_flags |= SD_XB_USCSICMD; 14248 14249 xp->xb_sense_resid = uscmd->uscsi_rqresid; 14250 14251 pktp->pkt_private = bp; 14252 pktp->pkt_comp = sdintr; 14253 *pktpp = pktp; 14254 14255 SD_TRACE(SD_LOG_IO_CORE, un, 14256 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 14257 14258 return (SD_PKT_ALLOC_SUCCESS); 14259 } 14260 14261 14262 /* 14263 * Function: sd_destroypkt_for_uscsi 14264 * 14265 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 14266 * IOs.. Also saves relevant info into the associated uscsi_cmd 14267 * struct. 14268 * 14269 * Context: May be called under interrupt context 14270 */ 14271 14272 static void 14273 sd_destroypkt_for_uscsi(struct buf *bp) 14274 { 14275 struct uscsi_cmd *uscmd; 14276 struct sd_xbuf *xp; 14277 struct scsi_pkt *pktp; 14278 struct sd_lun *un; 14279 struct sd_uscsi_info *suip; 14280 14281 ASSERT(bp != NULL); 14282 xp = SD_GET_XBUF(bp); 14283 ASSERT(xp != NULL); 14284 un = SD_GET_UN(bp); 14285 ASSERT(un != NULL); 14286 ASSERT(!mutex_owned(SD_MUTEX(un))); 14287 pktp = SD_GET_PKTP(bp); 14288 ASSERT(pktp != NULL); 14289 14290 SD_TRACE(SD_LOG_IO_CORE, un, 14291 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 14292 14293 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 14294 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 14295 ASSERT(uscmd != NULL); 14296 14297 /* Save the status and the residual into the uscsi_cmd struct */ 14298 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 14299 uscmd->uscsi_resid = bp->b_resid; 14300 14301 /* Transfer scsi_pkt information to uscsi */ 14302 (void) scsi_uscsi_pktfini(pktp, uscmd); 14303 14304 /* 14305 * If enabled, copy any saved sense data into the area specified 14306 * by the uscsi command. 14307 */ 14308 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 14309 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 14310 /* 14311 * Note: uscmd->uscsi_rqbuf should always point to a buffer 14312 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 14313 */ 14314 uscmd->uscsi_rqstatus = xp->xb_sense_status; 14315 uscmd->uscsi_rqresid = xp->xb_sense_resid; 14316 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 14317 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14318 MAX_SENSE_LENGTH); 14319 } else { 14320 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14321 SENSE_LENGTH); 14322 } 14323 } 14324 /* 14325 * The following assignments are for SCSI FMA. 14326 */ 14327 ASSERT(xp->xb_private != NULL); 14328 suip = (struct sd_uscsi_info *)xp->xb_private; 14329 suip->ui_pkt_reason = pktp->pkt_reason; 14330 suip->ui_pkt_state = pktp->pkt_state; 14331 suip->ui_pkt_statistics = pktp->pkt_statistics; 14332 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 14333 14334 /* We are done with the scsi_pkt; free it now */ 14335 ASSERT(SD_GET_PKTP(bp) != NULL); 14336 scsi_destroy_pkt(SD_GET_PKTP(bp)); 14337 14338 SD_TRACE(SD_LOG_IO_CORE, un, 14339 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 14340 } 14341 14342 14343 /* 14344 * Function: sd_bioclone_alloc 14345 * 14346 * Description: Allocate a buf(9S) and init it as per the given buf 14347 * and the various arguments. The associated sd_xbuf 14348 * struct is (nearly) duplicated. 
The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct	sd_lun	*un;
	struct	sd_xbuf	*xp;
	struct	sd_xbuf	*new_xp;
	struct	buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf.  Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private  = new_xp;

	return (new_bp);
}

/*
 *    Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments.  The associated sd_xbuf
 *		struct is (nearly) duplicated.  The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
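 *
 *	Note: on x86 the shadow data buffer below is plain kernel memory
 *	(getrbuf(9F) plus kmem_zalloc(9F)); other platforms use
 *	scsi_alloc_consistent_buf(9F) so the HBA sees a DMA-consistent
 *	buffer.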
14419 */ 14420 14421 static struct buf * 14422 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14423 daddr_t blkno, int (*func)(struct buf *)) 14424 { 14425 struct sd_lun *un; 14426 struct sd_xbuf *xp; 14427 struct sd_xbuf *new_xp; 14428 struct buf *new_bp; 14429 14430 ASSERT(bp != NULL); 14431 xp = SD_GET_XBUF(bp); 14432 ASSERT(xp != NULL); 14433 un = SD_GET_UN(bp); 14434 ASSERT(un != NULL); 14435 ASSERT(!mutex_owned(SD_MUTEX(un))); 14436 14437 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14438 bp_mapin(bp); 14439 } 14440 14441 bflags &= (B_READ | B_WRITE); 14442 #if defined(__i386) || defined(__amd64) 14443 new_bp = getrbuf(KM_SLEEP); 14444 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14445 new_bp->b_bcount = datalen; 14446 new_bp->b_flags = bflags | 14447 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14448 #else 14449 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14450 datalen, bflags, SLEEP_FUNC, NULL); 14451 #endif 14452 new_bp->av_forw = NULL; 14453 new_bp->av_back = NULL; 14454 new_bp->b_dev = bp->b_dev; 14455 new_bp->b_blkno = blkno; 14456 new_bp->b_iodone = func; 14457 new_bp->b_edev = bp->b_edev; 14458 new_bp->b_resid = 0; 14459 14460 /* We need to preserve the B_FAILFAST flag */ 14461 if (bp->b_flags & B_FAILFAST) { 14462 new_bp->b_flags |= B_FAILFAST; 14463 } 14464 14465 /* 14466 * Allocate an xbuf for the shadow bp and copy the contents of the 14467 * original xbuf into it. 14468 */ 14469 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14470 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14471 14472 /* Need later to copy data between the shadow buf & original buf! */ 14473 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14474 14475 /* 14476 * The given bp is automatically saved in the xb_private member 14477 * of the new xbuf. Callers are allowed to depend on this. 14478 */ 14479 new_xp->xb_private = bp; 14480 14481 new_bp->b_private = new_xp; 14482 14483 return (new_bp); 14484 } 14485 14486 /* 14487 * Function: sd_bioclone_free 14488 * 14489 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14490 * in the larger than partition operation. 14491 * 14492 * Context: May be called under interrupt context 14493 */ 14494 14495 static void 14496 sd_bioclone_free(struct buf *bp) 14497 { 14498 struct sd_xbuf *xp; 14499 14500 ASSERT(bp != NULL); 14501 xp = SD_GET_XBUF(bp); 14502 ASSERT(xp != NULL); 14503 14504 /* 14505 * Call bp_mapout() before freeing the buf, in case a lower 14506 * layer or HBA had done a bp_mapin(). we must do this here 14507 * as we are the "originator" of the shadow buf. 14508 */ 14509 bp_mapout(bp); 14510 14511 /* 14512 * Null out b_iodone before freeing the bp, to ensure that the driver 14513 * never gets confused by a stale value in this field. (Just a little 14514 * extra defensiveness here.) 14515 */ 14516 bp->b_iodone = NULL; 14517 14518 freerbuf(bp); 14519 14520 kmem_free(xp, sizeof (struct sd_xbuf)); 14521 } 14522 14523 /* 14524 * Function: sd_shadow_buf_free 14525 * 14526 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14527 * 14528 * Context: May be called under interrupt context 14529 */ 14530 14531 static void 14532 sd_shadow_buf_free(struct buf *bp) 14533 { 14534 struct sd_xbuf *xp; 14535 14536 ASSERT(bp != NULL); 14537 xp = SD_GET_XBUF(bp); 14538 ASSERT(xp != NULL); 14539 14540 #if defined(__sparc) 14541 /* 14542 * Call bp_mapout() before freeing the buf, in case a lower 14543 * layer or HBA had done a bp_mapin(). 
We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}


/*
 *    Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message.  This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 *     Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line).  This uses the un->un_tran_fatal_count
	 *   counter, which is incremented whenever a TRAN_FATAL_ERROR is
	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
	 *   from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
	    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 *    Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance.  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 *     Context: Does not sleep/block, therefore technically can be called
 *		from any context.  However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large.
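 *
 *		Example (illustrative block numbers): with a queue of
 *		10, 30, 5, 8 (first list 10, 30; second list 5, 8), a new
 *		request for block 20 is inserted between 10 and 30, while
 *		a request for block 2 goes to the head of the second list,
 *		ahead of 5.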
14643 */
14644
14645 static void
14646 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14647 {
14648 struct buf *ap;
14649
14650 ASSERT(bp != NULL);
14651 ASSERT(un != NULL);
14652 ASSERT(mutex_owned(SD_MUTEX(un)));
14653
14654 /* If the queue is empty, add the buf as the only entry & return. */
14655 if (un->un_waitq_headp == NULL) {
14656 ASSERT(un->un_waitq_tailp == NULL);
14657 un->un_waitq_headp = un->un_waitq_tailp = bp;
14658 bp->av_forw = NULL;
14659 return;
14660 }
14661
14662 ASSERT(un->un_waitq_tailp != NULL);
14663
14664 /*
14665 * If sorting is disabled, just add the buf to the tail end of
14666 * the wait queue and return.
14667 */
14668 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14669 un->un_waitq_tailp->av_forw = bp;
14670 un->un_waitq_tailp = bp;
14671 bp->av_forw = NULL;
14672 return;
14673 }
14674
14675 /*
14676 * Sort through the list of requests currently on the wait queue
14677 * and add the new buf request at the appropriate position.
14678 *
14679 * The un->un_waitq_headp is an activity chain pointer on which
14680 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14681 * first queue holds those requests which are positioned after
14682 * the current SD_GET_BLKNO() (in the first request); the second holds
14683 * requests which came in after their SD_GET_BLKNO() number was passed.
14684 * Thus we implement a one-way scan, retracting after reaching
14685 * the end of the drive to the first request on the second
14686 * queue, at which time it becomes the first queue.
14687 * A one-way scan is natural because of the way UNIX read-ahead
14688 * blocks are allocated.
14689 *
14690 * If we lie after the first request, then we must locate the
14691 * second request list and add ourselves to it.
14692 */
14693 ap = un->un_waitq_headp;
14694 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14695 while (ap->av_forw != NULL) {
14696 /*
14697 * Look for an "inversion" in the (normally
14698 * ascending) block numbers. This indicates
14699 * the start of the second request list.
14700 */
14701 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14702 /*
14703 * Search the second request list for the
14704 * first request at a larger block number.
14705 * We go before that; however if there is
14706 * no such request, we go at the end.
14707 */
14708 do {
14709 if (SD_GET_BLKNO(bp) <
14710 SD_GET_BLKNO(ap->av_forw)) {
14711 goto insert;
14712 }
14713 ap = ap->av_forw;
14714 } while (ap->av_forw != NULL);
14715 goto insert; /* after last */
14716 }
14717 ap = ap->av_forw;
14718 }
14719
14720 /*
14721 * No inversions... we will go after the last, and
14722 * be the first request in the second request list.
14723 */
14724 goto insert;
14725 }
14726
14727 /*
14728 * Request is at/after the current request...
14729 * sort in the first request list.
14730 */
14731 while (ap->av_forw != NULL) {
14732 /*
14733 * We want to go after the current request (1) if
14734 * there is an inversion after it (i.e. it is the end
14735 * of the first request list), or (2) if the next
14736 * request is a larger block no. than our request.
14737 */
14738 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14739 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14740 goto insert;
14741 }
14742 ap = ap->av_forw;
14743 }
14744
14745 /*
14746 * Neither a second list nor a larger request, therefore
14747 * we go at the end of the first list (which is the same
14748 * as the end of the whole shebang).
14749 */ 14750 insert: 14751 bp->av_forw = ap->av_forw; 14752 ap->av_forw = bp; 14753 14754 /* 14755 * If we inserted onto the tail end of the waitq, make sure the 14756 * tail pointer is updated. 14757 */ 14758 if (ap == un->un_waitq_tailp) { 14759 un->un_waitq_tailp = bp; 14760 } 14761 } 14762 14763 14764 /* 14765 * Function: sd_start_cmds 14766 * 14767 * Description: Remove and transport cmds from the driver queues. 14768 * 14769 * Arguments: un - pointer to the unit (soft state) struct for the target. 14770 * 14771 * immed_bp - ptr to a buf to be transported immediately. Only 14772 * the immed_bp is transported; bufs on the waitq are not 14773 * processed and the un_retry_bp is not checked. If immed_bp is 14774 * NULL, then normal queue processing is performed. 14775 * 14776 * Context: May be called from kernel thread context, interrupt context, 14777 * or runout callback context. This function may not block or 14778 * call routines that block. 14779 */ 14780 14781 static void 14782 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14783 { 14784 struct sd_xbuf *xp; 14785 struct buf *bp; 14786 void (*statp)(kstat_io_t *); 14787 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14788 void (*saved_statp)(kstat_io_t *); 14789 #endif 14790 int rval; 14791 struct sd_fm_internal *sfip = NULL; 14792 14793 ASSERT(un != NULL); 14794 ASSERT(mutex_owned(SD_MUTEX(un))); 14795 ASSERT(un->un_ncmds_in_transport >= 0); 14796 ASSERT(un->un_throttle >= 0); 14797 14798 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14799 14800 do { 14801 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14802 saved_statp = NULL; 14803 #endif 14804 14805 /* 14806 * If we are syncing or dumping, fail the command to 14807 * avoid recursively calling back into scsi_transport(). 14808 * The dump I/O itself uses a separate code path so this 14809 * only prevents non-dump I/O from being sent while dumping. 14810 * File system sync takes place before dumping begins. 14811 * During panic, filesystem I/O is allowed provided 14812 * un_in_callback is <= 1. This is to prevent recursion 14813 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14814 * sd_start_cmds and so on. See panic.c for more information 14815 * about the states the system can be in during panic. 14816 */ 14817 if ((un->un_state == SD_STATE_DUMPING) || 14818 (ddi_in_panic() && (un->un_in_callback > 1))) { 14819 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14820 "sd_start_cmds: panicking\n"); 14821 goto exit; 14822 } 14823 14824 if ((bp = immed_bp) != NULL) { 14825 /* 14826 * We have a bp that must be transported immediately. 14827 * It's OK to transport the immed_bp here without doing 14828 * the throttle limit check because the immed_bp is 14829 * always used in a retry/recovery case. This means 14830 * that we know we are not at the throttle limit by 14831 * virtue of the fact that to get here we must have 14832 * already gotten a command back via sdintr(). This also 14833 * relies on (1) the command on un_retry_bp preventing 14834 * further commands from the waitq from being issued; 14835 * and (2) the code in sd_retry_command checking the 14836 * throttle limit before issuing a delayed or immediate 14837 * retry. This holds even if the throttle limit is 14838 * currently ratcheted down from its maximum value. 
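 *
 * Dispatch rule in outline (an illustrative sketch, not the code
 * itself):
 *
 *	if (bp == immed_bp)
 *		transport it now, with no throttle check;
 *	else if (un_ncmds_in_transport < un_throttle &&
 *	    no retry, START_STOP_UNIT, or DIRECT_PRIORITY
 *	    callback is pending)
 *		dequeue the head of the waitq and transport it;
 *	else
 *		leave it queued until a completion or timeout
 *		callback restarts queue processing.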
14839 */
14840 statp = kstat_runq_enter;
14841 if (bp == un->un_retry_bp) {
14842 ASSERT((un->un_retry_statp == NULL) ||
14843 (un->un_retry_statp == kstat_waitq_enter) ||
14844 (un->un_retry_statp ==
14845 kstat_runq_back_to_waitq));
14846 /*
14847 * If the waitq kstat was incremented when
14848 * sd_set_retry_bp() queued this bp for a retry,
14849 * then we must set up statp so that the waitq
14850 * count will get decremented correctly below.
14851 * Also we must clear un->un_retry_statp to
14852 * ensure that we do not act on a stale value
14853 * in this field.
14854 */
14855 if ((un->un_retry_statp == kstat_waitq_enter) ||
14856 (un->un_retry_statp ==
14857 kstat_runq_back_to_waitq)) {
14858 statp = kstat_waitq_to_runq;
14859 }
14860 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14861 saved_statp = un->un_retry_statp;
14862 #endif
14863 un->un_retry_statp = NULL;
14864
14865 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14866 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14867 "un_throttle:%d un_ncmds_in_transport:%d\n",
14868 un, un->un_retry_bp, un->un_throttle,
14869 un->un_ncmds_in_transport);
14870 } else {
14871 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14872 "processing priority bp:0x%p\n", bp);
14873 }
14874
14875 } else if ((bp = un->un_waitq_headp) != NULL) {
14876 /*
14877 * A command on the waitq is ready to go, but do not
14878 * send it if:
14879 *
14880 * (1) the throttle limit has been reached, or
14881 * (2) a retry is pending, or
14882 * (3) a START_STOP_UNIT callback is pending, or
14883 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14884 * command is pending.
14885 *
14886 * For all of these conditions, IO processing will
14887 * restart after the condition is cleared.
14888 */
14889 if (un->un_ncmds_in_transport >= un->un_throttle) {
14890 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14891 "sd_start_cmds: exiting, "
14892 "throttle limit reached!\n");
14893 goto exit;
14894 }
14895 if (un->un_retry_bp != NULL) {
14896 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14897 "sd_start_cmds: exiting, retry pending!\n");
14898 goto exit;
14899 }
14900 if (un->un_startstop_timeid != NULL) {
14901 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14902 "sd_start_cmds: exiting, "
14903 "START_STOP pending!\n");
14904 goto exit;
14905 }
14906 if (un->un_direct_priority_timeid != NULL) {
14907 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14908 "sd_start_cmds: exiting, "
14909 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14910 goto exit;
14911 }
14912
14913 /* Dequeue the command */
14914 un->un_waitq_headp = bp->av_forw;
14915 if (un->un_waitq_headp == NULL) {
14916 un->un_waitq_tailp = NULL;
14917 }
14918 bp->av_forw = NULL;
14919 statp = kstat_waitq_to_runq;
14920 SD_TRACE(SD_LOG_IO_CORE, un,
14921 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14922
14923 } else {
14924 /* No work to do so bail out now */
14925 SD_TRACE(SD_LOG_IO_CORE, un,
14926 "sd_start_cmds: no more work, exiting!\n");
14927 goto exit;
14928 }
14929
14930 /*
14931 * Reset the state to normal. This is the mechanism by which
14932 * the state transitions from either SD_STATE_RWAIT or
14933 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14934 * If state is SD_STATE_PM_CHANGING then this command is
14935 * part of the device power control and the state must
14936 * not be put back to normal. Doing so would
14937 * allow new commands to proceed when they shouldn't,
14938 * as the device may be going off.
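 *
 * State transitions performed here, in sketch form:
 *
 *	SD_STATE_RWAIT   -> SD_STATE_NORMAL
 *	SD_STATE_OFFLINE -> SD_STATE_NORMAL
 *	SD_STATE_SUSPENDED, SD_STATE_PM_CHANGING -> left unchanged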
14939 */ 14940 if ((un->un_state != SD_STATE_SUSPENDED) && 14941 (un->un_state != SD_STATE_PM_CHANGING)) { 14942 New_state(un, SD_STATE_NORMAL); 14943 } 14944 14945 xp = SD_GET_XBUF(bp); 14946 ASSERT(xp != NULL); 14947 14948 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14949 /* 14950 * Allocate the scsi_pkt if we need one, or attach DMA 14951 * resources if we have a scsi_pkt that needs them. The 14952 * latter should only occur for commands that are being 14953 * retried. 14954 */ 14955 if ((xp->xb_pktp == NULL) || 14956 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14957 #else 14958 if (xp->xb_pktp == NULL) { 14959 #endif 14960 /* 14961 * There is no scsi_pkt allocated for this buf. Call 14962 * the initpkt function to allocate & init one. 14963 * 14964 * The scsi_init_pkt runout callback functionality is 14965 * implemented as follows: 14966 * 14967 * 1) The initpkt function always calls 14968 * scsi_init_pkt(9F) with sdrunout specified as the 14969 * callback routine. 14970 * 2) A successful packet allocation is initialized and 14971 * the I/O is transported. 14972 * 3) The I/O associated with an allocation resource 14973 * failure is left on its queue to be retried via 14974 * runout or the next I/O. 14975 * 4) The I/O associated with a DMA error is removed 14976 * from the queue and failed with EIO. Processing of 14977 * the transport queues is also halted to be 14978 * restarted via runout or the next I/O. 14979 * 5) The I/O associated with a CDB size or packet 14980 * size error is removed from the queue and failed 14981 * with EIO. Processing of the transport queues is 14982 * continued. 14983 * 14984 * Note: there is no interface for canceling a runout 14985 * callback. To prevent the driver from detaching or 14986 * suspending while a runout is pending the driver 14987 * state is set to SD_STATE_RWAIT 14988 * 14989 * Note: using the scsi_init_pkt callback facility can 14990 * result in an I/O request persisting at the head of 14991 * the list which cannot be satisfied even after 14992 * multiple retries. In the future the driver may 14993 * implement some kind of maximum runout count before 14994 * failing an I/O. 14995 * 14996 * Note: the use of funcp below may seem superfluous, 14997 * but it helps warlock figure out the correct 14998 * initpkt function calls (see [s]sd.wlcmd). 14999 */ 15000 struct scsi_pkt *pktp; 15001 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 15002 15003 ASSERT(bp != un->un_rqs_bp); 15004 15005 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 15006 switch ((*funcp)(bp, &pktp)) { 15007 case SD_PKT_ALLOC_SUCCESS: 15008 xp->xb_pktp = pktp; 15009 SD_TRACE(SD_LOG_IO_CORE, un, 15010 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 15011 pktp); 15012 goto got_pkt; 15013 15014 case SD_PKT_ALLOC_FAILURE: 15015 /* 15016 * Temporary (hopefully) resource depletion. 15017 * Since retries and RQS commands always have a 15018 * scsi_pkt allocated, these cases should never 15019 * get here. So the only cases this needs to 15020 * handle is a bp from the waitq (which we put 15021 * back onto the waitq for sdrunout), or a bp 15022 * sent as an immed_bp (which we just fail). 15023 */ 15024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15025 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 15026 15027 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15028 15029 if (bp == immed_bp) { 15030 /* 15031 * If SD_XB_DMA_FREED is clear, then 15032 * this is a failure to allocate a 15033 * scsi_pkt, and we must fail the 15034 * command. 
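 *
 * Decision sketch for this immed_bp failure path (illustrative
 * only; the real checks follow below):
 *
 *	SD_XB_DMA_FREED clear -> fail the command
 *	bp != un->un_retry_bp -> fail the command
 *	otherwise             -> re-arm the retry via timeout(9F)
 *	                         after SD_RESTART_TIMEOUT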
15035 */ 15036 if ((xp->xb_pkt_flags & 15037 SD_XB_DMA_FREED) == 0) { 15038 break; 15039 } 15040 15041 /* 15042 * If this immediate command is NOT our 15043 * un_retry_bp, then we must fail it. 15044 */ 15045 if (bp != un->un_retry_bp) { 15046 break; 15047 } 15048 15049 /* 15050 * We get here if this cmd is our 15051 * un_retry_bp that was DMAFREED, but 15052 * scsi_init_pkt() failed to reallocate 15053 * DMA resources when we attempted to 15054 * retry it. This can happen when an 15055 * mpxio failover is in progress, but 15056 * we don't want to just fail the 15057 * command in this case. 15058 * 15059 * Use timeout(9F) to restart it after 15060 * a 100ms delay. We don't want to 15061 * let sdrunout() restart it, because 15062 * sdrunout() is just supposed to start 15063 * commands that are sitting on the 15064 * wait queue. The un_retry_bp stays 15065 * set until the command completes, but 15066 * sdrunout can be called many times 15067 * before that happens. Since sdrunout 15068 * cannot tell if the un_retry_bp is 15069 * already in the transport, it could 15070 * end up calling scsi_transport() for 15071 * the un_retry_bp multiple times. 15072 * 15073 * Also: don't schedule the callback 15074 * if some other callback is already 15075 * pending. 15076 */ 15077 if (un->un_retry_statp == NULL) { 15078 /* 15079 * restore the kstat pointer to 15080 * keep kstat counts coherent 15081 * when we do retry the command. 15082 */ 15083 un->un_retry_statp = 15084 saved_statp; 15085 } 15086 15087 if ((un->un_startstop_timeid == NULL) && 15088 (un->un_retry_timeid == NULL) && 15089 (un->un_direct_priority_timeid == 15090 NULL)) { 15091 15092 un->un_retry_timeid = 15093 timeout( 15094 sd_start_retry_command, 15095 un, SD_RESTART_TIMEOUT); 15096 } 15097 goto exit; 15098 } 15099 15100 #else 15101 if (bp == immed_bp) { 15102 break; /* Just fail the command */ 15103 } 15104 #endif 15105 15106 /* Add the buf back to the head of the waitq */ 15107 bp->av_forw = un->un_waitq_headp; 15108 un->un_waitq_headp = bp; 15109 if (un->un_waitq_tailp == NULL) { 15110 un->un_waitq_tailp = bp; 15111 } 15112 goto exit; 15113 15114 case SD_PKT_ALLOC_FAILURE_NO_DMA: 15115 /* 15116 * HBA DMA resource failure. Fail the command 15117 * and continue processing of the queues. 15118 */ 15119 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15120 "sd_start_cmds: " 15121 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 15122 break; 15123 15124 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 15125 /* 15126 * Note:x86: Partial DMA mapping not supported 15127 * for USCSI commands, and all the needed DMA 15128 * resources were not allocated. 15129 */ 15130 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15131 "sd_start_cmds: " 15132 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 15133 break; 15134 15135 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 15136 /* 15137 * Note:x86: Request cannot fit into CDB based 15138 * on lba and len. 15139 */ 15140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15141 "sd_start_cmds: " 15142 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 15143 break; 15144 15145 default: 15146 /* Should NEVER get here! */ 15147 panic("scsi_initpkt error"); 15148 /*NOTREACHED*/ 15149 } 15150 15151 /* 15152 * Fatal error in allocating a scsi_pkt for this buf. 15153 * Update kstats & return the buf with an error code. 15154 * We must use sd_return_failed_command_no_restart() to 15155 * avoid a recursive call back into sd_start_cmds(). 15156 * However this also means that we must keep processing 15157 * the waitq here in order to avoid stalling. 
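 *
 * (Bookkeeping sketch: when statp is kstat_waitq_to_runq the buf
 * is still counted as waiting, so kstat_waitq_exit is applied
 * below before the buf is failed; otherwise the waitq kstat would
 * never be decremented for this buf.)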
15158 */ 15159 if (statp == kstat_waitq_to_runq) { 15160 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 15161 } 15162 sd_return_failed_command_no_restart(un, bp, EIO); 15163 if (bp == immed_bp) { 15164 /* immed_bp is gone by now, so clear this */ 15165 immed_bp = NULL; 15166 } 15167 continue; 15168 } 15169 got_pkt: 15170 if (bp == immed_bp) { 15171 /* goto the head of the class.... */ 15172 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15173 } 15174 15175 un->un_ncmds_in_transport++; 15176 SD_UPDATE_KSTATS(un, statp, bp); 15177 15178 /* 15179 * Call scsi_transport() to send the command to the target. 15180 * According to SCSA architecture, we must drop the mutex here 15181 * before calling scsi_transport() in order to avoid deadlock. 15182 * Note that the scsi_pkt's completion routine can be executed 15183 * (from interrupt context) even before the call to 15184 * scsi_transport() returns. 15185 */ 15186 SD_TRACE(SD_LOG_IO_CORE, un, 15187 "sd_start_cmds: calling scsi_transport()\n"); 15188 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 15189 15190 mutex_exit(SD_MUTEX(un)); 15191 rval = scsi_transport(xp->xb_pktp); 15192 mutex_enter(SD_MUTEX(un)); 15193 15194 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15195 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15196 15197 switch (rval) { 15198 case TRAN_ACCEPT: 15199 /* Clear this with every pkt accepted by the HBA */ 15200 un->un_tran_fatal_count = 0; 15201 break; /* Success; try the next cmd (if any) */ 15202 15203 case TRAN_BUSY: 15204 un->un_ncmds_in_transport--; 15205 ASSERT(un->un_ncmds_in_transport >= 0); 15206 15207 /* 15208 * Don't retry request sense, the sense data 15209 * is lost when another request is sent. 15210 * Free up the rqs buf and retry 15211 * the original failed cmd. Update kstat. 15212 */ 15213 if (bp == un->un_rqs_bp) { 15214 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15215 bp = sd_mark_rqs_idle(un, xp); 15216 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15217 NULL, NULL, EIO, un->un_busy_timeout / 500, 15218 kstat_waitq_enter); 15219 goto exit; 15220 } 15221 15222 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15223 /* 15224 * Free the DMA resources for the scsi_pkt. This will 15225 * allow mpxio to select another path the next time 15226 * we call scsi_transport() with this scsi_pkt. 15227 * See sdintr() for the rationalization behind this. 15228 */ 15229 if ((un->un_f_is_fibre == TRUE) && 15230 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15231 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15232 scsi_dmafree(xp->xb_pktp); 15233 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15234 } 15235 #endif 15236 15237 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15238 /* 15239 * Commands that are SD_PATH_DIRECT_PRIORITY 15240 * are for error recovery situations. These do 15241 * not use the normal command waitq, so if they 15242 * get a TRAN_BUSY we cannot put them back onto 15243 * the waitq for later retry. One possible 15244 * problem is that there could already be some 15245 * other command on un_retry_bp that is waiting 15246 * for this one to complete, so we would be 15247 * deadlocked if we put this command back onto 15248 * the waitq for later retry (since un_retry_bp 15249 * must complete before the driver gets back to 15250 * commands on the waitq). 15251 * 15252 * To avoid deadlock we must schedule a callback 15253 * that will restart this command after a set 15254 * interval. 
This should keep retrying for as 15255 * long as the underlying transport keeps 15256 * returning TRAN_BUSY (just like for other 15257 * commands). Use the same timeout interval as 15258 * for the ordinary TRAN_BUSY retry. 15259 */ 15260 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15261 "sd_start_cmds: scsi_transport() returned " 15262 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15263 15264 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15265 un->un_direct_priority_timeid = 15266 timeout(sd_start_direct_priority_command, 15267 bp, un->un_busy_timeout / 500); 15268 15269 goto exit; 15270 } 15271 15272 /* 15273 * For TRAN_BUSY, we want to reduce the throttle value, 15274 * unless we are retrying a command. 15275 */ 15276 if (bp != un->un_retry_bp) { 15277 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15278 } 15279 15280 /* 15281 * Set up the bp to be tried again 10 ms later. 15282 * Note:x86: Is there a timeout value in the sd_lun 15283 * for this condition? 15284 */ 15285 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15286 kstat_runq_back_to_waitq); 15287 goto exit; 15288 15289 case TRAN_FATAL_ERROR: 15290 un->un_tran_fatal_count++; 15291 /* FALLTHRU */ 15292 15293 case TRAN_BADPKT: 15294 default: 15295 un->un_ncmds_in_transport--; 15296 ASSERT(un->un_ncmds_in_transport >= 0); 15297 15298 /* 15299 * If this is our REQUEST SENSE command with a 15300 * transport error, we must get back the pointers 15301 * to the original buf, and mark the REQUEST 15302 * SENSE command as "available". 15303 */ 15304 if (bp == un->un_rqs_bp) { 15305 bp = sd_mark_rqs_idle(un, xp); 15306 xp = SD_GET_XBUF(bp); 15307 } else { 15308 /* 15309 * Legacy behavior: do not update transport 15310 * error count for request sense commands. 15311 */ 15312 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15313 } 15314 15315 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15316 sd_print_transport_rejected_message(un, xp, rval); 15317 15318 /* 15319 * This command will be terminated by SD driver due 15320 * to a fatal transport error. We should post 15321 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15322 * of "fail" for any command to indicate this 15323 * situation. 15324 */ 15325 if (xp->xb_ena > 0) { 15326 ASSERT(un->un_fm_private != NULL); 15327 sfip = un->un_fm_private; 15328 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15329 sd_ssc_extract_info(&sfip->fm_ssc, un, 15330 xp->xb_pktp, bp, xp); 15331 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15332 } 15333 15334 /* 15335 * We must use sd_return_failed_command_no_restart() to 15336 * avoid a recursive call back into sd_start_cmds(). 15337 * However this also means that we must keep processing 15338 * the waitq here in order to avoid stalling. 15339 */ 15340 sd_return_failed_command_no_restart(un, bp, EIO); 15341 15342 /* 15343 * Notify any threads waiting in sd_ddi_suspend() that 15344 * a command completion has occurred. 15345 */ 15346 if (un->un_state == SD_STATE_SUSPENDED) { 15347 cv_broadcast(&un->un_disk_busy_cv); 15348 } 15349 15350 if (bp == immed_bp) { 15351 /* immed_bp is gone by now, so clear this */ 15352 immed_bp = NULL; 15353 } 15354 break; 15355 } 15356 15357 } while (immed_bp == NULL); 15358 15359 exit: 15360 ASSERT(mutex_owned(SD_MUTEX(un))); 15361 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15362 } 15363 15364 15365 /* 15366 * Function: sd_return_command 15367 * 15368 * Description: Returns a command to its originator (with or without an 15369 * error). Also starts commands waiting to be transported 15370 * to the target. 
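In outline
 * (an illustrative summary): finish any partial-DMA transfer still
 * in progress, clear failfast bookkeeping, post the appropriate
 * ereport, update kstats, restart the wait queue, and finally send
 * the buf up its iodone chain.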
15371 * 15372 * Context: May be called from interrupt, kernel, or timeout context 15373 */ 15374 15375 static void 15376 sd_return_command(struct sd_lun *un, struct buf *bp) 15377 { 15378 struct sd_xbuf *xp; 15379 struct scsi_pkt *pktp; 15380 struct sd_fm_internal *sfip; 15381 15382 ASSERT(bp != NULL); 15383 ASSERT(un != NULL); 15384 ASSERT(mutex_owned(SD_MUTEX(un))); 15385 ASSERT(bp != un->un_rqs_bp); 15386 xp = SD_GET_XBUF(bp); 15387 ASSERT(xp != NULL); 15388 15389 pktp = SD_GET_PKTP(bp); 15390 sfip = (struct sd_fm_internal *)un->un_fm_private; 15391 ASSERT(sfip != NULL); 15392 15393 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15394 15395 /* 15396 * Note: check for the "sdrestart failed" case. 15397 */ 15398 if ((un->un_partial_dma_supported == 1) && 15399 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15400 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15401 (xp->xb_pktp->pkt_resid == 0)) { 15402 15403 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15404 /* 15405 * Successfully set up next portion of cmd 15406 * transfer, try sending it 15407 */ 15408 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15409 NULL, NULL, 0, (clock_t)0, NULL); 15410 sd_start_cmds(un, NULL); 15411 return; /* Note:x86: need a return here? */ 15412 } 15413 } 15414 15415 /* 15416 * If this is the failfast bp, clear it from un_failfast_bp. This 15417 * can happen if upon being re-tried the failfast bp either 15418 * succeeded or encountered another error (possibly even a different 15419 * error than the one that precipitated the failfast state, but in 15420 * that case it would have had to exhaust retries as well). Regardless, 15421 * this should not occur whenever the instance is in the active 15422 * failfast state. 15423 */ 15424 if (bp == un->un_failfast_bp) { 15425 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15426 un->un_failfast_bp = NULL; 15427 } 15428 15429 /* 15430 * Clear the failfast state upon successful completion of ANY cmd. 15431 */ 15432 if (bp->b_error == 0) { 15433 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15434 /* 15435 * If this is a successful command, but used to be retried, 15436 * we will take it as a recovered command and post an 15437 * ereport with driver-assessment of "recovered". 15438 */ 15439 if (xp->xb_ena > 0) { 15440 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15441 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15442 } 15443 } else { 15444 /* 15445 * If this is a failed non-USCSI command we will post an 15446 * ereport with driver-assessment set accordingly("fail" or 15447 * "fatal"). 15448 */ 15449 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15450 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15451 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15452 } 15453 } 15454 15455 /* 15456 * This is used if the command was retried one or more times. Show that 15457 * we are done with it, and allow processing of the waitq to resume. 15458 */ 15459 if (bp == un->un_retry_bp) { 15460 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15461 "sd_return_command: un:0x%p: " 15462 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15463 un->un_retry_bp = NULL; 15464 un->un_retry_statp = NULL; 15465 } 15466 15467 SD_UPDATE_RDWR_STATS(un, bp); 15468 SD_UPDATE_PARTITION_STATS(un, bp); 15469 15470 switch (un->un_state) { 15471 case SD_STATE_SUSPENDED: 15472 /* 15473 * Notify any threads waiting in sd_ddi_suspend() that 15474 * a command completion has occurred. 
15475 */
15476 cv_broadcast(&un->un_disk_busy_cv);
15477 break;
15478 default:
15479 sd_start_cmds(un, NULL);
15480 break;
15481 }
15482
15483 /* Return this command up the iodone chain to its originator. */
15484 mutex_exit(SD_MUTEX(un));
15485
15486 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15487 xp->xb_pktp = NULL;
15488
15489 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15490
15491 ASSERT(!mutex_owned(SD_MUTEX(un)));
15492 mutex_enter(SD_MUTEX(un));
15493
15494 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15495 }
15496
15497
15498 /*
15499 * Function: sd_return_failed_command
15500 *
15501 * Description: Command completion when an error occurred.
15502 *
15503 * Context: May be called from interrupt context
15504 */
15505
15506 static void
15507 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15508 {
15509 ASSERT(bp != NULL);
15510 ASSERT(un != NULL);
15511 ASSERT(mutex_owned(SD_MUTEX(un)));
15512
15513 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15514 "sd_return_failed_command: entry\n");
15515
15516 /*
15517 * b_resid could already be nonzero due to a partial data
15518 * transfer, so do not change it here.
15519 */
15520 SD_BIOERROR(bp, errcode);
15521
15522 sd_return_command(un, bp);
15523 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15524 "sd_return_failed_command: exit\n");
15525 }
15526
15527
15528 /*
15529 * Function: sd_return_failed_command_no_restart
15530 *
15531 * Description: Same as sd_return_failed_command, but ensures that no
15532 * call back into sd_start_cmds will be issued.
15533 *
15534 * Context: May be called from interrupt context
15535 */
15536
15537 static void
15538 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15539 int errcode)
15540 {
15541 struct sd_xbuf *xp;
15542
15543 ASSERT(bp != NULL);
15544 ASSERT(un != NULL);
15545 ASSERT(mutex_owned(SD_MUTEX(un)));
15546 xp = SD_GET_XBUF(bp);
15547 ASSERT(xp != NULL);
15548 ASSERT(errcode != 0);
15549
15550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15551 "sd_return_failed_command_no_restart: entry\n");
15552
15553 /*
15554 * b_resid could already be nonzero due to a partial data
15555 * transfer, so do not change it here.
15556 */
15557 SD_BIOERROR(bp, errcode);
15558
15559 /*
15560 * If this is the failfast bp, clear it. This can happen if the
15561 * failfast bp encountered a fatal error when we attempted to
15562 * re-try it (such as a scsi_transport(9F) failure). However
15563 * we should NOT be in an active failfast state if the failfast
15564 * bp is not NULL.
15565 */
15566 if (bp == un->un_failfast_bp) {
15567 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15568 un->un_failfast_bp = NULL;
15569 }
15570
15571 if (bp == un->un_retry_bp) {
15572 /*
15573 * This command was retried one or more times. Show that we are
15574 * done with it, and allow processing of the waitq to resume.
15575 */
15576 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15577 "sd_return_failed_command_no_restart: "
15578 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15579 un->un_retry_bp = NULL;
15580 un->un_retry_statp = NULL;
15581 }
15582
15583 SD_UPDATE_RDWR_STATS(un, bp);
15584 SD_UPDATE_PARTITION_STATS(un, bp);
15585
15586 mutex_exit(SD_MUTEX(un));
15587
15588 if (xp->xb_pktp != NULL) {
15589 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15590 xp->xb_pktp = NULL;
15591 }
15592
15593 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15594
15595 mutex_enter(SD_MUTEX(un));
15596
15597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15598 "sd_return_failed_command_no_restart: exit\n");
15599 }
15600
15601
15602 /*
15603 * Function: sd_retry_command
15604 *
15605 * Description: queue up a command for retry, or (optionally) fail it
15606 * if retry counts are exhausted.
15607 *
15608 * Arguments: un - Pointer to the sd_lun struct for the target.
15609 *
15610 * bp - Pointer to the buf for the command to be retried.
15611 *
15612 * retry_check_flag - Flag to see which (if any) of the retry
15613 * counts should be decremented/checked. If the indicated
15614 * retry count is exhausted, then the command will not be
15615 * retried; it will be failed instead. This should use a
15616 * value equal to one of the following:
15617 *
15618 SD_RETRIES_NOCHECK
15619 SD_RETRIES_STANDARD
15620 SD_RETRIES_VICTIM
15621 *
15622 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15623 * if the check should be made to see if FLAG_ISOLATE is set
15624 * in the pkt. If FLAG_ISOLATE is set, then the command is
15625 * not retried, it is simply failed.
15626 *
15627 * user_funcp - Ptr to function to call before dispatching the
15628 * command. May be NULL if no action needs to be performed.
15629 * (Primarily intended for printing messages.)
15630 *
15631 * user_arg - Optional argument to be passed along to
15632 * the user_funcp call.
15633 *
15634 * failure_code - errno return code to set in the bp if the
15635 * command is going to be failed.
15636 *
15637 * retry_delay - Retry delay interval in (clock_t) units. May
15638 * be zero, which indicates that the retry should be attempted
15639 * immediately (ie, without an intervening delay).
15640 *
15641 * statp - Ptr to kstat function to be updated if the command
15642 * is queued for a delayed retry. May be NULL if no kstat
15643 * update is desired.
15644 *
15645 * Context: May be called from interrupt context.
15646 */
15647
15648 static void
15649 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15650 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
15651 code), void *user_arg, int failure_code, clock_t retry_delay,
15652 void (*statp)(kstat_io_t *))
15653 {
15654 struct sd_xbuf *xp;
15655 struct scsi_pkt *pktp;
15656 struct sd_fm_internal *sfip;
15657
15658 ASSERT(un != NULL);
15659 ASSERT(mutex_owned(SD_MUTEX(un)));
15660 ASSERT(bp != NULL);
15661 xp = SD_GET_XBUF(bp);
15662 ASSERT(xp != NULL);
15663 pktp = SD_GET_PKTP(bp);
15664 ASSERT(pktp != NULL);
15665
15666 sfip = (struct sd_fm_internal *)un->un_fm_private;
15667 ASSERT(sfip != NULL);
15668
15669 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15670 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15671
15672 /*
15673 * If we are syncing or dumping, fail the command to avoid
15674 * recursively calling back into scsi_transport().
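 *
 * As a usage illustration only, a typical invocation from elsewhere
 * in the driver retries a busy command after a delay while counting
 * it back onto the wait queue kstat:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
 *	    un->un_busy_timeout / 500, kstat_waitq_enter);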
15675 */
15676 if (ddi_in_panic()) {
15677 goto fail_command_no_log;
15678 }
15679
15680 /*
15681 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15682 * log an error and fail the command.
15683 */
15684 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15685 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15686 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15687 sd_dump_memory(un, SD_LOG_IO, "CDB",
15688 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15689 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15690 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15691 goto fail_command;
15692 }
15693
15694 /*
15695 * If we are suspended, then put the command onto the head of the
15696 * wait queue since we don't want to start more commands, and
15697 * clear the un_retry_bp. The next time we are resumed, we will
15698 * handle the commands in the wait queue.
15699 */
15700 switch (un->un_state) {
15701 case SD_STATE_SUSPENDED:
15702 case SD_STATE_DUMPING:
15703 bp->av_forw = un->un_waitq_headp;
15704 un->un_waitq_headp = bp;
15705 if (un->un_waitq_tailp == NULL) {
15706 un->un_waitq_tailp = bp;
15707 }
15708 if (bp == un->un_retry_bp) {
15709 un->un_retry_bp = NULL;
15710 un->un_retry_statp = NULL;
15711 }
15712 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15713 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15714 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15715 return;
15716 default:
15717 break;
15718 }
15719
15720 /*
15721 * If the caller wants us to check FLAG_ISOLATE, then see if that
15722 * is set; if it is then we do not want to retry the command.
15723 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15724 */
15725 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15726 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15727 goto fail_command;
15728 }
15729 }
15730
15731
15732 /*
15733 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15734 * command timeout or a selection timeout has occurred. This means
15735 * that we were unable to establish any kind of communication with
15736 * the target, and subsequent retries and/or commands are likely
15737 * to encounter similar results and take a long time to complete.
15738 *
15739 * If this is a failfast error condition, we need to update the
15740 * failfast state, even if this bp does not have B_FAILFAST set.
15741 */
15742 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15743 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15744 ASSERT(un->un_failfast_bp == NULL);
15745 /*
15746 * If we are already in the active failfast state, and
15747 * another failfast error condition has been detected,
15748 * then fail this command if it has B_FAILFAST set.
15749 * If B_FAILFAST is clear, then maintain the legacy
15750 * behavior of retrying heroically, even though this will
15751 * take a lot more time to fail the command.
15752 */
15753 if (bp->b_flags & B_FAILFAST) {
15754 goto fail_command;
15755 }
15756 } else {
15757 /*
15758 * We're not in the active failfast state, but we
15759 * have a failfast error condition, so we must begin
15760 * transition to the next state. We do this regardless
15761 * of whether or not this bp has B_FAILFAST set.
15762 */
15763 if (un->un_failfast_bp == NULL) {
15764 /*
15765 * This is the first bp to meet a failfast
15766 * condition so save it on un_failfast_bp &
15767 * do normal retry processing. Do not enter
15768 * active failfast state yet. This marks
15769 * entry into the "failfast pending" state.
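 *
 * Failfast lifecycle, as an illustrative sketch:
 *
 *	INACTIVE --1st failfast error--> "pending"
 *	  (un_failfast_bp = bp)
 *	pending --same bp fails again--> ACTIVE
 *	  (queues flushed; B_FAILFAST bufs fail at once)
 *	any state --non-failfast result--> INACTIVE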
15770 */
15771 un->un_failfast_bp = bp;
15772
15773 } else if (un->un_failfast_bp == bp) {
15774 /*
15775 * This is the second time *this* bp has
15776 * encountered a failfast error condition,
15777 * so enter active failfast state & flush
15778 * queues as appropriate.
15779 */
15780 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15781 un->un_failfast_bp = NULL;
15782 sd_failfast_flushq(un);
15783
15784 /*
15785 * Fail this bp now if B_FAILFAST set;
15786 * otherwise continue with retries. (It would
15787 * be pretty ironic if this bp succeeded on a
15788 * subsequent retry after we just flushed all
15789 * the queues).
15790 */
15791 if (bp->b_flags & B_FAILFAST) {
15792 goto fail_command;
15793 }
15794
15795 #if !defined(lint) && !defined(__lint)
15796 } else {
15797 /*
15798 * If neither of the preceding conditionals
15799 * was true, it means that there is some
15800 * *other* bp that has met an initial failfast
15801 * condition and is currently either being
15802 * retried or is waiting to be retried. In
15803 * that case we should perform normal retry
15804 * processing on *this* bp, since there is a
15805 * chance that the current failfast condition
15806 * is transient and recoverable. If that does
15807 * not turn out to be the case, then retries
15808 * will be cleared when the wait queue is
15809 * flushed anyway.
15810 */
15811 #endif
15812 }
15813 }
15814 } else {
15815 /*
15816 * SD_RETRIES_FAILFAST is clear, which indicates that we
15817 * likely were able to at least establish some level of
15818 * communication with the target and subsequent commands
15819 * and/or retries are likely to get through to the target.
15820 * In this case we want to be aggressive about clearing
15821 * the failfast state. Note that this does not affect
15822 * the "failfast pending" condition.
15823 */
15824 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15825 }
15826
15827
15828 /*
15829 * Check the specified retry count to see if we can still do
15830 * any retries with this pkt before we should fail it.
15831 */
15832 switch (retry_check_flag & SD_RETRIES_MASK) {
15833 case SD_RETRIES_VICTIM:
15834 /*
15835 * Check the victim retry count. If exhausted, then fall
15836 * through & check against the standard retry count.
15837 */
15838 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15839 /* Increment count & proceed with the retry */
15840 xp->xb_victim_retry_count++;
15841 break;
15842 }
15843 /* Victim retries exhausted, fall back to std. retries... */
15844 /* FALLTHRU */
15845
15846 case SD_RETRIES_STANDARD:
15847 if (xp->xb_retry_count >= un->un_retry_count) {
15848 /* Retries exhausted, fail the command */
15849 SD_TRACE(SD_LOG_IO_CORE, un,
15850 "sd_retry_command: retries exhausted!\n");
15851 /*
15852 * update b_resid for failed SCMD_READ & SCMD_WRITE
15853 * commands with nonzero pkt_resid.
15854 */ 15855 if ((pktp->pkt_reason == CMD_CMPLT) && 15856 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15857 (pktp->pkt_resid != 0)) { 15858 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15859 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15860 SD_UPDATE_B_RESID(bp, pktp); 15861 } 15862 } 15863 goto fail_command; 15864 } 15865 xp->xb_retry_count++; 15866 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15867 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15868 break; 15869 15870 case SD_RETRIES_UA: 15871 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15872 /* Retries exhausted, fail the command */ 15873 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15874 "Unit Attention retries exhausted. " 15875 "Check the target.\n"); 15876 goto fail_command; 15877 } 15878 xp->xb_ua_retry_count++; 15879 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15880 "sd_retry_command: retry count:%d\n", 15881 xp->xb_ua_retry_count); 15882 break; 15883 15884 case SD_RETRIES_BUSY: 15885 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15886 /* Retries exhausted, fail the command */ 15887 SD_TRACE(SD_LOG_IO_CORE, un, 15888 "sd_retry_command: retries exhausted!\n"); 15889 goto fail_command; 15890 } 15891 xp->xb_retry_count++; 15892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15893 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15894 break; 15895 15896 case SD_RETRIES_NOCHECK: 15897 default: 15898 /* No retry count to check. Just proceed with the retry */ 15899 break; 15900 } 15901 15902 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15903 15904 /* 15905 * If this is a non-USCSI command being retried 15906 * during execution last time, we should post an ereport with 15907 * driver-assessment of the value "retry". 15908 * For partial DMA, request sense and STATUS_QFULL, there are no 15909 * hardware errors, we bypass ereport posting. 15910 */ 15911 if (failure_code != 0) { 15912 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15913 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15914 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15915 } 15916 } 15917 15918 /* 15919 * If we were given a zero timeout, we must attempt to retry the 15920 * command immediately (ie, without a delay). 15921 */ 15922 if (retry_delay == 0) { 15923 /* 15924 * Check some limiting conditions to see if we can actually 15925 * do the immediate retry. If we cannot, then we must 15926 * fall back to queueing up a delayed retry. 15927 */ 15928 if (un->un_ncmds_in_transport >= un->un_throttle) { 15929 /* 15930 * We are at the throttle limit for the target, 15931 * fall back to delayed retry. 15932 */ 15933 retry_delay = un->un_busy_timeout; 15934 statp = kstat_waitq_enter; 15935 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15936 "sd_retry_command: immed. retry hit " 15937 "throttle!\n"); 15938 } else { 15939 /* 15940 * We're clear to proceed with the immediate retry. 15941 * First call the user-provided function (if any) 15942 */ 15943 if (user_funcp != NULL) { 15944 (*user_funcp)(un, bp, user_arg, 15945 SD_IMMEDIATE_RETRY_ISSUED); 15946 #ifdef __lock_lint 15947 sd_print_incomplete_msg(un, bp, user_arg, 15948 SD_IMMEDIATE_RETRY_ISSUED); 15949 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15950 SD_IMMEDIATE_RETRY_ISSUED); 15951 sd_print_sense_failed_msg(un, bp, user_arg, 15952 SD_IMMEDIATE_RETRY_ISSUED); 15953 #endif 15954 } 15955 15956 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15957 "sd_retry_command: issuing immediate retry\n"); 15958 15959 /* 15960 * Call sd_start_cmds() to transport the command to 15961 * the target. 
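 *
 * (Immediate-retry note, illustrative: bp is passed to
 * sd_start_cmds() as its immed_bp argument, so it is transported
 * ahead of the wait queue; see the FLAG_HEAD handling there.)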
15962 */ 15963 sd_start_cmds(un, bp); 15964 15965 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15966 "sd_retry_command exit\n"); 15967 return; 15968 } 15969 } 15970 15971 /* 15972 * Set up to retry the command after a delay. 15973 * First call the user-provided function (if any) 15974 */ 15975 if (user_funcp != NULL) { 15976 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15977 } 15978 15979 sd_set_retry_bp(un, bp, retry_delay, statp); 15980 15981 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15982 return; 15983 15984 fail_command: 15985 15986 if (user_funcp != NULL) { 15987 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15988 } 15989 15990 fail_command_no_log: 15991 15992 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15993 "sd_retry_command: returning failed command\n"); 15994 15995 sd_return_failed_command(un, bp, failure_code); 15996 15997 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15998 } 15999 16000 16001 /* 16002 * Function: sd_set_retry_bp 16003 * 16004 * Description: Set up the given bp for retry. 16005 * 16006 * Arguments: un - ptr to associated softstate 16007 * bp - ptr to buf(9S) for the command 16008 * retry_delay - time interval before issuing retry (may be 0) 16009 * statp - optional pointer to kstat function 16010 * 16011 * Context: May be called under interrupt context 16012 */ 16013 16014 static void 16015 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 16016 void (*statp)(kstat_io_t *)) 16017 { 16018 ASSERT(un != NULL); 16019 ASSERT(mutex_owned(SD_MUTEX(un))); 16020 ASSERT(bp != NULL); 16021 16022 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16023 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 16024 16025 /* 16026 * Indicate that the command is being retried. This will not allow any 16027 * other commands on the wait queue to be transported to the target 16028 * until this command has been completed (success or failure). The 16029 * "retry command" is not transported to the target until the given 16030 * time delay expires, unless the user specified a 0 retry_delay. 16031 * 16032 * Note: the timeout(9F) callback routine is what actually calls 16033 * sd_start_cmds() to transport the command, with the exception of a 16034 * zero retry_delay. The only current implementor of a zero retry delay 16035 * is the case where a START_STOP_UNIT is sent to spin-up a device. 16036 */ 16037 if (un->un_retry_bp == NULL) { 16038 ASSERT(un->un_retry_statp == NULL); 16039 un->un_retry_bp = bp; 16040 16041 /* 16042 * If the user has not specified a delay the command should 16043 * be queued and no timeout should be scheduled. 16044 */ 16045 if (retry_delay == 0) { 16046 /* 16047 * Save the kstat pointer that will be used in the 16048 * call to SD_UPDATE_KSTATS() below, so that 16049 * sd_start_cmds() can correctly decrement the waitq 16050 * count when it is time to transport this command. 16051 */ 16052 un->un_retry_statp = statp; 16053 goto done; 16054 } 16055 } 16056 16057 if (un->un_retry_bp == bp) { 16058 /* 16059 * Save the kstat pointer that will be used in the call to 16060 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 16061 * correctly decrement the waitq count when it is time to 16062 * transport this command. 16063 */ 16064 un->un_retry_statp = statp; 16065 16066 /* 16067 * Schedule a timeout if: 16068 * 1) The user has specified a delay. 16069 * 2) There is not a START_STOP_UNIT callback pending. 
16070 * 16071 * If no delay has been specified, then it is up to the caller 16072 * to ensure that IO processing continues without stalling. 16073 * Effectively, this means that the caller will issue the 16074 * required call to sd_start_cmds(). The START_STOP_UNIT 16075 * callback does this after the START STOP UNIT command has 16076 * completed. In either of these cases we should not schedule 16077 * a timeout callback here. Also don't schedule the timeout if 16078 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 16079 */ 16080 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 16081 (un->un_direct_priority_timeid == NULL)) { 16082 un->un_retry_timeid = 16083 timeout(sd_start_retry_command, un, retry_delay); 16084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16085 "sd_set_retry_bp: setting timeout: un: 0x%p" 16086 " bp:0x%p un_retry_timeid:0x%p\n", 16087 un, bp, un->un_retry_timeid); 16088 } 16089 } else { 16090 /* 16091 * We only get in here if there is already another command 16092 * waiting to be retried. In this case, we just put the 16093 * given command onto the wait queue, so it can be transported 16094 * after the current retry command has completed. 16095 * 16096 * Also we have to make sure that if the command at the head 16097 * of the wait queue is the un_failfast_bp, that we do not 16098 * put ahead of it any other commands that are to be retried. 16099 */ 16100 if ((un->un_failfast_bp != NULL) && 16101 (un->un_failfast_bp == un->un_waitq_headp)) { 16102 /* 16103 * Enqueue this command AFTER the first command on 16104 * the wait queue (which is also un_failfast_bp). 16105 */ 16106 bp->av_forw = un->un_waitq_headp->av_forw; 16107 un->un_waitq_headp->av_forw = bp; 16108 if (un->un_waitq_headp == un->un_waitq_tailp) { 16109 un->un_waitq_tailp = bp; 16110 } 16111 } else { 16112 /* Enqueue this command at the head of the waitq. */ 16113 bp->av_forw = un->un_waitq_headp; 16114 un->un_waitq_headp = bp; 16115 if (un->un_waitq_tailp == NULL) { 16116 un->un_waitq_tailp = bp; 16117 } 16118 } 16119 16120 if (statp == NULL) { 16121 statp = kstat_waitq_enter; 16122 } 16123 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16124 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 16125 } 16126 16127 done: 16128 if (statp != NULL) { 16129 SD_UPDATE_KSTATS(un, statp, bp); 16130 } 16131 16132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16133 "sd_set_retry_bp: exit un:0x%p\n", un); 16134 } 16135 16136 16137 /* 16138 * Function: sd_start_retry_command 16139 * 16140 * Description: Start the command that has been waiting on the target's 16141 * retry queue. Called from timeout(9F) context after the 16142 * retry delay interval has expired. 16143 * 16144 * Arguments: arg - pointer to associated softstate for the device. 16145 * 16146 * Context: timeout(9F) thread context. May not sleep. 
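 *
 * Scheduling sketch (illustrative): sd_set_retry_bp() arms this
 * callback via
 *
 *	un->un_retry_timeid =
 *	    timeout(sd_start_retry_command, un, retry_delay);
 *
 * and un_retry_timeid is cleared again below before the retry is
 * handed to sd_start_cmds().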
16147 */ 16148 16149 static void 16150 sd_start_retry_command(void *arg) 16151 { 16152 struct sd_lun *un = arg; 16153 16154 ASSERT(un != NULL); 16155 ASSERT(!mutex_owned(SD_MUTEX(un))); 16156 16157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16158 "sd_start_retry_command: entry\n"); 16159 16160 mutex_enter(SD_MUTEX(un)); 16161 16162 un->un_retry_timeid = NULL; 16163 16164 if (un->un_retry_bp != NULL) { 16165 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16166 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 16167 un, un->un_retry_bp); 16168 sd_start_cmds(un, un->un_retry_bp); 16169 } 16170 16171 mutex_exit(SD_MUTEX(un)); 16172 16173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16174 "sd_start_retry_command: exit\n"); 16175 } 16176 16177 /* 16178 * Function: sd_rmw_msg_print_handler 16179 * 16180 * Description: If RMW mode is enabled and warning message is triggered 16181 * print I/O count during a fixed interval. 16182 * 16183 * Arguments: arg - pointer to associated softstate for the device. 16184 * 16185 * Context: timeout(9F) thread context. May not sleep. 16186 */ 16187 static void 16188 sd_rmw_msg_print_handler(void *arg) 16189 { 16190 struct sd_lun *un = arg; 16191 16192 ASSERT(un != NULL); 16193 ASSERT(!mutex_owned(SD_MUTEX(un))); 16194 16195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16196 "sd_rmw_msg_print_handler: entry\n"); 16197 16198 mutex_enter(SD_MUTEX(un)); 16199 16200 if (un->un_rmw_incre_count > 0) { 16201 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16202 "%"PRIu64" I/O requests are not aligned with %d disk " 16203 "sector size in %ld seconds. They are handled through " 16204 "Read Modify Write but the performance is very low!\n", 16205 un->un_rmw_incre_count, un->un_tgt_blocksize, 16206 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 16207 un->un_rmw_incre_count = 0; 16208 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 16209 un, SD_RMW_MSG_PRINT_TIMEOUT); 16210 } else { 16211 un->un_rmw_msg_timeid = NULL; 16212 } 16213 16214 mutex_exit(SD_MUTEX(un)); 16215 16216 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16217 "sd_rmw_msg_print_handler: exit\n"); 16218 } 16219 16220 /* 16221 * Function: sd_start_direct_priority_command 16222 * 16223 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 16224 * received TRAN_BUSY when we called scsi_transport() to send it 16225 * to the underlying HBA. This function is called from timeout(9F) 16226 * context after the delay interval has expired. 16227 * 16228 * Arguments: arg - pointer to associated buf(9S) to be restarted. 16229 * 16230 * Context: timeout(9F) thread context. May not sleep. 16231 */ 16232 16233 static void 16234 sd_start_direct_priority_command(void *arg) 16235 { 16236 struct buf *priority_bp = arg; 16237 struct sd_lun *un; 16238 16239 ASSERT(priority_bp != NULL); 16240 un = SD_GET_UN(priority_bp); 16241 ASSERT(un != NULL); 16242 ASSERT(!mutex_owned(SD_MUTEX(un))); 16243 16244 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16245 "sd_start_direct_priority_command: entry\n"); 16246 16247 mutex_enter(SD_MUTEX(un)); 16248 un->un_direct_priority_timeid = NULL; 16249 sd_start_cmds(un, priority_bp); 16250 mutex_exit(SD_MUTEX(un)); 16251 16252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16253 "sd_start_direct_priority_command: exit\n"); 16254 } 16255 16256 16257 /* 16258 * Function: sd_send_request_sense_command 16259 * 16260 * Description: Sends a REQUEST SENSE command to the target 16261 * 16262 * Context: May be called from interrupt context. 
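 *
 * Flow sketch (illustrative): the failed bp is parked via
 * sd_mark_rqs_busy(un, bp) and the pre-allocated REQUEST SENSE
 * command (un->un_rqs_bp, built in sd_alloc_rqs()) is issued with
 * FLAG_SENSING | FLAG_HEAD; sdintr() later pairs the returned
 * sense data back up with bp via sd_mark_rqs_idle().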
16263 */
16264
16265 static void
16266 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16267 struct scsi_pkt *pktp)
16268 {
16269 ASSERT(bp != NULL);
16270 ASSERT(un != NULL);
16271 ASSERT(mutex_owned(SD_MUTEX(un)));
16272
16273 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16274 "entry: buf:0x%p\n", bp);
16275
16276 /*
16277 * If we are syncing or dumping, then fail the command to avoid a
16278 * recursive callback into scsi_transport(). Also fail the command
16279 * if we are suspended (legacy behavior).
16280 */
16281 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16282 (un->un_state == SD_STATE_DUMPING)) {
16283 sd_return_failed_command(un, bp, EIO);
16284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16285 "sd_send_request_sense_command: syncing/dumping, exit\n");
16286 return;
16287 }
16288
16289 /*
16290 * Retry the failed command and don't issue the request sense if:
16291 * 1) the sense buf is busy
16292 * 2) we have 1 or more outstanding commands on the target
16293 * (the sense data will be cleared or invalidated anyway)
16294 *
16295 * Note: There could be an issue with not checking a retry limit here;
16296 * the problem is determining which retry limit to check.
16297 */
16298 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16299 /* Don't retry if the command is flagged as non-retryable */
16300 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16301 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16302 NULL, NULL, 0, un->un_busy_timeout,
16303 kstat_waitq_enter);
16304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16305 "sd_send_request_sense_command: "
16306 "at full throttle, retrying exit\n");
16307 } else {
16308 sd_return_failed_command(un, bp, EIO);
16309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16310 "sd_send_request_sense_command: "
16311 "at full throttle, non-retryable exit\n");
16312 }
16313 return;
16314 }
16315
16316 sd_mark_rqs_busy(un, bp);
16317 sd_start_cmds(un, un->un_rqs_bp);
16318
16319 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16320 "sd_send_request_sense_command: exit\n");
16321 }
16322
16323
16324 /*
16325 * Function: sd_mark_rqs_busy
16326 *
16327 * Description: Indicate that the request sense bp for this instance is
16328 * in use.
16329 *
16330 * Context: May be called under interrupt context
16331 */
16332
16333 static void
16334 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16335 {
16336 struct sd_xbuf *sense_xp;
16337
16338 ASSERT(un != NULL);
16339 ASSERT(bp != NULL);
16340 ASSERT(mutex_owned(SD_MUTEX(un)));
16341 ASSERT(un->un_sense_isbusy == 0);
16342
16343 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16344 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16345
16346 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16347 ASSERT(sense_xp != NULL);
16348
16349 SD_INFO(SD_LOG_IO, un,
16350 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16351
16352 ASSERT(sense_xp->xb_pktp != NULL);
16353 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16354 == (FLAG_SENSING | FLAG_HEAD));
16355
16356 un->un_sense_isbusy = 1;
16357 un->un_rqs_bp->b_resid = 0;
16358 sense_xp->xb_pktp->pkt_resid = 0;
16359 sense_xp->xb_pktp->pkt_reason = 0;
16360
16361 /* So we can get back the bp at interrupt time! */
16362 sense_xp->xb_sense_bp = bp;
16363
16364 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16365
16366 /*
16367 * Mark this buf as awaiting sense data. (This is already set in
16368 * the pkt_flags for the RQS packet.)
16369 	 */
16370 	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16371 
16372 	/* Request sense down same path */
16373 	if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16374 	    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16375 		sense_xp->xb_pktp->pkt_path_instance =
16376 		    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16377 
16378 	sense_xp->xb_retry_count = 0;
16379 	sense_xp->xb_victim_retry_count = 0;
16380 	sense_xp->xb_ua_retry_count = 0;
16381 	sense_xp->xb_nr_retry_count = 0;
16382 	sense_xp->xb_dma_resid = 0;
16383 
16384 	/* Clean up the fields for auto-request sense */
16385 	sense_xp->xb_sense_status = 0;
16386 	sense_xp->xb_sense_state = 0;
16387 	sense_xp->xb_sense_resid = 0;
16388 	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16389 
16390 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16391 }
16392 
16393 
16394 /*
16395  * Function:	sd_mark_rqs_idle
16396  *
16397  * Description: SD_MUTEX must be held continuously through this routine
16398  *		to prevent reuse of the rqs struct before the caller can
16399  *		complete its processing.
16400  *
16401  * Return Code: Pointer to the RQS buf
16402  *
16403  * Context:	May be called under interrupt context
16404  */
16405 
16406 static struct buf *
16407 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16408 {
16409 	struct buf *bp;
16410 	ASSERT(un != NULL);
16411 	ASSERT(sense_xp != NULL);
16412 	ASSERT(mutex_owned(SD_MUTEX(un)));
16413 	ASSERT(un->un_sense_isbusy != 0);
16414 
16415 	un->un_sense_isbusy = 0;
16416 	bp = sense_xp->xb_sense_bp;
16417 	sense_xp->xb_sense_bp = NULL;
16418 
16419 	/* This pkt is no longer interested in getting sense data */
16420 	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16421 
16422 	return (bp);
16423 }
16424 
16425 
16426 
16427 /*
16428  * Function:	sd_alloc_rqs
16429  *
16430  * Description: Set up the unit to receive auto request sense data
16431  *
16432  * Return Code: DDI_SUCCESS or DDI_FAILURE
16433  *
16434  * Context:	Called under attach(9E) context
16435  */
16436 
16437 static int
16438 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16439 {
16440 	struct sd_xbuf *xp;
16441 
16442 	ASSERT(un != NULL);
16443 	ASSERT(!mutex_owned(SD_MUTEX(un)));
16444 	ASSERT(un->un_rqs_bp == NULL);
16445 	ASSERT(un->un_rqs_pktp == NULL);
16446 
16447 	/*
16448 	 * First allocate the required buf and scsi_pkt structs, then set up
16449 	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16450 	 */
16451 	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16452 	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16453 	if (un->un_rqs_bp == NULL) {
16454 		return (DDI_FAILURE);
16455 	}
16456 
16457 	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16458 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16459 
16460 	if (un->un_rqs_pktp == NULL) {
16461 		sd_free_rqs(un);
16462 		return (DDI_FAILURE);
16463 	}
16464 
16465 	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16466 	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16467 	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16468 
16469 	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16470 
16471 	/* Set up the other needed members in the ARQ scsi_pkt. */
16472 	un->un_rqs_pktp->pkt_comp   = sdintr;
16473 	un->un_rqs_pktp->pkt_time   = sd_io_time;
16474 	un->un_rqs_pktp->pkt_flags |=
16475 	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */
16476 
16477 	/*
16478 	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16479 	 * provide any intpkt, destroypkt routines as we take care of
16480 	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16481 	 */
16482 	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16483 	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16484 	xp->xb_pktp = un->un_rqs_pktp;
16485 	SD_INFO(SD_LOG_ATTACH_DETACH, un,
16486 	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16487 	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16488 
16489 	/*
16490 	 * Save the pointer to the request sense private bp so it can
16491 	 * be retrieved in sdintr.
16492 	 */
16493 	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16494 	ASSERT(un->un_rqs_bp->b_private == xp);
16495 
16496 	/*
16497 	 * See if the HBA supports auto-request sense for the specified
16498 	 * target/lun. If it does, then try to enable it (if not already
16499 	 * enabled).
16500 	 *
16501 	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16502 	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16503 	 * return success. However, in both of these cases ARQ is always
16504 	 * enabled and scsi_ifgetcap will always return true. The best approach
16505 	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16506 	 *
16507 	 * In a third case, the HBA (adp) always returns enabled on
16508 	 * scsi_ifgetcap even when ARQ is not enabled, so the best approach
16509 	 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
16510 	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16511 	 */
16512 
16513 	if (un->un_f_is_fibre == TRUE) {
16514 		un->un_f_arq_enabled = TRUE;
16515 	} else {
16516 #if defined(__i386) || defined(__amd64)
16517 		/*
16518 		 * Circumvent the Adaptec bug, remove this code when
16519 		 * the bug is fixed
16520 		 */
16521 		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16522 #endif
16523 		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16524 		case 0:
16525 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16526 			    "sd_alloc_rqs: HBA supports ARQ\n");
16527 			/*
16528 			 * ARQ is supported by this HBA but currently is not
16529 			 * enabled. Attempt to enable it and if successful then
16530 			 * mark this instance as ARQ enabled.
16531 			 */
16532 			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16533 			    == 1) {
16534 				/* Successfully enabled ARQ in the HBA */
16535 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16536 				    "sd_alloc_rqs: ARQ enabled\n");
16537 				un->un_f_arq_enabled = TRUE;
16538 			} else {
16539 				/* Could not enable ARQ in the HBA */
16540 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16541 				    "sd_alloc_rqs: failed ARQ enable\n");
16542 				un->un_f_arq_enabled = FALSE;
16543 			}
16544 			break;
16545 		case 1:
16546 			/*
16547 			 * ARQ is supported by this HBA and is already enabled.
16548 			 * Just mark ARQ as enabled for this instance.
16549 			 */
16550 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16551 			    "sd_alloc_rqs: ARQ already enabled\n");
16552 			un->un_f_arq_enabled = TRUE;
16553 			break;
16554 		default:
16555 			/*
16556 			 * ARQ is not supported by this HBA; disable it for this
16557 			 * instance.
16558 			 */
16559 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16560 			    "sd_alloc_rqs: HBA does not support ARQ\n");
16561 			un->un_f_arq_enabled = FALSE;
16562 			break;
16563 		}
16564 	}
16565 
16566 	return (DDI_SUCCESS);
16567 }
16568 
16569 
16570 /*
16571  * Function:	sd_free_rqs
16572  *
16573  * Description: Cleanup for the per-instance RQS command.
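 */

/*
 * A minimal illustrative sketch of the getcap-then-setcap ARQ probe
 * described in sd_alloc_rqs() above (sd_example_try_enable_arq is a
 * hypothetical helper, not part of the driver). scsi_ifgetcap()
 * returns 1 if ARQ is already enabled and 0 if it is supported but
 * currently off.
 */
static int
sd_example_try_enable_arq(struct scsi_address *ap)
{
	/* Already enabled: nothing more to do. */
	if (scsi_ifgetcap(ap, "auto-rqsense", 1) == 1)
		return (1);

	/* Supported but disabled (or unknown): try to turn it on. */
	return (scsi_ifsetcap(ap, "auto-rqsense", 1, 1) == 1);
}

/*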
16574  *
16575  * Context:	Kernel thread context
16576  */
16577 
16578 static void
16579 sd_free_rqs(struct sd_lun *un)
16580 {
16581 	ASSERT(un != NULL);
16582 
16583 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16584 
16585 	/*
16586 	 * If consistent memory is bound to a scsi_pkt, the pkt
16587 	 * has to be destroyed *before* freeing the consistent memory.
16588 	 * Don't change the sequence of these operations.
16589 	 * scsi_destroy_pkt() might access memory after it has been
16590 	 * freed by scsi_free_consistent_buf(), which is not allowed.
16591 	 */
16592 	if (un->un_rqs_pktp != NULL) {
16593 		scsi_destroy_pkt(un->un_rqs_pktp);
16594 		un->un_rqs_pktp = NULL;
16595 	}
16596 
16597 	if (un->un_rqs_bp != NULL) {
16598 		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16599 		if (xp != NULL) {
16600 			kmem_free(xp, sizeof (struct sd_xbuf));
16601 		}
16602 		scsi_free_consistent_buf(un->un_rqs_bp);
16603 		un->un_rqs_bp = NULL;
16604 	}
16605 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16606 }
16607 
16608 
16609 
16610 /*
16611  * Function:	sd_reduce_throttle
16612  *
16613  * Description: Reduces the maximum # of outstanding commands on a
16614  *		target to the current number of outstanding commands.
16615  *		Queues a timeout(9F) callback to restore the limit
16616  *		after a specified interval has elapsed.
16617  *		Typically used when we get a TRAN_BUSY return code
16618  *		back from scsi_transport().
16619  *
16620  * Arguments:	un - ptr to the sd_lun softstate struct
16621  *		throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16622  *
16623  * Context:	May be called from interrupt context
16624  */
16625 
16626 static void
16627 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16628 {
16629 	ASSERT(un != NULL);
16630 	ASSERT(mutex_owned(SD_MUTEX(un)));
16631 	ASSERT(un->un_ncmds_in_transport >= 0);
16632 
16633 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16634 	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16635 	    un, un->un_throttle, un->un_ncmds_in_transport);
16636 
16637 	if (un->un_throttle > 1) {
16638 		if (un->un_f_use_adaptive_throttle == TRUE) {
16639 			switch (throttle_type) {
16640 			case SD_THROTTLE_TRAN_BUSY:
16641 				if (un->un_busy_throttle == 0) {
16642 					un->un_busy_throttle = un->un_throttle;
16643 				}
16644 				break;
16645 			case SD_THROTTLE_QFULL:
16646 				un->un_busy_throttle = 0;
16647 				break;
16648 			default:
16649 				ASSERT(FALSE);
16650 			}
16651 
16652 			if (un->un_ncmds_in_transport > 0) {
16653 				un->un_throttle = un->un_ncmds_in_transport;
16654 			}
16655 
16656 		} else {
16657 			if (un->un_ncmds_in_transport == 0) {
16658 				un->un_throttle = 1;
16659 			} else {
16660 				un->un_throttle = un->un_ncmds_in_transport;
16661 			}
16662 		}
16663 	}
16664 
16665 	/* Reschedule the timeout if none is currently active */
16666 	if (un->un_reset_throttle_timeid == NULL) {
16667 		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16668 		    un, SD_THROTTLE_RESET_INTERVAL);
16669 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16670 		    "sd_reduce_throttle: timeout scheduled!\n");
16671 	}
16672 
16673 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16674 	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16675 }
16676 
16677 
16678 
16679 /*
16680  * Function:	sd_restore_throttle
16681  *
16682  * Description: Callback function for timeout(9F).  Resets the current
16683  *		value of un->un_throttle to its default.
16684  *
16685  * Arguments:	arg - pointer to associated softstate for the device.
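 */

/*
 * A minimal illustrative sketch of the clamp-and-schedule step that
 * sd_reduce_throttle() above performs in its non-adaptive path
 * (sd_example_clamp_throttle is a hypothetical helper, not part of
 * the driver).
 */
static void
sd_example_clamp_throttle(struct sd_lun *un)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Clamp the limit to what is actually outstanding (minimum 1). */
	un->un_throttle = (un->un_ncmds_in_transport > 0) ?
	    un->un_ncmds_in_transport : 1;

	/* Arrange for sd_restore_throttle() to lift the clamp later. */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
	}
}

/*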
16686  *
16687  * Context:	May be called from interrupt context
16688  */
16689 
16690 static void
16691 sd_restore_throttle(void *arg)
16692 {
16693 	struct sd_lun	*un = arg;
16694 
16695 	ASSERT(un != NULL);
16696 	ASSERT(!mutex_owned(SD_MUTEX(un)));
16697 
16698 	mutex_enter(SD_MUTEX(un));
16699 
16700 	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16701 	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16702 
16703 	un->un_reset_throttle_timeid = NULL;
16704 
16705 	if (un->un_f_use_adaptive_throttle == TRUE) {
16706 		/*
16707 		 * If un_busy_throttle is nonzero, then it contains the
16708 		 * value that un_throttle was when we got a TRAN_BUSY back
16709 		 * from scsi_transport(). We want to revert back to this
16710 		 * value.
16711 		 *
16712 		 * In the QFULL case, the throttle limit will incrementally
16713 		 * increase until it reaches max throttle.
16714 		 */
16715 		if (un->un_busy_throttle > 0) {
16716 			un->un_throttle = un->un_busy_throttle;
16717 			un->un_busy_throttle = 0;
16718 		} else {
16719 			/*
16720 			 * Increase the throttle by 10% to open the gate
16721 			 * slowly; schedule another restore if the saved
16722 			 * throttle has not been reached yet.
16723 			 */
16724 			short throttle;
16725 			if (sd_qfull_throttle_enable) {
16726 				throttle = un->un_throttle +
16727 				    max((un->un_throttle / 10), 1);
16728 				un->un_throttle =
16729 				    (throttle < un->un_saved_throttle) ?
16730 				    throttle : un->un_saved_throttle;
16731 				if (un->un_throttle < un->un_saved_throttle) {
16732 					un->un_reset_throttle_timeid =
16733 					    timeout(sd_restore_throttle,
16734 					    un,
16735 					    SD_QFULL_THROTTLE_RESET_INTERVAL);
16736 				}
16737 			}
16738 		}
16739 
16740 		/*
16741 		 * If un_throttle has fallen below the low-water mark, we
16742 		 * restore the maximum value here (and allow it to ratchet
16743 		 * down again if necessary).
16744 		 */
16745 		if (un->un_throttle < un->un_min_throttle) {
16746 			un->un_throttle = un->un_saved_throttle;
16747 		}
16748 	} else {
16749 		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16750 		    "restoring limit from 0x%x to 0x%x\n",
16751 		    un->un_throttle, un->un_saved_throttle);
16752 		un->un_throttle = un->un_saved_throttle;
16753 	}
16754 
16755 	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16756 	    "sd_restore_throttle: calling sd_start_cmds!\n");
16757 
16758 	sd_start_cmds(un, NULL);
16759 
16760 	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16761 	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16762 	    un, un->un_throttle);
16763 
16764 	mutex_exit(SD_MUTEX(un));
16765 
16766 	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16767 }
16768 
16769 /*
16770  * Function:	sdrunout
16771  *
16772  * Description: Callback routine for scsi_init_pkt when a resource allocation
16773  *		fails.
16774  *
16775  * Arguments:	arg - a pointer to the sd_lun unit struct for the particular
16776  *		soft state instance.
16777  *
16778  * Return Code: The scsi_init_pkt routine allows for the callback function to
16779  *		return a 0 indicating the callback should be rescheduled or a 1
16780  *		indicating not to reschedule. This routine always returns 1
16781  *		because the driver always provides a callback function to
16782  *		scsi_init_pkt. This results in a callback always being scheduled
16783  *		(via the scsi_init_pkt callback implementation) if a resource
16784  *		failure occurs.
16785  *
16786  * Context:	This callback function may not block or call routines that block
16787  *
16788  * Note:	Using the scsi_init_pkt callback facility can result in an I/O
16789  *		request persisting at the head of the list which cannot be
16790  *		satisfied even after multiple retries.
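 */

/*
 * A minimal illustrative sketch of the QFULL ramp used by
 * sd_restore_throttle() above: the gate opens by max(10%, 1) per
 * interval, capped at the saved limit (sd_example_ramp is a
 * hypothetical helper, not part of the driver).
 */
static short
sd_example_ramp(short cur, short saved)
{
	short next = cur + max(cur / 10, 1);

	return ((next < saved) ? next : saved);
}

/*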
 *		In the future, the driver may implement some type of
16791  *		maximum runout count before failing
16792  *		an I/O.
16793  */
16794 
16795 static int
16796 sdrunout(caddr_t arg)
16797 {
16798 	struct sd_lun	*un = (struct sd_lun *)arg;
16799 
16800 	ASSERT(un != NULL);
16801 	ASSERT(!mutex_owned(SD_MUTEX(un)));
16802 
16803 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16804 
16805 	mutex_enter(SD_MUTEX(un));
16806 	sd_start_cmds(un, NULL);
16807 	mutex_exit(SD_MUTEX(un));
16808 	/*
16809 	 * This callback routine always returns 1 (i.e. do not reschedule)
16810 	 * because we always specify sdrunout as the callback handler for
16811 	 * scsi_init_pkt inside the call to sd_start_cmds.
16812 	 */
16813 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16814 	return (1);
16815 }
16816 
16817 
16818 /*
16819  * Function:	sdintr
16820  *
16821  * Description: Completion callback routine for scsi_pkt(9S) structs
16822  *		sent to the HBA driver via scsi_transport(9F).
16823  *
16824  * Context:	Interrupt context
16825  */
16826 
16827 static void
16828 sdintr(struct scsi_pkt *pktp)
16829 {
16830 	struct buf	*bp;
16831 	struct sd_xbuf	*xp;
16832 	struct sd_lun	*un;
16833 	size_t		actual_len;
16834 	sd_ssc_t	*sscp;
16835 
16836 	ASSERT(pktp != NULL);
16837 	bp = (struct buf *)pktp->pkt_private;
16838 	ASSERT(bp != NULL);
16839 	xp = SD_GET_XBUF(bp);
16840 	ASSERT(xp != NULL);
16841 	ASSERT(xp->xb_pktp != NULL);
16842 	un = SD_GET_UN(bp);
16843 	ASSERT(un != NULL);
16844 	ASSERT(!mutex_owned(SD_MUTEX(un)));
16845 
16846 #ifdef SD_FAULT_INJECTION
16847 
16848 	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16849 	/* SD FaultInjection */
16850 	sd_faultinjection(pktp);
16851 
16852 #endif /* SD_FAULT_INJECTION */
16853 
16854 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16855 	    " xp:0x%p, un:0x%p\n", bp, xp, un);
16856 
16857 	mutex_enter(SD_MUTEX(un));
16858 
16859 	ASSERT(un->un_fm_private != NULL);
16860 	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16861 	ASSERT(sscp != NULL);
16862 
16863 	/* Reduce the count of the #commands currently in transport */
16864 	un->un_ncmds_in_transport--;
16865 	ASSERT(un->un_ncmds_in_transport >= 0);
16866 
16867 	/* Increment counter to indicate that the callback routine is active */
16868 	un->un_in_callback++;
16869 
16870 	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16871 
16872 #ifdef SDDEBUG
16873 	if (bp == un->un_retry_bp) {
16874 		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16875 		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16876 		    un, un->un_retry_bp, un->un_ncmds_in_transport);
16877 	}
16878 #endif
16879 
16880 	/*
16881 	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16882 	 * state if needed.
16883 	 */
16884 	if (pktp->pkt_reason == CMD_DEV_GONE) {
16885 		/* Prevent multiple console messages for the same failure. */
16886 		if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16887 			un->un_last_pkt_reason = CMD_DEV_GONE;
16888 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16889 			    "Command failed to complete...Device is gone\n");
16890 		}
16891 		if (un->un_mediastate != DKIO_DEV_GONE) {
16892 			un->un_mediastate = DKIO_DEV_GONE;
16893 			cv_broadcast(&un->un_state_cv);
16894 		}
16895 		/*
16896 		 * If the command happens to be the REQUEST SENSE command,
16897 		 * free up the rqs buf and fail the original command.
16898 		 */
16899 		if (bp == un->un_rqs_bp) {
16900 			bp = sd_mark_rqs_idle(un, xp);
16901 		}
16902 		sd_return_failed_command(un, bp, EIO);
16903 		goto exit;
16904 	}
16905 
16906 	if (pktp->pkt_state & STATE_XARQ_DONE) {
16907 		SD_TRACE(SD_LOG_COMMON, un,
16908 		    "sdintr: extra sense data received. pkt=%p\n", pktp);
16909 	}
16910 
16911 	/*
16912 	 * First see if the pkt has auto-request sense data with it....
16913 	 * Look at the packet state first so we don't take a performance
16914 	 * hit looking at the arq enabled flag unless absolutely necessary.
16915 	 */
16916 	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16917 	    (un->un_f_arq_enabled == TRUE)) {
16918 		/*
16919 		 * The HBA did an auto request sense for this command so check
16920 		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16921 		 * driver command that should not be retried.
16922 		 */
16923 		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16924 			/*
16925 			 * Save the relevant sense info into the xp for the
16926 			 * original cmd.
16927 			 */
16928 			struct scsi_arq_status *asp;
16929 			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16930 			xp->xb_sense_status =
16931 			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
16932 			xp->xb_sense_state = asp->sts_rqpkt_state;
16933 			xp->xb_sense_resid = asp->sts_rqpkt_resid;
16934 			if (pktp->pkt_state & STATE_XARQ_DONE) {
16935 				actual_len = MAX_SENSE_LENGTH -
16936 				    xp->xb_sense_resid;
16937 				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16938 				    MAX_SENSE_LENGTH);
16939 			} else {
16940 				if (xp->xb_sense_resid > SENSE_LENGTH) {
16941 					actual_len = MAX_SENSE_LENGTH -
16942 					    xp->xb_sense_resid;
16943 				} else {
16944 					actual_len = SENSE_LENGTH -
16945 					    xp->xb_sense_resid;
16946 				}
16947 				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16948 					if ((((struct uscsi_cmd *)
16949 					    (xp->xb_pktinfo))->uscsi_rqlen) >
16950 					    actual_len) {
16951 						xp->xb_sense_resid =
16952 						    (((struct uscsi_cmd *)
16953 						    (xp->xb_pktinfo))->
16954 						    uscsi_rqlen) - actual_len;
16955 					} else {
16956 						xp->xb_sense_resid = 0;
16957 					}
16958 				}
16959 				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16960 				    SENSE_LENGTH);
16961 			}
16962 
16963 			/* fail the command */
16964 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16965 			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
16966 			sd_return_failed_command(un, bp, EIO);
16967 			goto exit;
16968 		}
16969 
16970 #if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
16971 		/*
16972 		 * We want to either retry or fail this command, so free
16973 		 * the DMA resources here. If we retry the command then
16974 		 * the DMA resources will be reallocated in sd_start_cmds().
16975 		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16976 		 * causes the *entire* transfer to start over again from the
16977 		 * beginning of the request, even for PARTIAL chunks that
16978 		 * have already transferred successfully.
16979 		 */
16980 		if ((un->un_f_is_fibre == TRUE) &&
16981 		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16982 		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16983 			scsi_dmafree(pktp);
16984 			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16985 		}
16986 #endif
16987 
16988 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16989 		    "sdintr: arq done, sd_handle_auto_request_sense\n");
16990 
16991 		sd_handle_auto_request_sense(un, bp, xp, pktp);
16992 		goto exit;
16993 	}
16994 
16995 	/* Next see if this is the REQUEST SENSE pkt for the instance */
16996 	if (pktp->pkt_flags & FLAG_SENSING) {
16997 		/* This pktp is from the unit's REQUEST_SENSE command */
16998 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16999 		    "sdintr: sd_handle_request_sense\n");
17000 		sd_handle_request_sense(un, bp, xp, pktp);
17001 		goto exit;
17002 	}
17003 
17004 	/*
17005 	 * Check to see if the command successfully completed as requested;
17006 	 * this is the most common case (and also the hot performance path).
17007 	 *
17008 	 * Requirements for successful completion are:
17009 	 * pkt_reason is CMD_CMPLT and packet status is status good.
17010 	 * In addition:
17011 	 * - A residual of zero indicates successful completion no matter what
17012 	 *   the command is.
17013 	 * - If the residual is not zero and the command is not a read or
17014 	 *   write, then it's still defined as successful completion. In other
17015 	 *   words, if the command is a read or write the residual must be
17016 	 *   zero for successful completion.
17017 	 * - If the residual is not zero and the command is a read or
17018 	 *   write, and it's a USCSICMD, then it's still defined as
17019 	 *   successful completion.
17020 	 */
17021 	if ((pktp->pkt_reason == CMD_CMPLT) &&
17022 	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
17023 
17024 		/*
17025 		 * Since this command is returned with a good status, we
17026 		 * can reset the count for Sonoma failover.
17027 		 */
17028 		un->un_sonoma_failure_count = 0;
17029 
17030 		/*
17031 		 * Return all USCSI commands on good status
17032 		 */
17033 		if (pktp->pkt_resid == 0) {
17034 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17035 			    "sdintr: returning command for resid == 0\n");
17036 		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17037 		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17038 			SD_UPDATE_B_RESID(bp, pktp);
17039 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17040 			    "sdintr: returning command for resid != 0\n");
17041 		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17042 			SD_UPDATE_B_RESID(bp, pktp);
17043 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17044 			    "sdintr: returning uscsi command\n");
17045 		} else {
17046 			goto not_successful;
17047 		}
17048 		sd_return_command(un, bp);
17049 
17050 		/*
17051 		 * Decrement counter to indicate that the callback routine
17052 		 * is done.
17053 		 */
17054 		un->un_in_callback--;
17055 		ASSERT(un->un_in_callback >= 0);
17056 		mutex_exit(SD_MUTEX(un));
17057 
17058 		return;
17059 	}
17060 
17061 not_successful:
17062 
17063 #if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
17064 	/*
17065 	 * The following is based upon knowledge of the underlying transport
17066 	 * and its use of DMA resources.  This code should be removed when
17067 	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17068 	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17069 	 * and sd_start_cmds().
17070 	 *
17071 	 * Free any DMA resources associated with this command if there
17072 	 * is a chance it could be retried or enqueued for later retry.
17073 	 * If we keep the DMA binding then mpxio cannot reissue the
17074 	 * command on another path whenever a path failure occurs.
17075 	 *
17076 	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17077 	 * causes the *entire* transfer to start over again from the
17078 	 * beginning of the request, even for PARTIAL chunks that
17079 	 * have already transferred successfully.
17080 	 *
17081 	 * This is only done for non-uscsi commands (and also skipped for the
17082 	 * driver's internal RQS command). Also just do this for Fibre Channel
17083 	 * devices as these are the only ones that support mpxio.
17084 	 */
17085 	if ((un->un_f_is_fibre == TRUE) &&
17086 	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17087 	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17088 		scsi_dmafree(pktp);
17089 		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17090 	}
17091 #endif
17092 
17093 	/*
17094 	 * The command did not successfully complete as requested so check
17095 	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17096 	 * driver command that should not be retried so just return. If
17097 	 * FLAG_DIAGNOSE is not set the error will be processed below.
17098 	 */
17099 	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17100 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17101 		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17102 		/*
17103 		 * Issue a request sense if a check condition caused the error
17104 		 * (we handle the auto request sense case above), otherwise
17105 		 * just fail the command.
17106 		 */
17107 		if ((pktp->pkt_reason == CMD_CMPLT) &&
17108 		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17109 			sd_send_request_sense_command(un, bp, pktp);
17110 		} else {
17111 			sd_return_failed_command(un, bp, EIO);
17112 		}
17113 		goto exit;
17114 	}
17115 
17116 	/*
17117 	 * The command did not successfully complete as requested so process
17118 	 * the error, retry, and/or attempt recovery.
17119 	 */
17120 	switch (pktp->pkt_reason) {
17121 	case CMD_CMPLT:
17122 		switch (SD_GET_PKT_STATUS(pktp)) {
17123 		case STATUS_GOOD:
17124 			/*
17125 			 * The command completed successfully with a non-zero
17126 			 * residual
17127 			 */
17128 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17129 			    "sdintr: STATUS_GOOD \n");
17130 			sd_pkt_status_good(un, bp, xp, pktp);
17131 			break;
17132 
17133 		case STATUS_CHECK:
17134 		case STATUS_TERMINATED:
17135 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17136 			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17137 			sd_pkt_status_check_condition(un, bp, xp, pktp);
17138 			break;
17139 
17140 		case STATUS_BUSY:
17141 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17142 			    "sdintr: STATUS_BUSY\n");
17143 			sd_pkt_status_busy(un, bp, xp, pktp);
17144 			break;
17145 
17146 		case STATUS_RESERVATION_CONFLICT:
17147 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17148 			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
17149 			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17150 			break;
17151 
17152 		case STATUS_QFULL:
17153 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17154 			    "sdintr: STATUS_QFULL\n");
17155 			sd_pkt_status_qfull(un, bp, xp, pktp);
17156 			break;
17157 
17158 		case STATUS_MET:
17159 		case STATUS_INTERMEDIATE:
17160 		case STATUS_SCSI2:
17161 		case STATUS_INTERMEDIATE_MET:
17162 		case STATUS_ACA_ACTIVE:
17163 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17164 			    "Unexpected SCSI status received: 0x%x\n",
17165 			    SD_GET_PKT_STATUS(pktp));
17166 			/*
17167 			 * Mark the ssc_flags when an invalid status code
17168 			 * is detected for a non-USCSI command.
17169 			 */
17170 			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17171 				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17172 				    0, "stat-code");
17173 			}
17174 			sd_return_failed_command(un, bp, EIO);
17175 			break;
17176 
17177 		default:
17178 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17179 			    "Invalid SCSI status received: 0x%x\n",
17180 			    SD_GET_PKT_STATUS(pktp));
17181 			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17182 				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17183 				    0, "stat-code");
17184 			}
17185 			sd_return_failed_command(un, bp, EIO);
17186 			break;
17187 
17188 		}
17189 		break;
17190 
17191 	case CMD_INCOMPLETE:
17192 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17193 		    "sdintr:  CMD_INCOMPLETE\n");
17194 		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17195 		break;
17196 	case CMD_TRAN_ERR:
17197 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17198 		    "sdintr: CMD_TRAN_ERR\n");
17199 		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17200 		break;
17201 	case CMD_RESET:
17202 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17203 		    "sdintr: CMD_RESET \n");
17204 		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17205 		break;
17206 	case CMD_ABORTED:
17207 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17208 		    "sdintr: CMD_ABORTED \n");
17209 		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17210 		break;
17211 	case CMD_TIMEOUT:
17212 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17213 		    "sdintr: CMD_TIMEOUT\n");
17214 		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17215 		break;
17216 	case CMD_UNX_BUS_FREE:
17217 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17218 		    "sdintr: CMD_UNX_BUS_FREE \n");
17219 		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17220 		break;
17221 	case CMD_TAG_REJECT:
17222 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17223 		    "sdintr: CMD_TAG_REJECT\n");
17224 		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17225 		break;
17226 	default:
17227 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17228 		    "sdintr: default\n");
17229 		/*
17230 		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17231 		 */
17232 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17233 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17234 			    0, "pkt-reason");
17235 		}
17236 		sd_pkt_reason_default(un, bp, xp, pktp);
17237 		break;
17238 	}
17239 
17240 exit:
17241 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17242 
17243 	/* Decrement counter to indicate that the callback routine is done. */
17244 	un->un_in_callback--;
17245 	ASSERT(un->un_in_callback >= 0);
17246 
17247 	/*
17248 	 * At this point, the pkt has been dispatched, ie, it is either
17249 	 * being re-tried or has been returned to its caller and should
17250 	 * not be referenced.
17251 	 */
17252 
17253 	mutex_exit(SD_MUTEX(un));
17254 }
17255 
17256 
17257 /*
17258  * Function:	sd_print_incomplete_msg
17259  *
17260  * Description: Prints the error message for a CMD_INCOMPLETE error.
17261  *
17262  * Arguments:	un - ptr to associated softstate for the device.
17263  *		bp - ptr to the buf(9S) for the command.
17264  *		arg - message string ptr
17265  *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17266  *		or SD_NO_RETRY_ISSUED.
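 */

/*
 * The message callbacks in this file share the (un, bp, arg, code)
 * signature and are handed to sd_retry_command(), which invokes them
 * with the retry disposition code. A minimal illustrative sketch
 * (sd_example_print_msg is a hypothetical helper, not part of the
 * driver):
 */
static void
sd_example_print_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	/* Only log when no further retry will be issued. */
	if (code == SD_NO_RETRY_ISSUED) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "%s\n", (char *)arg);
	}
}

/*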
17267  *
17268  * Context:	May be called under interrupt context
17269  */
17270 
17271 static void
17272 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17273 {
17274 	struct scsi_pkt	*pktp;
17275 	char	*msgp;
17276 	char	*cmdp = arg;
17277 
17278 	ASSERT(un != NULL);
17279 	ASSERT(mutex_owned(SD_MUTEX(un)));
17280 	ASSERT(bp != NULL);
17281 	ASSERT(arg != NULL);
17282 	pktp = SD_GET_PKTP(bp);
17283 	ASSERT(pktp != NULL);
17284 
17285 	switch (code) {
17286 	case SD_DELAYED_RETRY_ISSUED:
17287 	case SD_IMMEDIATE_RETRY_ISSUED:
17288 		msgp = "retrying";
17289 		break;
17290 	case SD_NO_RETRY_ISSUED:
17291 	default:
17292 		msgp = "giving up";
17293 		break;
17294 	}
17295 
17296 	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17297 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17298 		    "incomplete %s- %s\n", cmdp, msgp);
17299 	}
17300 }
17301 
17302 
17303 
17304 /*
17305  * Function:	sd_pkt_status_good
17306  *
17307  * Description: Processing for a STATUS_GOOD code in pkt_status.
17308  *
17309  * Context:	May be called under interrupt context
17310  */
17311 
17312 static void
17313 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17314 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17315 {
17316 	char	*cmdp;
17317 
17318 	ASSERT(un != NULL);
17319 	ASSERT(mutex_owned(SD_MUTEX(un)));
17320 	ASSERT(bp != NULL);
17321 	ASSERT(xp != NULL);
17322 	ASSERT(pktp != NULL);
17323 	ASSERT(pktp->pkt_reason == CMD_CMPLT);
17324 	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17325 	ASSERT(pktp->pkt_resid != 0);
17326 
17327 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17328 
17329 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
17330 	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17331 	case SCMD_READ:
17332 		cmdp = "read";
17333 		break;
17334 	case SCMD_WRITE:
17335 		cmdp = "write";
17336 		break;
17337 	default:
17338 		SD_UPDATE_B_RESID(bp, pktp);
17339 		sd_return_command(un, bp);
17340 		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17341 		return;
17342 	}
17343 
17344 	/*
17345 	 * See if we can retry the read/write, preferably immediately.
17346 	 * If retries are exhausted, then sd_retry_command() will update
17347 	 * the b_resid count.
17348 	 */
17349 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17350 	    cmdp, EIO, (clock_t)0, NULL);
17351 
17352 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17353 }
17354 
17355 
17356 
17357 
17358 
17359 /*
17360  * Function:	sd_handle_request_sense
17361  *
17362  * Description: Processing for non-auto Request Sense command.
17363  *
17364  * Arguments:	un - ptr to associated softstate
17365  *		sense_bp - ptr to buf(9S) for the RQS command
17366  *		sense_xp - ptr to the sd_xbuf for the RQS command
17367  *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17368  *
17369  * Context:	May be called under interrupt context
17370  */
17371 
17372 static void
17373 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17374 	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17375 {
17376 	struct buf	*cmd_bp;	/* buf for the original command */
17377 	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
17378 	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
17379 	size_t		actual_len;	/* actual sense data length */
17380 
17381 	ASSERT(un != NULL);
17382 	ASSERT(mutex_owned(SD_MUTEX(un)));
17383 	ASSERT(sense_bp != NULL);
17384 	ASSERT(sense_xp != NULL);
17385 	ASSERT(sense_pktp != NULL);
17386 
17387 	/*
17388 	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17389 	 * RQS command and not the original command.
17390 	 */
17391 	ASSERT(sense_pktp == un->un_rqs_pktp);
17392 	ASSERT(sense_bp == un->un_rqs_bp);
17393 	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
17394 	    (FLAG_SENSING | FLAG_HEAD));
17395 	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
17396 	    FLAG_SENSING) == FLAG_SENSING);
17397 
17398 	/* These are the bp, xp, and pktp for the original command */
17399 	cmd_bp = sense_xp->xb_sense_bp;
17400 	cmd_xp = SD_GET_XBUF(cmd_bp);
17401 	cmd_pktp = SD_GET_PKTP(cmd_bp);
17402 
17403 	if (sense_pktp->pkt_reason != CMD_CMPLT) {
17404 		/*
17405 		 * The REQUEST SENSE command failed.  Release the REQUEST
17406 		 * SENSE command for re-use, get back the bp for the original
17407 		 * command, and attempt to re-try the original command if
17408 		 * FLAG_DIAGNOSE is not set in the original packet.
17409 		 */
17410 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17411 		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17412 			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
17413 			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
17414 			    NULL, NULL, EIO, (clock_t)0, NULL);
17415 			return;
17416 		}
17417 	}
17418 
17419 	/*
17420 	 * Save the relevant sense info into the xp for the original cmd.
17421 	 *
17422 	 * Note: if the request sense failed, the state info will be zero,
17423 	 * as set in sd_mark_rqs_busy().
17424 	 */
17425 	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
17426 	cmd_xp->xb_sense_state = sense_pktp->pkt_state;
17427 	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
17428 	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
17429 	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
17430 	    SENSE_LENGTH)) {
17431 		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17432 		    MAX_SENSE_LENGTH);
17433 		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
17434 	} else {
17435 		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17436 		    SENSE_LENGTH);
17437 		if (actual_len < SENSE_LENGTH) {
17438 			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
17439 		} else {
17440 			cmd_xp->xb_sense_resid = 0;
17441 		}
17442 	}
17443 
17444 	/*
17445 	 *  Free up the RQS command....
17446 	 *  NOTE:
17447 	 *	Must do this BEFORE calling sd_validate_sense_data!
17448 	 *	sd_validate_sense_data may return the original command in
17449 	 *	which case the pkt will be freed and the flags can no
17450 	 *	longer be touched.
17451 	 *	SD_MUTEX is held through this process until the command
17452 	 *	is dispatched based upon the sense data, so there are
17453 	 *	no race conditions.
17454 	 */
17455 	(void) sd_mark_rqs_idle(un, sense_xp);
17456 
17457 	/*
17458 	 * For a retryable command, see if we have valid sense data; if so,
17459 	 * turn it over to sd_decode_sense() to figure out the right course
17460 	 * of action. Just fail a non-retryable command.
17461 	 */
17462 	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17463 		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
17464 		    SD_SENSE_DATA_IS_VALID) {
17465 			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
17466 		}
17467 	} else {
17468 		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
17469 		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17470 		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
17471 		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
17472 		sd_return_failed_command(un, cmd_bp, EIO);
17473 	}
17474 }
17475 
17476 
17477 
17478 
17479 /*
17480  * Function:	sd_handle_auto_request_sense
17481  *
17482  * Description: Processing for auto-request sense information.
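 */

/*
 * A minimal illustrative sketch of how the routines above and below
 * derive the amount of sense data actually received from the residual
 * (sd_example_sense_len is a hypothetical helper, not part of the
 * driver; it assumes xb_sense_state/xb_sense_resid have been filled in).
 */
static size_t
sd_example_sense_len(struct sd_xbuf *xp)
{
	/* With extended (XARQ) sense, up to MAX_SENSE_LENGTH may arrive. */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_resid > SENSE_LENGTH)) {
		return (MAX_SENSE_LENGTH - xp->xb_sense_resid);
	}
	return (SENSE_LENGTH - xp->xb_sense_resid);
}

/*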
17483  *
17484  * Arguments:	un - ptr to associated softstate
17485  *		bp - ptr to buf(9S) for the command
17486  *		xp - ptr to the sd_xbuf for the command
17487  *		pktp - ptr to the scsi_pkt(9S) for the command
17488  *
17489  * Context:	May be called under interrupt context
17490  */
17491 
17492 static void
17493 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
17494 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17495 {
17496 	struct scsi_arq_status *asp;
17497 	size_t actual_len;
17498 
17499 	ASSERT(un != NULL);
17500 	ASSERT(mutex_owned(SD_MUTEX(un)));
17501 	ASSERT(bp != NULL);
17502 	ASSERT(xp != NULL);
17503 	ASSERT(pktp != NULL);
17504 	ASSERT(pktp != un->un_rqs_pktp);
17505 	ASSERT(bp != un->un_rqs_bp);
17506 
17507 	/*
17508 	 * For auto-request sense, we get a scsi_arq_status back from
17509 	 * the HBA, with the sense data in the sts_sensedata member.
17510 	 * The pkt_scbp of the packet points to this scsi_arq_status.
17511 	 */
17512 	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
17513 
17514 	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
17515 		/*
17516 		 * The auto REQUEST SENSE failed; see if we can re-try
17517 		 * the original command.
17518 		 */
17519 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17520 		    "auto request sense failed (reason=%s)\n",
17521 		    scsi_rname(asp->sts_rqpkt_reason));
17522 
17523 		sd_reset_target(un, pktp);
17524 
17525 		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17526 		    NULL, NULL, EIO, (clock_t)0, NULL);
17527 		return;
17528 	}
17529 
17530 	/* Save the relevant sense info into the xp for the original cmd. */
17531 	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
17532 	xp->xb_sense_state = asp->sts_rqpkt_state;
17533 	xp->xb_sense_resid = asp->sts_rqpkt_resid;
17534 	if (xp->xb_sense_state & STATE_XARQ_DONE) {
17535 		actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17536 		bcopy(&asp->sts_sensedata, xp->xb_sense_data,
17537 		    MAX_SENSE_LENGTH);
17538 	} else {
17539 		if (xp->xb_sense_resid > SENSE_LENGTH) {
17540 			actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17541 		} else {
17542 			actual_len = SENSE_LENGTH - xp->xb_sense_resid;
17543 		}
17544 		if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17545 			if ((((struct uscsi_cmd *)
17546 			    (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
17547 				xp->xb_sense_resid = (((struct uscsi_cmd *)
17548 				    (xp->xb_pktinfo))->uscsi_rqlen) -
17549 				    actual_len;
17550 			} else {
17551 				xp->xb_sense_resid = 0;
17552 			}
17553 		}
17554 		bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
17555 	}
17556 
17557 	/*
17558 	 * See if we have valid sense data, if so then turn it over to
17559 	 * sd_decode_sense() to figure out the right course of action.
17560 	 */
17561 	if (sd_validate_sense_data(un, bp, xp, actual_len) ==
17562 	    SD_SENSE_DATA_IS_VALID) {
17563 		sd_decode_sense(un, bp, xp, pktp);
17564 	}
17565 }
17566 
17567 
17568 /*
17569  * Function:	sd_print_sense_failed_msg
17570  *
17571  * Description: Print log message when RQS has failed.
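 */

/*
 * A minimal illustrative sketch of extracting the ARQ status block
 * that the HBA returns in pkt_scbp, as done in
 * sd_handle_auto_request_sense() above (sd_example_arq_completed is
 * a hypothetical helper, not part of the driver).
 */
static int
sd_example_arq_completed(struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp =
	    (struct scsi_arq_status *)(pktp->pkt_scbp);

	/* CMD_CMPLT means the HBA's internal REQUEST SENSE succeeded. */
	return (asp->sts_rqpkt_reason == CMD_CMPLT);
}

/*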
17572  *
17573  * Arguments:	un - ptr to associated softstate
17574  *		bp - ptr to buf(9S) for the command
17575  *		arg - generic message string ptr
17576  *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17577  *		or SD_NO_RETRY_ISSUED
17578  *
17579  * Context:	May be called from interrupt context
17580  */
17581 
17582 static void
17583 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17584 	int code)
17585 {
17586 	char	*msgp = arg;
17587 
17588 	ASSERT(un != NULL);
17589 	ASSERT(mutex_owned(SD_MUTEX(un)));
17590 	ASSERT(bp != NULL);
17591 
17592 	if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17593 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17594 	}
17595 }
17596 
17597 
17598 /*
17599  * Function:	sd_validate_sense_data
17600  *
17601  * Description: Check the given sense data for validity.
17602  *		If the sense data is not valid, the command will
17603  *		be either failed or retried!
17604  *
17605  * Return Code: SD_SENSE_DATA_IS_INVALID
17606  *		SD_SENSE_DATA_IS_VALID
17607  *
17608  * Context:	May be called from interrupt context
17609  */
17610 
17611 static int
17612 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17613 	size_t actual_len)
17614 {
17615 	struct scsi_extended_sense *esp;
17616 	struct	scsi_pkt *pktp;
17617 	char	*msgp = NULL;
17618 	sd_ssc_t *sscp;
17619 
17620 	ASSERT(un != NULL);
17621 	ASSERT(mutex_owned(SD_MUTEX(un)));
17622 	ASSERT(bp != NULL);
17623 	ASSERT(bp != un->un_rqs_bp);
17624 	ASSERT(xp != NULL);
17625 	ASSERT(un->un_fm_private != NULL);
17626 
17627 	pktp = SD_GET_PKTP(bp);
17628 	ASSERT(pktp != NULL);
17629 
17630 	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
17631 	ASSERT(sscp != NULL);
17632 
17633 	/*
17634 	 * Check the status of the RQS command (auto or manual).
17635 	 */
17636 	switch (xp->xb_sense_status & STATUS_MASK) {
17637 	case STATUS_GOOD:
17638 		break;
17639 
17640 	case STATUS_RESERVATION_CONFLICT:
17641 		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17642 		return (SD_SENSE_DATA_IS_INVALID);
17643 
17644 	case STATUS_BUSY:
17645 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17646 		    "Busy Status on REQUEST SENSE\n");
17647 		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
17648 		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17649 		return (SD_SENSE_DATA_IS_INVALID);
17650 
17651 	case STATUS_QFULL:
17652 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17653 		    "QFULL Status on REQUEST SENSE\n");
17654 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
17655 		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17656 		return (SD_SENSE_DATA_IS_INVALID);
17657 
17658 	case STATUS_CHECK:
17659 	case STATUS_TERMINATED:
17660 		msgp = "Check Condition on REQUEST SENSE\n";
17661 		goto sense_failed;
17662 
17663 	default:
17664 		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
17665 		goto sense_failed;
17666 	}
17667 
17668 	/*
17669 	 * See if we got the minimum required amount of sense data.
17670 	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17671 	 * or less.
17672 	 */
17673 	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17674 	    (actual_len == 0)) {
17675 		msgp = "Request Sense couldn't get sense data\n";
17676 		goto sense_failed;
17677 	}
17678 
17679 	if (actual_len < SUN_MIN_SENSE_LENGTH) {
17680 		msgp = "Not enough sense information\n";
17681 		/* Mark the ssc_flags for detecting invalid sense data */
17682 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17683 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17684 			    "sense-data");
17685 		}
17686 		goto sense_failed;
17687 	}
17688 
17689 	/*
17690 	 * We require the extended sense data
17691 	 */
17692 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17693 	if (esp->es_class != CLASS_EXTENDED_SENSE) {
17694 		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17695 			static char tmp[8];
17696 			static char buf[148];
17697 			char *p = (char *)(xp->xb_sense_data);
17698 			int i;
17699 
17700 			mutex_enter(&sd_sense_mutex);
17701 			(void) strcpy(buf, "undecodable sense information:");
17702 			for (i = 0; i < actual_len; i++) {
17703 				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17704 				(void) strcpy(&buf[strlen(buf)], tmp);
17705 			}
17706 			i = strlen(buf);
17707 			(void) strcpy(&buf[i], "-(assumed fatal)\n");
17708 
17709 			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17710 				scsi_log(SD_DEVINFO(un), sd_label,
17711 				    CE_WARN, buf);
17712 			}
17713 			mutex_exit(&sd_sense_mutex);
17714 		}
17715 
17716 		/* Mark the ssc_flags for detecting invalid sense data */
17717 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17718 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17719 			    "sense-data");
17720 		}
17721 
17722 		/* Note: Legacy behavior, fail the command with no retry */
17723 		sd_return_failed_command(un, bp, EIO);
17724 		return (SD_SENSE_DATA_IS_INVALID);
17725 	}
17726 
17727 	/*
17728 	 * Check that es_code is valid (es_class concatenated with es_code
17729 	 * makes up the "response code" field). es_class will always be 7,
17730 	 * so make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate
17731 	 * the format.
17732 	 */
17733 	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17734 	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17735 	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17736 	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17737 	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17738 		/* Mark the ssc_flags for detecting invalid sense data */
17739 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17740 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17741 			    "sense-data");
17742 		}
17743 		goto sense_failed;
17744 	}
17745 
17746 	return (SD_SENSE_DATA_IS_VALID);
17747 
17748 sense_failed:
17749 	/*
17750 	 * If the request sense failed (for whatever reason), attempt
17751 	 * to retry the original command.
17752 	 */
17753 #if defined(__i386) || defined(__amd64)
17754 	/*
17755 	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17756 	 * sddef.h for the SPARC platform, while x86 uses one binary
17757 	 * for both SCSI and FC.
17758 	 * The delay value used here needs to be adjusted whenever
17759 	 * SD_RETRY_DELAY changes in sddef.h.
17760 	 */
17761 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17762 	    sd_print_sense_failed_msg, msgp, EIO,
17763 	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17764 #else
17765 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17766 	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17767 #endif
17768 
17769 	return (SD_SENSE_DATA_IS_INVALID);
17770 }
17771 
17772 /*
17773  * Function:	sd_decode_sense
17774  *
17775  * Description: Take recovery action(s) when SCSI Sense Data is received.
17776  *
17777  * Context:	Interrupt context.
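 */

/*
 * A minimal illustrative sketch of the response-code check performed
 * by sd_validate_sense_data() above: fixed (0x0/0x1), descriptor
 * (0x2/0x3) and vendor-specific (0xf) formats are accepted
 * (sd_example_response_code_ok is a hypothetical helper, not part of
 * the driver).
 */
static int
sd_example_response_code_ok(struct scsi_extended_sense *esp)
{
	return ((esp->es_code == CODE_FMT_FIXED_CURRENT) ||
	    (esp->es_code == CODE_FMT_FIXED_DEFERRED) ||
	    (esp->es_code == CODE_FMT_DESCR_CURRENT) ||
	    (esp->es_code == CODE_FMT_DESCR_DEFERRED) ||
	    (esp->es_code == CODE_FMT_VENDOR_SPECIFIC));
}

/*
 * sd_decode_sense() below dispatches on the sense key extracted from
 * the validated sense data.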
17778  */
17779 
17780 static void
17781 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17782 	struct scsi_pkt *pktp)
17783 {
17784 	uint8_t sense_key;
17785 
17786 	ASSERT(un != NULL);
17787 	ASSERT(mutex_owned(SD_MUTEX(un)));
17788 	ASSERT(bp != NULL);
17789 	ASSERT(bp != un->un_rqs_bp);
17790 	ASSERT(xp != NULL);
17791 	ASSERT(pktp != NULL);
17792 
17793 	sense_key = scsi_sense_key(xp->xb_sense_data);
17794 
17795 	switch (sense_key) {
17796 	case KEY_NO_SENSE:
17797 		sd_sense_key_no_sense(un, bp, xp, pktp);
17798 		break;
17799 	case KEY_RECOVERABLE_ERROR:
17800 		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17801 		    bp, xp, pktp);
17802 		break;
17803 	case KEY_NOT_READY:
17804 		sd_sense_key_not_ready(un, xp->xb_sense_data,
17805 		    bp, xp, pktp);
17806 		break;
17807 	case KEY_MEDIUM_ERROR:
17808 	case KEY_HARDWARE_ERROR:
17809 		sd_sense_key_medium_or_hardware_error(un,
17810 		    xp->xb_sense_data, bp, xp, pktp);
17811 		break;
17812 	case KEY_ILLEGAL_REQUEST:
17813 		sd_sense_key_illegal_request(un, bp, xp, pktp);
17814 		break;
17815 	case KEY_UNIT_ATTENTION:
17816 		sd_sense_key_unit_attention(un, xp->xb_sense_data,
17817 		    bp, xp, pktp);
17818 		break;
17819 	case KEY_WRITE_PROTECT:
17820 	case KEY_VOLUME_OVERFLOW:
17821 	case KEY_MISCOMPARE:
17822 		sd_sense_key_fail_command(un, bp, xp, pktp);
17823 		break;
17824 	case KEY_BLANK_CHECK:
17825 		sd_sense_key_blank_check(un, bp, xp, pktp);
17826 		break;
17827 	case KEY_ABORTED_COMMAND:
17828 		sd_sense_key_aborted_command(un, bp, xp, pktp);
17829 		break;
17830 	case KEY_VENDOR_UNIQUE:
17831 	case KEY_COPY_ABORTED:
17832 	case KEY_EQUAL:
17833 	case KEY_RESERVED:
17834 	default:
17835 		sd_sense_key_default(un, xp->xb_sense_data,
17836 		    bp, xp, pktp);
17837 		break;
17838 	}
17839 }
17840 
17841 
17842 /*
17843  * Function:	sd_dump_memory
17844  *
17845  * Description: Debug logging routine to print the contents of a user provided
17846  *		buffer. The output of the buffer is broken up into 256 byte
17847  *		segments due to a size constraint of the scsi_log
17848  *		implementation.
17849  *
17850  * Arguments:	un - ptr to softstate
17851  *		comp - component mask
17852  *		title - "title" string to precede data when printed
17853  *		data - ptr to data block to be printed
17854  *		len - size of data block to be printed
17855  *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17856  *
17857  * Context:	May be called from interrupt context
17858  */
17859 
17860 #define	SD_DUMP_MEMORY_BUF_SIZE	256
17861 
17862 static char *sd_dump_format_string[] = {
17863 		" 0x%02x",
17864 		" %c"
17865 };
17866 
17867 static void
17868 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17869 	int len, int fmt)
17870 {
17871 	int	i, j;
17872 	int	avail_count;
17873 	int	start_offset;
17874 	int	end_offset;
17875 	size_t	entry_len;
17876 	char	*bufp;
17877 	char	*local_buf;
17878 	char	*format_string;
17879 
17880 	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17881 
17882 	/*
17883 	 * In the debug version of the driver, this function is called from a
17884 	 * number of places which are NOPs in the release driver.
17885 	 * The debug driver therefore has additional methods of filtering
17886 	 * debug output.
17887 	 */
17888 #ifdef SDDEBUG
17889 	/*
17890 	 * In the debug version of the driver we can reduce the amount of debug
17891 	 * messages by setting sd_error_level to something other than
17892 	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17893 	 * sd_component_mask.
17894 	 */
17895 	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17896 	    (sd_error_level != SCSI_ERR_ALL)) {
17897 		return;
17898 	}
17899 	if (((sd_component_mask & comp) == 0) ||
17900 	    (sd_error_level != SCSI_ERR_ALL)) {
17901 		return;
17902 	}
17903 #else
17904 	if (sd_error_level != SCSI_ERR_ALL) {
17905 		return;
17906 	}
17907 #endif
17908 
17909 	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17910 	bufp = local_buf;
17911 	/*
17912 	 * Available length is the length of local_buf[], minus the
17913 	 * length of the title string, minus one for the ":", minus
17914 	 * one for the newline, minus one for the NULL terminator.
17915 	 * This gives the #bytes available for holding the printed
17916 	 * values from the given data buffer.
17917 	 */
17918 	if (fmt == SD_LOG_HEX) {
17919 		format_string = sd_dump_format_string[0];
17920 	} else /* SD_LOG_CHAR */ {
17921 		format_string = sd_dump_format_string[1];
17922 	}
17923 	/*
17924 	 * Available count is the number of elements from the given
17925 	 * data buffer that we can fit into the available length.
17926 	 * This is based upon the size of the format string used.
17927 	 * Make one entry and find its size.
17928 	 */
17929 	(void) sprintf(bufp, format_string, data[0]);
17930 	entry_len = strlen(bufp);
17931 	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17932 
17933 	j = 0;
17934 	while (j < len) {
17935 		bufp = local_buf;
17936 		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17937 		start_offset = j;
17938 
17939 		end_offset = start_offset + avail_count;
17940 
17941 		(void) sprintf(bufp, "%s:", title);
17942 		bufp += strlen(bufp);
17943 		for (i = start_offset; ((i < end_offset) && (j < len));
17944 		    i++, j++) {
17945 			(void) sprintf(bufp, format_string, data[i]);
17946 			bufp += entry_len;
17947 		}
17948 		(void) sprintf(bufp, "\n");
17949 
17950 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17951 	}
17952 	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17953 }
17954 
17955 /*
17956  * Function:	sd_print_sense_msg
17957  *
17958  * Description: Log a message based upon the given sense data.
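 */

/*
 * A minimal illustrative sketch of the segment-capacity arithmetic
 * used by sd_dump_memory() above: room left after "title:", the
 * newline and the terminator, divided by the width of one printed
 * entry (sd_example_avail_count is a hypothetical helper, not part
 * of the driver).
 */
static int
sd_example_avail_count(const char *title, size_t entry_len)
{
	return ((SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len);
}

/*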
17959  *
17960  * Arguments:	un - ptr to associated softstate
17961  *		bp - ptr to buf(9S) for the command
17962  *		arg - ptr to associated sd_sense_info struct
17963  *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17964  *		or SD_NO_RETRY_ISSUED
17965  *
17966  * Context:	May be called from interrupt context
17967  */
17968 
17969 static void
17970 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17971 {
17972 	struct sd_xbuf	*xp;
17973 	struct scsi_pkt	*pktp;
17974 	uint8_t *sensep;
17975 	daddr_t request_blkno;
17976 	diskaddr_t err_blkno;
17977 	int severity;
17978 	int pfa_flag;
17979 	extern struct scsi_key_strings scsi_cmds[];
17980 
17981 	ASSERT(un != NULL);
17982 	ASSERT(mutex_owned(SD_MUTEX(un)));
17983 	ASSERT(bp != NULL);
17984 	xp = SD_GET_XBUF(bp);
17985 	ASSERT(xp != NULL);
17986 	pktp = SD_GET_PKTP(bp);
17987 	ASSERT(pktp != NULL);
17988 	ASSERT(arg != NULL);
17989 
17990 	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17991 	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17992 
17993 	if ((code == SD_DELAYED_RETRY_ISSUED) ||
17994 	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17995 		severity = SCSI_ERR_RETRYABLE;
17996 	}
17997 
17998 	/* Use absolute block number for the request block number */
17999 	request_blkno = xp->xb_blkno;
18000 
18001 	/*
18002 	 * Now try to get the error block number from the sense data
18003 	 */
18004 	sensep = xp->xb_sense_data;
18005 
18006 	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
18007 	    (uint64_t *)&err_blkno)) {
18008 		/*
18009 		 * We retrieved the error block number from the information
18010 		 * portion of the sense data.
18011 		 *
18012 		 * For USCSI commands we are better off using the error
18013 		 * block no. as the requested block no. (This is the best
18014 		 * we can estimate.)
18015 		 */
18016 		if ((SD_IS_BUFIO(xp) == FALSE) &&
18017 		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
18018 			request_blkno = err_blkno;
18019 		}
18020 	} else {
18021 		/*
18022 		 * Without the es_valid bit set (for fixed format) or an
18023 		 * information descriptor (for descriptor format) we cannot
18024 		 * be certain of the error blkno, so just use the
18025 		 * request_blkno.
18026 		 */
18027 		err_blkno = (diskaddr_t)request_blkno;
18028 	}
18029 
18030 	/*
18031 	 * The following will log the buffer contents for the release driver
18032 	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
18033 	 * level is set to verbose.
18034 	 */
18035 	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
18036 	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
18037 	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
18038 	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
18039 
18040 	if (pfa_flag == FALSE) {
18041 		/* This is normally only set for USCSI */
18042 		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
18043 			return;
18044 		}
18045 
18046 		if ((SD_IS_BUFIO(xp) == TRUE) &&
18047 		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
18048 		    (severity < sd_error_level))) {
18049 			return;
18050 		}
18051 	}
18052 	/*
18053 	 * Check for Sonoma Failover and keep a count of how many failed I/O's
18054 	 */
18055 	if ((SD_IS_LSI(un)) &&
18056 	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
18057 	    (scsi_sense_asc(sensep) == 0x94) &&
18058 	    (scsi_sense_ascq(sensep) == 0x01)) {
18059 		un->un_sonoma_failure_count++;
18060 		if (un->un_sonoma_failure_count > 1) {
18061 			return;
18062 		}
18063 	}
18064 
18065 	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
18066 	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
18067 	    (pktp->pkt_resid == 0))) {
18068 		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
18069 		    request_blkno, err_blkno, scsi_cmds,
18070 		    (struct scsi_extended_sense *)sensep,
18071 		    un->un_additional_codes, NULL);
18072 	}
18073 }
18074 
18075 /*
18076  * Function:	sd_sense_key_no_sense
18077  *
18078  * Description: Recovery action when sense data was not received.
18079  *
18080  * Context:	May be called from interrupt context
18081  */
18082 
18083 static void
18084 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
18085 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
18086 {
18087 	struct sd_sense_info	si;
18088 
18089 	ASSERT(un != NULL);
18090 	ASSERT(mutex_owned(SD_MUTEX(un)));
18091 	ASSERT(bp != NULL);
18092 	ASSERT(xp != NULL);
18093 	ASSERT(pktp != NULL);
18094 
18095 	si.ssi_severity = SCSI_ERR_FATAL;
18096 	si.ssi_pfa_flag = FALSE;
18097 
18098 	SD_UPDATE_ERRSTATS(un, sd_softerrs);
18099 
18100 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18101 	    &si, EIO, (clock_t)0, NULL);
18102 }
18103 
18104 
18105 /*
18106  * Function:	sd_sense_key_recoverable_error
18107  *
18108  * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18109  *
18110  * Context:	May be called from interrupt context
18111  */
18112 
18113 static void
18114 sd_sense_key_recoverable_error(struct sd_lun *un,
18115 	uint8_t *sense_datap,
18116 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18117 {
18118 	struct sd_sense_info	si;
18119 	uint8_t asc = scsi_sense_asc(sense_datap);
18120 
18121 	ASSERT(un != NULL);
18122 	ASSERT(mutex_owned(SD_MUTEX(un)));
18123 	ASSERT(bp != NULL);
18124 	ASSERT(xp != NULL);
18125 	ASSERT(pktp != NULL);
18126 
18127 	/*
18128 	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18129 	 */
18130 	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18131 		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18132 		si.ssi_severity = SCSI_ERR_INFO;
18133 		si.ssi_pfa_flag = TRUE;
18134 	} else {
18135 		SD_UPDATE_ERRSTATS(un, sd_softerrs);
18136 		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18137 		si.ssi_severity = SCSI_ERR_RECOVERED;
18138 		si.ssi_pfa_flag = FALSE;
18139 	}
18140 
18141 	if (pktp->pkt_resid == 0) {
18142 		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18143 		sd_return_command(un, bp);
18144 		return;
18145 	}
18146 
18147 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18148 	    &si, EIO, (clock_t)0, NULL);
18149 }
18150 
18151 
18152 
18153 
18154 /*
18155  * Function:	sd_sense_key_not_ready
18156  *
18157  * Description: Recovery actions for a SCSI "Not Ready" sense key.
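 */

/*
 * A minimal illustrative sketch of the PFA classification used by
 * sd_sense_key_recoverable_error() above: ASC 0x5D ("failure
 * prediction threshold exceeded") is reported as a predictive-failure
 * event rather than as an I/O error (sd_example_is_pfa is a
 * hypothetical helper, not part of the driver).
 */
static int
sd_example_is_pfa(uint8_t *sense_datap)
{
	return (scsi_sense_asc(sense_datap) == 0x5D);
}

/*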
18158 * 18159 * Context: May be called from interrupt context 18160 */ 18161 18162 static void 18163 sd_sense_key_not_ready(struct sd_lun *un, 18164 uint8_t *sense_datap, 18165 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18166 { 18167 struct sd_sense_info si; 18168 uint8_t asc = scsi_sense_asc(sense_datap); 18169 uint8_t ascq = scsi_sense_ascq(sense_datap); 18170 18171 ASSERT(un != NULL); 18172 ASSERT(mutex_owned(SD_MUTEX(un))); 18173 ASSERT(bp != NULL); 18174 ASSERT(xp != NULL); 18175 ASSERT(pktp != NULL); 18176 18177 si.ssi_severity = SCSI_ERR_FATAL; 18178 si.ssi_pfa_flag = FALSE; 18179 18180 /* 18181 * Update error stats after first NOT READY error. Disks may have 18182 * been powered down and may need to be restarted. For CDROMs, 18183 * report NOT READY errors only if media is present. 18184 */ 18185 if ((ISCD(un) && (asc == 0x3A)) || 18186 (xp->xb_nr_retry_count > 0)) { 18187 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18188 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 18189 } 18190 18191 /* 18192 * Just fail if the "not ready" retry limit has been reached. 18193 */ 18194 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 18195 /* Special check for error message printing for removables. */ 18196 if (un->un_f_has_removable_media && (asc == 0x04) && 18197 (ascq >= 0x04)) { 18198 si.ssi_severity = SCSI_ERR_ALL; 18199 } 18200 goto fail_command; 18201 } 18202 18203 /* 18204 * Check the ASC and ASCQ in the sense data as needed, to determine 18205 * what to do. 18206 */ 18207 switch (asc) { 18208 case 0x04: /* LOGICAL UNIT NOT READY */ 18209 /* 18210 * disk drives that don't spin up result in a very long delay 18211 * in format without warning messages. We will log a message 18212 * if the error level is set to verbose. 18213 */ 18214 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18215 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18216 "logical unit not ready, resetting disk\n"); 18217 } 18218 18219 /* 18220 * There are different requirements for CDROMs and disks for 18221 * the number of retries. If a CD-ROM is giving this, it is 18222 * probably reading TOC and is in the process of getting 18223 * ready, so we should keep on trying for a long time to make 18224 * sure that all types of media are taken into account (for 18225 * some media the drive takes a long time to read TOC). For 18226 * disks we do not want to retry this too many times as this 18227 * can cause a long hang in format when the drive refuses to 18228 * spin up (a very common failure). 18229 */ 18230 switch (ascq) { 18231 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 18232 /* 18233 * Disk drives frequently refuse to spin up which 18234 * results in a very long hang in format without 18235 * warning messages. 18236 * 18237 * Note: This code preserves the legacy behavior of 18238 * comparing xb_nr_retry_count against zero for fibre 18239 * channel targets instead of comparing against the 18240 * un_reset_retry_count value. The reason for this 18241 * discrepancy has been so utterly lost beneath the 18242 * Sands of Time that even Indiana Jones could not 18243 * find it.
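 *
 * In effect the two branches below reduce to: on fibre channel,
 * reset the disk once any NOT READY retry of this command has
 * occurred; otherwise reset only after more than
 * un_reset_retry_count retries. In both cases SD_LOGMASK_DIAG
 * forces the reset, and nothing is done while a START STOP UNIT
 * timeout is already pending.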
18244 */ 18245 if (un->un_f_is_fibre == TRUE) { 18246 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18247 (xp->xb_nr_retry_count > 0)) && 18248 (un->un_startstop_timeid == NULL)) { 18249 scsi_log(SD_DEVINFO(un), sd_label, 18250 CE_WARN, "logical unit not ready, " 18251 "resetting disk\n"); 18252 sd_reset_target(un, pktp); 18253 } 18254 } else { 18255 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18256 (xp->xb_nr_retry_count > 18257 un->un_reset_retry_count)) && 18258 (un->un_startstop_timeid == NULL)) { 18259 scsi_log(SD_DEVINFO(un), sd_label, 18260 CE_WARN, "logical unit not ready, " 18261 "resetting disk\n"); 18262 sd_reset_target(un, pktp); 18263 } 18264 } 18265 break; 18266 18267 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 18268 /* 18269 * If the target is in the process of becoming 18270 * ready, just proceed with the retry. This can 18271 * happen with CD-ROMs that take a long time to 18272 * read TOC after a power cycle or reset. 18273 */ 18274 goto do_retry; 18275 18276 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 18277 break; 18278 18279 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 18280 /* 18281 * Retries cannot help here so just fail right away. 18282 */ 18283 goto fail_command; 18284 18285 case 0x88: 18286 /* 18287 * Vendor-unique code for T3/T4: it indicates a 18288 * path problem in a multipathed config, but as far as 18289 * the target driver is concerned it equates to a fatal 18290 * error, so we should just fail the command right away 18291 * (without printing anything to the console). If this 18292 * is not a T3/T4, fall thru to the default recovery 18293 * action. 18294 * T3/T4 is FC only, don't need to check is_fibre 18295 */ 18296 if (SD_IS_T3(un) || SD_IS_T4(un)) { 18297 sd_return_failed_command(un, bp, EIO); 18298 return; 18299 } 18300 /* FALLTHRU */ 18301 18302 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 18303 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 18304 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 18305 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 18306 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 18307 default: /* Possible future codes in SCSI spec? */ 18308 /* 18309 * For removable-media devices, do not retry if 18310 * ASCQ > 2 as these result mostly from USCSI commands 18311 * on MMC devices issued to check status of an 18312 * operation initiated in immediate mode. Also for 18313 * ASCQ >= 4 do not print console messages as these 18314 * mainly represent a user-initiated operation 18315 * instead of a system failure. 18316 */ 18317 if (un->un_f_has_removable_media) { 18318 si.ssi_severity = SCSI_ERR_ALL; 18319 goto fail_command; 18320 } 18321 break; 18322 } 18323 18324 /* 18325 * As part of our recovery attempt for the NOT READY 18326 * condition, we issue a START STOP UNIT command. However 18327 * we want to wait for a short delay before attempting this 18328 * as there may still be more commands coming back from the 18329 * target with the check condition. To do this we use 18330 * timeout(9F) to call sd_start_stop_unit_callback() after 18331 * the delay interval expires. (sd_start_stop_unit_callback() 18332 * dispatches sd_start_stop_unit_task(), which will issue 18333 * the actual START STOP UNIT command.) The delay interval 18334 * is one-half of the delay that we will use to retry the 18335 * command that generated the NOT READY condition.
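 *
 * The sequence, roughly:
 *
 *	timeout(9F) fires after un_busy_timeout / 2
 *	  -> sd_start_stop_unit_callback()
 *	       -> dispatches sd_start_stop_unit_task() via taskq
 *	            -> issues the START STOP UNIT; on completion
 *	               the queued retry of the original command
 *	               is started.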
18336 * 18337 * Note that we could just dispatch sd_start_stop_unit_task() 18338 * from here and allow it to sleep for the delay interval, 18339 * but then we would be tying up the taskq thread 18340 * unnecessarily for the duration of the delay. 18341 * 18342 * Do not issue the START STOP UNIT if the current command 18343 * is already a START STOP UNIT. 18344 */ 18345 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 18346 break; 18347 } 18348 18349 /* 18350 * Do not schedule the timeout if one is already pending. 18351 */ 18352 if (un->un_startstop_timeid != NULL) { 18353 SD_INFO(SD_LOG_ERROR, un, 18354 "sd_sense_key_not_ready: restart already issued to" 18355 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 18356 ddi_get_instance(SD_DEVINFO(un))); 18357 break; 18358 } 18359 18360 /* 18361 * Schedule the START STOP UNIT command, then queue the command 18362 * for a retry. 18363 * 18364 * Note: A timeout is not scheduled for this retry because we 18365 * want the retry to be serial with the START_STOP_UNIT. The 18366 * retry will be started when the START_STOP_UNIT is completed 18367 * in sd_start_stop_unit_task. 18368 */ 18369 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 18370 un, un->un_busy_timeout / 2); 18371 xp->xb_nr_retry_count++; 18372 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 18373 return; 18374 18375 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 18376 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18377 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18378 "unit does not respond to selection\n"); 18379 } 18380 break; 18381 18382 case 0x3A: /* MEDIUM NOT PRESENT */ 18383 if (sd_error_level >= SCSI_ERR_FATAL) { 18384 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18385 "Caddy not inserted in drive\n"); 18386 } 18387 18388 sr_ejected(un); 18389 un->un_mediastate = DKIO_EJECTED; 18390 /* The state has changed, inform the media watch routines */ 18391 cv_broadcast(&un->un_state_cv); 18392 /* Just fail if no media is present in the drive. */ 18393 goto fail_command; 18394 18395 default: 18396 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18397 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 18398 "Unit not Ready. Additional sense code 0x%x\n", 18399 asc); 18400 } 18401 break; 18402 } 18403 18404 do_retry: 18405 18406 /* 18407 * Retry the command, as some targets may report NOT READY for 18408 * several seconds after being reset. 18409 */ 18410 xp->xb_nr_retry_count++; 18411 si.ssi_severity = SCSI_ERR_RETRYABLE; 18412 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18413 &si, EIO, un->un_busy_timeout, NULL); 18414 18415 return; 18416 18417 fail_command: 18418 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18419 sd_return_failed_command(un, bp, EIO); 18420 } 18421 18422 18423 18424 /* 18425 * Function: sd_sense_key_medium_or_hardware_error 18426 * 18427 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 18428 * sense key.
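 *		Both sense keys count as hard errors; a medium error
 *		additionally bumps the sd_rq_media_err kstat. Once the
 *		retry count reaches the reset threshold, a LUN and/or
 *		target reset may be attempted, except that an LSI array
 *		reporting ASC 0x84 (LUN Dead) is trusted and not reset
 *		(details below).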
18429 * 18430 * Context: May be called from interrupt context 18431 */ 18432 18433 static void 18434 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 18435 uint8_t *sense_datap, 18436 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18437 { 18438 struct sd_sense_info si; 18439 uint8_t sense_key = scsi_sense_key(sense_datap); 18440 uint8_t asc = scsi_sense_asc(sense_datap); 18441 18442 ASSERT(un != NULL); 18443 ASSERT(mutex_owned(SD_MUTEX(un))); 18444 ASSERT(bp != NULL); 18445 ASSERT(xp != NULL); 18446 ASSERT(pktp != NULL); 18447 18448 si.ssi_severity = SCSI_ERR_FATAL; 18449 si.ssi_pfa_flag = FALSE; 18450 18451 if (sense_key == KEY_MEDIUM_ERROR) { 18452 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 18453 } 18454 18455 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18456 18457 if ((un->un_reset_retry_count != 0) && 18458 (xp->xb_retry_count == un->un_reset_retry_count)) { 18459 mutex_exit(SD_MUTEX(un)); 18460 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 18461 if (un->un_f_allow_bus_device_reset == TRUE) { 18462 18463 boolean_t try_resetting_target = B_TRUE; 18464 18465 /* 18466 * We need to be able to handle specific ASCs when we are 18467 * handling a KEY_HARDWARE_ERROR. In particular 18468 * taking the default action of resetting the target may 18469 * not be the appropriate way to attempt recovery. 18470 * Resetting a target because of a single LUN failure 18471 * victimizes all LUNs on that target. 18472 * 18473 * This is true for the LSI arrays: if an LSI 18474 * array controller returns an ASC of 0x84 (LUN Dead) we 18475 * should trust it. 18476 */ 18477 18478 if (sense_key == KEY_HARDWARE_ERROR) { 18479 switch (asc) { 18480 case 0x84: 18481 if (SD_IS_LSI(un)) { 18482 try_resetting_target = B_FALSE; 18483 } 18484 break; 18485 default: 18486 break; 18487 } 18488 } 18489 18490 if (try_resetting_target == B_TRUE) { 18491 int reset_retval = 0; 18492 if (un->un_f_lun_reset_enabled == TRUE) { 18493 SD_TRACE(SD_LOG_IO_CORE, un, 18494 "sd_sense_key_medium_or_hardware_" 18495 "error: issuing RESET_LUN\n"); 18496 reset_retval = 18497 scsi_reset(SD_ADDRESS(un), 18498 RESET_LUN); 18499 } 18500 if (reset_retval == 0) { 18501 SD_TRACE(SD_LOG_IO_CORE, un, 18502 "sd_sense_key_medium_or_hardware_" 18503 "error: issuing RESET_TARGET\n"); 18504 (void) scsi_reset(SD_ADDRESS(un), 18505 RESET_TARGET); 18506 } 18507 } 18508 } 18509 mutex_enter(SD_MUTEX(un)); 18510 } 18511 18512 /* 18513 * This really ought to be a fatal error, but we will retry anyway 18514 * as some drives report this as a spurious error. 18515 */ 18516 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18517 &si, EIO, (clock_t)0, NULL); 18518 } 18519 18520 18521 18522 /* 18523 * Function: sd_sense_key_illegal_request 18524 * 18525 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18526 * 18527 * Context: May be called from interrupt context 18528 */ 18529 18530 static void 18531 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18532 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18533 { 18534 struct sd_sense_info si; 18535 18536 ASSERT(un != NULL); 18537 ASSERT(mutex_owned(SD_MUTEX(un))); 18538 ASSERT(bp != NULL); 18539 ASSERT(xp != NULL); 18540 ASSERT(pktp != NULL); 18541 18542 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18543 18544 si.ssi_severity = SCSI_ERR_INFO; 18545 si.ssi_pfa_flag = FALSE; 18546 18547 /* Pointless to retry if the target thinks it's an illegal request */ 18548 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18549 sd_return_failed_command(un, bp, EIO); 18550 } 18551 18552 18553 18554 18555 /* 18556 * Function: sd_sense_key_unit_attention 18557 * 18558 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18559 * 18560 * Context: May be called from interrupt context 18561 */ 18562 18563 static void 18564 sd_sense_key_unit_attention(struct sd_lun *un, 18565 uint8_t *sense_datap, 18566 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18567 { 18568 /* 18569 * For UNIT ATTENTION we allow retries for one minute. Devices 18570 * like Sonoma can return UNIT ATTENTION close to a minute 18571 * under certain conditions. 18572 */ 18573 int retry_check_flag = SD_RETRIES_UA; 18574 boolean_t kstat_updated = B_FALSE; 18575 struct sd_sense_info si; 18576 uint8_t asc = scsi_sense_asc(sense_datap); 18577 uint8_t ascq = scsi_sense_ascq(sense_datap); 18578 18579 ASSERT(un != NULL); 18580 ASSERT(mutex_owned(SD_MUTEX(un))); 18581 ASSERT(bp != NULL); 18582 ASSERT(xp != NULL); 18583 ASSERT(pktp != NULL); 18584 18585 si.ssi_severity = SCSI_ERR_INFO; 18586 si.ssi_pfa_flag = FALSE; 18587 18588 18589 switch (asc) { 18590 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18591 if (sd_report_pfa != 0) { 18592 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18593 si.ssi_pfa_flag = TRUE; 18594 retry_check_flag = SD_RETRIES_STANDARD; 18595 goto do_retry; 18596 } 18597 18598 break; 18599 18600 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18601 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18602 un->un_resvd_status |= 18603 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18604 } 18605 #ifdef _LP64 18606 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18607 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18608 un, KM_NOSLEEP) == 0) { 18609 /* 18610 * If we can't dispatch the task we'll just 18611 * live without descriptor sense. We can 18612 * try again on the next "unit attention" 18613 */ 18614 SD_ERROR(SD_LOG_ERROR, un, 18615 "sd_sense_key_unit_attention: " 18616 "Could not dispatch " 18617 "sd_reenable_dsense_task\n"); 18618 } 18619 } 18620 #endif /* _LP64 */ 18621 /* FALLTHRU */ 18622 18623 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18624 if (!un->un_f_has_removable_media) { 18625 break; 18626 } 18627 18628 /* 18629 * When we get a unit attention from a removable-media device, 18630 * it may be in a state that will take a long time to recover 18631 * (e.g., from a reset). Since we are executing in interrupt 18632 * context here, we cannot wait around for the device to come 18633 * back. So hand this command off to sd_media_change_task() 18634 * for deferred processing under taskq thread context. (Note 18635 * that the command still may be failed if a problem is 18636 * encountered at a later time.) 
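 *
 * A sketch of the handoff (the buf is recoverable from the pkt):
 *
 *	taskq_dispatch(sd_tq, sd_media_change_task, pktp, KM_NOSLEEP)
 *	  -> sd_media_change_task() recovers bp from pkt_private,
 *	     calls sd_handle_mchange() with retries in thread
 *	     context, then retries or fails the original command.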
18637 */ 18638 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 18639 KM_NOSLEEP) == 0) { 18640 /* 18641 * Cannot dispatch the request so fail the command. 18642 */ 18643 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18644 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18645 si.ssi_severity = SCSI_ERR_FATAL; 18646 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18647 sd_return_failed_command(un, bp, EIO); 18648 } 18649 18650 /* 18651 * If we failed to dispatch sd_media_change_task(), the kstats were 18652 * already updated above. If the dispatch succeeded, the task will 18653 * update the kstats later if it encounters an error. Either way, 18654 * set the kstat_updated flag here. 18655 */ 18656 kstat_updated = B_TRUE; 18657 18658 /* 18659 * Either the command has been successfully dispatched to a 18660 * task Q for retrying, or the dispatch failed. In either case 18661 * do NOT retry again by calling sd_retry_command. This sets up 18662 * two retries of the same command and when one completes and 18663 * frees the resources the other will access freed memory, 18664 * a bad thing. 18665 */ 18666 return; 18667 18668 default: 18669 break; 18670 } 18671 18672 /* 18673 * ASC ASCQ 18674 * 2A 09 Capacity data has changed 18675 * 2A 01 Mode parameters changed 18676 * 3F 0E Reported luns data has changed 18677 * Arrays that support logical unit expansion should report 18678 * capacity changes (2Ah/09). Mode parameters changed and 18679 * reported luns data has changed are used as approximations. 18680 */ 18681 if (((asc == 0x2a) && (ascq == 0x09)) || 18682 ((asc == 0x2a) && (ascq == 0x01)) || 18683 ((asc == 0x3f) && (ascq == 0x0e))) { 18684 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 18685 KM_NOSLEEP) == 0) { 18686 SD_ERROR(SD_LOG_ERROR, un, 18687 "sd_sense_key_unit_attention: " 18688 "Could not dispatch sd_target_change_task\n"); 18689 } 18690 } 18691 18692 /* 18693 * Update kstat if we haven't done so already. 18694 */ 18695 if (!kstat_updated) { 18696 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18697 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18698 } 18699 18700 do_retry: 18701 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18702 EIO, SD_UA_RETRY_DELAY, NULL); 18703 } 18704 18705 18706 18707 /* 18708 * Function: sd_sense_key_fail_command 18709 * 18710 * Description: Used to fail a command when we don't like the sense key that 18711 * was returned. 18712 * 18713 * Context: May be called from interrupt context 18714 */ 18715 18716 static void 18717 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 18718 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18719 { 18720 struct sd_sense_info si; 18721 18722 ASSERT(un != NULL); 18723 ASSERT(mutex_owned(SD_MUTEX(un))); 18724 ASSERT(bp != NULL); 18725 ASSERT(xp != NULL); 18726 ASSERT(pktp != NULL); 18727 18728 si.ssi_severity = SCSI_ERR_FATAL; 18729 si.ssi_pfa_flag = FALSE; 18730 18731 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18732 sd_return_failed_command(un, bp, EIO); 18733 } 18734 18735 18736 18737 /* 18738 * Function: sd_sense_key_blank_check 18739 * 18740 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18741 * Has no monetary connotation.
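 *		(Per the SCSI spec, BLANK CHECK is reported by
 *		write-once or sequential-access devices that encounter
 *		blank, or unexpectedly non-blank, medium. The command
 *		is failed either way below; removable media merely
 *		lowers the message severity.)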
18742 * 18743 * Context: May be called from interrupt context 18744 */ 18745 18746 static void 18747 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18748 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18749 { 18750 struct sd_sense_info si; 18751 18752 ASSERT(un != NULL); 18753 ASSERT(mutex_owned(SD_MUTEX(un))); 18754 ASSERT(bp != NULL); 18755 ASSERT(xp != NULL); 18756 ASSERT(pktp != NULL); 18757 18758 /* 18759 * Blank check is not fatal for removable devices, therefore 18760 * it does not require a console message. 18761 */ 18762 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18763 SCSI_ERR_FATAL; 18764 si.ssi_pfa_flag = FALSE; 18765 18766 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18767 sd_return_failed_command(un, bp, EIO); 18768 } 18769 18770 18771 18772 18773 /* 18774 * Function: sd_sense_key_aborted_command 18775 * 18776 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18777 * 18778 * Context: May be called from interrupt context 18779 */ 18780 18781 static void 18782 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18783 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18784 { 18785 struct sd_sense_info si; 18786 18787 ASSERT(un != NULL); 18788 ASSERT(mutex_owned(SD_MUTEX(un))); 18789 ASSERT(bp != NULL); 18790 ASSERT(xp != NULL); 18791 ASSERT(pktp != NULL); 18792 18793 si.ssi_severity = SCSI_ERR_FATAL; 18794 si.ssi_pfa_flag = FALSE; 18795 18796 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18797 18798 /* 18799 * This really ought to be a fatal error, but we will retry anyway 18800 * as some drives report this as a spurious error. 18801 */ 18802 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18803 &si, EIO, drv_usectohz(100000), NULL); 18804 } 18805 18806 18807 18808 /* 18809 * Function: sd_sense_key_default 18810 * 18811 * Description: Default recovery action for several SCSI sense keys (basically 18812 * attempts a retry). 18813 * 18814 * Context: May be called from interrupt context 18815 */ 18816 18817 static void 18818 sd_sense_key_default(struct sd_lun *un, 18819 uint8_t *sense_datap, 18820 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18821 { 18822 struct sd_sense_info si; 18823 uint8_t sense_key = scsi_sense_key(sense_datap); 18824 18825 ASSERT(un != NULL); 18826 ASSERT(mutex_owned(SD_MUTEX(un))); 18827 ASSERT(bp != NULL); 18828 ASSERT(xp != NULL); 18829 ASSERT(pktp != NULL); 18830 18831 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18832 18833 /* 18834 * Undecoded sense key. Attempt retries and hope that will fix 18835 * the problem. Otherwise, we're dead. 18836 */ 18837 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18838 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18839 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18840 } 18841 18842 si.ssi_severity = SCSI_ERR_FATAL; 18843 si.ssi_pfa_flag = FALSE; 18844 18845 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18846 &si, EIO, (clock_t)0, NULL); 18847 } 18848 18849 18850 18851 /* 18852 * Function: sd_print_retry_msg 18853 * 18854 * Description: Print a message indicating the retry action being taken. 18855 * 18856 * Arguments: un - ptr to associated softstate 18857 * bp - ptr to buf(9S) for the command 18858 * arg - not used. 
18859 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18860 * or SD_NO_RETRY_ISSUED 18861 * 18862 * Context: May be called from interrupt context 18863 */ 18864 /* ARGSUSED */ 18865 static void 18866 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18867 { 18868 struct sd_xbuf *xp; 18869 struct scsi_pkt *pktp; 18870 char *reasonp; 18871 char *msgp; 18872 18873 ASSERT(un != NULL); 18874 ASSERT(mutex_owned(SD_MUTEX(un))); 18875 ASSERT(bp != NULL); 18876 pktp = SD_GET_PKTP(bp); 18877 ASSERT(pktp != NULL); 18878 xp = SD_GET_XBUF(bp); 18879 ASSERT(xp != NULL); 18880 18881 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18882 mutex_enter(&un->un_pm_mutex); 18883 if ((un->un_state == SD_STATE_SUSPENDED) || 18884 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18885 (pktp->pkt_flags & FLAG_SILENT)) { 18886 mutex_exit(&un->un_pm_mutex); 18887 goto update_pkt_reason; 18888 } 18889 mutex_exit(&un->un_pm_mutex); 18890 18891 /* 18892 * Suppress messages if they are all the same pkt_reason; with 18893 * TQ, many (up to 256) are returned with the same pkt_reason. 18894 * If we are in panic, then suppress the retry messages. 18895 */ 18896 switch (flag) { 18897 case SD_NO_RETRY_ISSUED: 18898 msgp = "giving up"; 18899 break; 18900 case SD_IMMEDIATE_RETRY_ISSUED: 18901 case SD_DELAYED_RETRY_ISSUED: 18902 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18903 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18904 (sd_error_level != SCSI_ERR_ALL))) { 18905 return; 18906 } 18907 msgp = "retrying command"; 18908 break; 18909 default: 18910 goto update_pkt_reason; 18911 } 18912 18913 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18914 scsi_rname(pktp->pkt_reason)); 18915 18916 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18917 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18918 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18919 } 18920 18921 update_pkt_reason: 18922 /* 18923 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18924 * This is to prevent multiple console messages for the same failure 18925 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18926 * when the command is retried successfully because there still may be 18927 * more commands coming back with the same value of pktp->pkt_reason. 18928 */ 18929 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18930 un->un_last_pkt_reason = pktp->pkt_reason; 18931 } 18932 } 18933 18934 18935 /* 18936 * Function: sd_print_cmd_incomplete_msg 18937 * 18938 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18939 * 18940 * Arguments: un - ptr to associated softstate 18941 * bp - ptr to buf(9S) for the command 18942 * arg - passed to sd_print_retry_msg() 18943 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18944 * or SD_NO_RETRY_ISSUED 18945 * 18946 * Context: May be called from interrupt context 18947 */ 18948 18949 static void 18950 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18951 int code) 18952 { 18953 dev_info_t *dip; 18954 18955 ASSERT(un != NULL); 18956 ASSERT(mutex_owned(SD_MUTEX(un))); 18957 ASSERT(bp != NULL); 18958 18959 switch (code) { 18960 case SD_NO_RETRY_ISSUED: 18961 /* Command was failed. Someone turned off this target? 
*/ 18962 if (un->un_state != SD_STATE_OFFLINE) { 18963 /* 18964 * Suppress message if we are detaching and 18965 * device has been disconnected 18966 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18967 * private interface and not part of the DDI 18968 */ 18969 dip = un->un_sd->sd_dev; 18970 if (!(DEVI_IS_DETACHING(dip) && 18971 DEVI_IS_DEVICE_REMOVED(dip))) { 18972 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18973 "disk not responding to selection\n"); 18974 } 18975 New_state(un, SD_STATE_OFFLINE); 18976 } 18977 break; 18978 18979 case SD_DELAYED_RETRY_ISSUED: 18980 case SD_IMMEDIATE_RETRY_ISSUED: 18981 default: 18982 /* Command was successfully queued for retry */ 18983 sd_print_retry_msg(un, bp, arg, code); 18984 break; 18985 } 18986 } 18987 18988 18989 /* 18990 * Function: sd_pkt_reason_cmd_incomplete 18991 * 18992 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18993 * 18994 * Context: May be called from interrupt context 18995 */ 18996 18997 static void 18998 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18999 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19000 { 19001 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 19002 19003 ASSERT(un != NULL); 19004 ASSERT(mutex_owned(SD_MUTEX(un))); 19005 ASSERT(bp != NULL); 19006 ASSERT(xp != NULL); 19007 ASSERT(pktp != NULL); 19008 19009 /* Do not do a reset if selection did not complete */ 19010 /* Note: Should this not just check the bit? */ 19011 if (pktp->pkt_state != STATE_GOT_BUS) { 19012 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19013 sd_reset_target(un, pktp); 19014 } 19015 19016 /* 19017 * If the target was not successfully selected, then set 19018 * SD_RETRIES_FAILFAST to indicate that we lost communication 19019 * with the target, and further retries and/or commands are 19020 * likely to take a long time. 19021 */ 19022 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 19023 flag |= SD_RETRIES_FAILFAST; 19024 } 19025 19026 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19027 19028 sd_retry_command(un, bp, flag, 19029 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19030 } 19031 19032 19033 19034 /* 19035 * Function: sd_pkt_reason_cmd_tran_err 19036 * 19037 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 19038 * 19039 * Context: May be called from interrupt context 19040 */ 19041 19042 static void 19043 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 19044 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19045 { 19046 ASSERT(un != NULL); 19047 ASSERT(mutex_owned(SD_MUTEX(un))); 19048 ASSERT(bp != NULL); 19049 ASSERT(xp != NULL); 19050 ASSERT(pktp != NULL); 19051 19052 /* 19053 * Do not reset if we got a parity error, or if 19054 * selection did not complete. 19055 */ 19056 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19057 /* Note: Should this not just check the bit for pkt_state? */ 19058 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 19059 (pktp->pkt_state != STATE_GOT_BUS)) { 19060 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19061 sd_reset_target(un, pktp); 19062 } 19063 19064 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19065 19066 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19067 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19068 } 19069 19070 19071 19072 /* 19073 * Function: sd_pkt_reason_cmd_reset 19074 * 19075 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
19076 * 19077 * Context: May be called from interrupt context 19078 */ 19079 19080 static void 19081 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 19082 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19083 { 19084 ASSERT(un != NULL); 19085 ASSERT(mutex_owned(SD_MUTEX(un))); 19086 ASSERT(bp != NULL); 19087 ASSERT(xp != NULL); 19088 ASSERT(pktp != NULL); 19089 19090 /* The target may still be running the command, so try to reset. */ 19091 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19092 sd_reset_target(un, pktp); 19093 19094 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19095 19096 /* 19097 * If pkt_reason is CMD_RESET chances are that this pkt got 19098 * reset because another target on this bus caused it. The target 19099 * that caused it should get CMD_TIMEOUT with pkt_statistics 19100 * of STAT_TIMEOUT/STAT_DEV_RESET. 19101 */ 19102 19103 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19104 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19105 } 19106 19107 19108 19109 19110 /* 19111 * Function: sd_pkt_reason_cmd_aborted 19112 * 19113 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 19114 * 19115 * Context: May be called from interrupt context 19116 */ 19117 19118 static void 19119 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 19120 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19121 { 19122 ASSERT(un != NULL); 19123 ASSERT(mutex_owned(SD_MUTEX(un))); 19124 ASSERT(bp != NULL); 19125 ASSERT(xp != NULL); 19126 ASSERT(pktp != NULL); 19127 19128 /* The target may still be running the command, so try to reset. */ 19129 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19130 sd_reset_target(un, pktp); 19131 19132 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19133 19134 /* 19135 * If pkt_reason is CMD_ABORTED chances are that this pkt got 19136 * aborted because another target on this bus caused it. The target 19137 * that caused it should get CMD_TIMEOUT with pkt_statistics 19138 * of STAT_TIMEOUT/STAT_DEV_RESET. 19139 */ 19140 19141 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19142 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19143 } 19144 19145 19146 19147 /* 19148 * Function: sd_pkt_reason_cmd_timeout 19149 * 19150 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 19151 * 19152 * Context: May be called from interrupt context 19153 */ 19154 19155 static void 19156 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 19157 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19158 { 19159 ASSERT(un != NULL); 19160 ASSERT(mutex_owned(SD_MUTEX(un))); 19161 ASSERT(bp != NULL); 19162 ASSERT(xp != NULL); 19163 ASSERT(pktp != NULL); 19164 19165 19166 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19167 sd_reset_target(un, pktp); 19168 19169 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19170 19171 /* 19172 * A command timeout indicates that we could not establish 19173 * communication with the target, so set SD_RETRIES_FAILFAST 19174 * as further retries/commands are likely to take a long time. 19175 */ 19176 sd_retry_command(un, bp, 19177 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 19178 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19179 } 19180 19181 19182 19183 /* 19184 * Function: sd_pkt_reason_cmd_unx_bus_free 19185 * 19186 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
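 *		A retry is always queued below; the per-retry message
 *		is suppressed when pkt_statistics carries STAT_PERR,
 *		i.e. when the unexpected bus free followed a parity
 *		error.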
19187 * 19188 * Context: May be called from interrupt context 19189 */ 19190 19191 static void 19192 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 19193 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19194 { 19195 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 19196 19197 ASSERT(un != NULL); 19198 ASSERT(mutex_owned(SD_MUTEX(un))); 19199 ASSERT(bp != NULL); 19200 ASSERT(xp != NULL); 19201 ASSERT(pktp != NULL); 19202 19203 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19204 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19205 19206 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 19207 sd_print_retry_msg : NULL; 19208 19209 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19210 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19211 } 19212 19213 19214 /* 19215 * Function: sd_pkt_reason_cmd_tag_reject 19216 * 19217 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 19218 * 19219 * Context: May be called from interrupt context 19220 */ 19221 19222 static void 19223 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 19224 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19225 { 19226 ASSERT(un != NULL); 19227 ASSERT(mutex_owned(SD_MUTEX(un))); 19228 ASSERT(bp != NULL); 19229 ASSERT(xp != NULL); 19230 ASSERT(pktp != NULL); 19231 19232 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19233 pktp->pkt_flags = 0; 19234 un->un_tagflags = 0; 19235 if (un->un_f_opt_queueing == TRUE) { 19236 un->un_throttle = min(un->un_throttle, 3); 19237 } else { 19238 un->un_throttle = 1; 19239 } 19240 mutex_exit(SD_MUTEX(un)); 19241 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 19242 mutex_enter(SD_MUTEX(un)); 19243 19244 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19245 19246 /* Legacy behavior not to check retry counts here. */ 19247 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 19248 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19249 } 19250 19251 19252 /* 19253 * Function: sd_pkt_reason_default 19254 * 19255 * Description: Default recovery actions for SCSA pkt_reason values that 19256 * do not have more explicit recovery actions. 19257 * 19258 * Context: May be called from interrupt context 19259 */ 19260 19261 static void 19262 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 19263 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19264 { 19265 ASSERT(un != NULL); 19266 ASSERT(mutex_owned(SD_MUTEX(un))); 19267 ASSERT(bp != NULL); 19268 ASSERT(xp != NULL); 19269 ASSERT(pktp != NULL); 19270 19271 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19272 sd_reset_target(un, pktp); 19273 19274 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19275 19276 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19277 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19278 } 19279 19280 19281 19282 /* 19283 * Function: sd_pkt_status_check_condition 19284 * 19285 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
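 *		With auto request sense (ARQ) enabled, the transport
 *		has already collected the sense data, so the command is
 *		simply retried; without ARQ the driver must first issue
 *		a REQUEST SENSE of its own (see below).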
19286 * 19287 * Context: May be called from interrupt context 19288 */ 19289 19290 static void 19291 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 19292 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19293 { 19294 ASSERT(un != NULL); 19295 ASSERT(mutex_owned(SD_MUTEX(un))); 19296 ASSERT(bp != NULL); 19297 ASSERT(xp != NULL); 19298 ASSERT(pktp != NULL); 19299 19300 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 19301 "entry: buf:0x%p xp:0x%p\n", bp, xp); 19302 19303 /* 19304 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 19305 * command will be retried after the request sense). Otherwise, retry 19306 * the command. Note: we are issuing the request sense even though the 19307 * retry limit may have been reached for the failed command. 19308 */ 19309 if (un->un_f_arq_enabled == FALSE) { 19310 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19311 "no ARQ, sending request sense command\n"); 19312 sd_send_request_sense_command(un, bp, pktp); 19313 } else { 19314 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19315 "ARQ, retrying request sense command\n"); 19316 #if defined(__i386) || defined(__amd64) 19317 /* 19318 * The SD_RETRY_DELAY value needs to be adjusted here 19319 * when SD_RETRY_DELAY changes in sddef.h 19320 */ 19321 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19322 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 19323 NULL); 19324 #else 19325 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 19326 EIO, SD_RETRY_DELAY, NULL); 19327 #endif 19328 } 19329 19330 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 19331 } 19332 19333 19334 /* 19335 * Function: sd_pkt_status_busy 19336 * 19337 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 19338 * 19339 * Context: May be called from interrupt context 19340 */ 19341 19342 static void 19343 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19344 struct scsi_pkt *pktp) 19345 { 19346 ASSERT(un != NULL); 19347 ASSERT(mutex_owned(SD_MUTEX(un))); 19348 ASSERT(bp != NULL); 19349 ASSERT(xp != NULL); 19350 ASSERT(pktp != NULL); 19351 19352 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19353 "sd_pkt_status_busy: entry\n"); 19354 19355 /* If retries are exhausted, just fail the command. */ 19356 if (xp->xb_retry_count >= un->un_busy_retry_count) { 19357 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19358 "device busy too long\n"); 19359 sd_return_failed_command(un, bp, EIO); 19360 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19361 "sd_pkt_status_busy: exit\n"); 19362 return; 19363 } 19364 xp->xb_retry_count++; 19365 19366 /* 19367 * Try to reset the target. However, we do not want to perform 19368 * more than one reset if the device continues to fail. The reset 19369 * will be performed when the retry count reaches the reset 19370 * threshold. This threshold should be set such that at least 19371 * one retry is issued before the reset is performed. 19372 */ 19373 if (xp->xb_retry_count == 19374 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 19375 int rval = 0; 19376 mutex_exit(SD_MUTEX(un)); 19377 if (un->un_f_allow_bus_device_reset == TRUE) { 19378 /* 19379 * First try to reset the LUN; if we cannot then 19380 * try to reset the target.
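 *
 * The escalation order below is RESET_LUN, then RESET_TARGET,
 * then RESET_ALL; each step is attempted only if the previous
 * one failed or was not permitted, and the command is failed
 * if all of them fail.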
19381 */ 19382 if (un->un_f_lun_reset_enabled == TRUE) { 19383 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19384 "sd_pkt_status_busy: RESET_LUN\n"); 19385 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19386 } 19387 if (rval == 0) { 19388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19389 "sd_pkt_status_busy: RESET_TARGET\n"); 19390 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19391 } 19392 } 19393 if (rval == 0) { 19394 /* 19395 * If the RESET_LUN and/or RESET_TARGET failed, 19396 * try RESET_ALL 19397 */ 19398 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19399 "sd_pkt_status_busy: RESET_ALL\n"); 19400 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19401 } 19402 mutex_enter(SD_MUTEX(un)); 19403 if (rval == 0) { 19404 /* 19405 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19406 * At this point we give up & fail the command. 19407 */ 19408 sd_return_failed_command(un, bp, EIO); 19409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19410 "sd_pkt_status_busy: exit (failed cmd)\n"); 19411 return; 19412 } 19413 } 19414 19415 /* 19416 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19417 * we have already checked the retry counts above. 19418 */ 19419 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19420 EIO, un->un_busy_timeout, NULL); 19421 19422 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19423 "sd_pkt_status_busy: exit\n"); 19424 } 19425 19426 19427 /* 19428 * Function: sd_pkt_status_reservation_conflict 19429 * 19430 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19431 * command status. 19432 * 19433 * Context: May be called from interrupt context 19434 */ 19435 19436 static void 19437 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19438 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19439 { 19440 ASSERT(un != NULL); 19441 ASSERT(mutex_owned(SD_MUTEX(un))); 19442 ASSERT(bp != NULL); 19443 ASSERT(xp != NULL); 19444 ASSERT(pktp != NULL); 19445 19446 /* 19447 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 19448 * conflict could be due to various reasons like incorrect keys, not 19449 * registered or not reserved etc. So, we return EACCES to the caller. 19450 */ 19451 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19452 int cmd = SD_GET_PKT_OPCODE(pktp); 19453 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19454 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19455 sd_return_failed_command(un, bp, EACCES); 19456 return; 19457 } 19458 } 19459 19460 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19461 19462 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19463 if (sd_failfast_enable != 0) { 19464 /* By definition, we must panic here.... */ 19465 sd_panic_for_res_conflict(un); 19466 /*NOTREACHED*/ 19467 } 19468 SD_ERROR(SD_LOG_IO, un, 19469 "sd_handle_resv_conflict: Disk Reserved\n"); 19470 sd_return_failed_command(un, bp, EACCES); 19471 return; 19472 } 19473 19474 /* 19475 * 1147670: retry only if sd_retry_on_reservation_conflict 19476 * property is set (default is 1). Retries will not succeed 19477 * on a disk reserved by another initiator. HA systems 19478 * may reset this via sd.conf to avoid these retries. 19479 * 19480 * Note: The legacy return code for this failure is EIO, however EACCES 19481 * seems more appropriate for a reservation conflict. 
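 *
 * (As an illustration only: on Solaris-derived systems such a
 * tunable is typically set from /etc/system, e.g.
 *	set sd:sd_retry_on_reservation_conflict = 0
 * but consult the platform documentation for the supported
 * mechanism before relying on this.)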
19482 */ 19483 if (sd_retry_on_reservation_conflict == 0) { 19484 SD_ERROR(SD_LOG_IO, un, 19485 "sd_handle_resv_conflict: Device Reserved\n"); 19486 sd_return_failed_command(un, bp, EIO); 19487 return; 19488 } 19489 19490 /* 19491 * Retry the command if we can. 19492 * 19493 * Note: The legacy return code for this failure is EIO, however EACCES 19494 * seems more appropriate for a reservation conflict. 19495 */ 19496 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19497 (clock_t)2, NULL); 19498 } 19499 19500 19501 19502 /* 19503 * Function: sd_pkt_status_qfull 19504 * 19505 * Description: Handle a QUEUE FULL condition from the target. This can 19506 * occur if the HBA does not handle the queue full condition. 19507 * (Basically this means third-party HBAs as Sun HBAs will 19508 * handle the queue full condition.) Note that if there are 19509 * some commands already in the transport, then the queue full 19510 * has occurred because the queue for this nexus is actually 19511 * full. If there are no commands in the transport, then the 19512 * queue full is resulting from some other initiator or lun 19513 * consuming all the resources at the target. 19514 * 19515 * Context: May be called from interrupt context 19516 */ 19517 19518 static void 19519 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 19520 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19521 { 19522 ASSERT(un != NULL); 19523 ASSERT(mutex_owned(SD_MUTEX(un))); 19524 ASSERT(bp != NULL); 19525 ASSERT(xp != NULL); 19526 ASSERT(pktp != NULL); 19527 19528 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19529 "sd_pkt_status_qfull: entry\n"); 19530 19531 /* 19532 * Just lower the QFULL throttle and retry the command. Note that 19533 * we do not limit the number of retries here. 19534 */ 19535 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19536 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19537 SD_RESTART_TIMEOUT, NULL); 19538 19539 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19540 "sd_pkt_status_qfull: exit\n"); 19541 } 19542 19543 19544 /* 19545 * Function: sd_reset_target 19546 * 19547 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19548 * RESET_TARGET, or RESET_ALL. 19549 * 19550 * Context: May be called under interrupt context. 19551 */ 19552 19553 static void 19554 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19555 { 19556 int rval = 0; 19557 19558 ASSERT(un != NULL); 19559 ASSERT(mutex_owned(SD_MUTEX(un))); 19560 ASSERT(pktp != NULL); 19561 19562 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19563 19564 /* 19565 * No need to reset if the transport layer has already done so. 
19566 */ 19567 if ((pktp->pkt_statistics & 19568 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19570 "sd_reset_target: no reset\n"); 19571 return; 19572 } 19573 19574 mutex_exit(SD_MUTEX(un)); 19575 19576 if (un->un_f_allow_bus_device_reset == TRUE) { 19577 if (un->un_f_lun_reset_enabled == TRUE) { 19578 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19579 "sd_reset_target: RESET_LUN\n"); 19580 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19581 } 19582 if (rval == 0) { 19583 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19584 "sd_reset_target: RESET_TARGET\n"); 19585 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19586 } 19587 } 19588 19589 if (rval == 0) { 19590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19591 "sd_reset_target: RESET_ALL\n"); 19592 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19593 } 19594 19595 mutex_enter(SD_MUTEX(un)); 19596 19597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19598 } 19599 19600 /* 19601 * Function: sd_target_change_task 19602 * 19603 * Description: Handle dynamic target change 19604 * 19605 * Context: Executes in a taskq() thread context 19606 */ 19607 static void 19608 sd_target_change_task(void *arg) 19609 { 19610 struct sd_lun *un = arg; 19611 uint64_t capacity; 19612 diskaddr_t label_cap; 19613 uint_t lbasize; 19614 sd_ssc_t *ssc; 19615 19616 ASSERT(un != NULL); 19617 ASSERT(!mutex_owned(SD_MUTEX(un))); 19618 19619 if ((un->un_f_blockcount_is_valid == FALSE) || 19620 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19621 return; 19622 } 19623 19624 ssc = sd_ssc_init(un); 19625 19626 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19627 &lbasize, SD_PATH_DIRECT) != 0) { 19628 SD_ERROR(SD_LOG_ERROR, un, 19629 "sd_target_change_task: fail to read capacity\n"); 19630 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19631 goto task_exit; 19632 } 19633 19634 mutex_enter(SD_MUTEX(un)); 19635 if (capacity <= un->un_blockcount) { 19636 mutex_exit(SD_MUTEX(un)); 19637 goto task_exit; 19638 } 19639 19640 sd_update_block_info(un, lbasize, capacity); 19641 mutex_exit(SD_MUTEX(un)); 19642 19643 /* 19644 * If lun is EFI labeled and lun capacity is greater than the 19645 * capacity contained in the label, log a sys event. 
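 *
 * (The event logged is EC_dev_status / ESC_DEV_DLE, via
 * sd_log_lun_expansion_event() below, which gives userland a
 * hook for reacting to dynamic LUN expansion.)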
19646 */ 19647 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19648 (void*)SD_PATH_DIRECT) == 0) { 19649 mutex_enter(SD_MUTEX(un)); 19650 if (un->un_f_blockcount_is_valid && 19651 un->un_blockcount > label_cap) { 19652 mutex_exit(SD_MUTEX(un)); 19653 sd_log_lun_expansion_event(un, KM_SLEEP); 19654 } else { 19655 mutex_exit(SD_MUTEX(un)); 19656 } 19657 } 19658 19659 task_exit: 19660 sd_ssc_fini(ssc); 19661 } 19662 19663 19664 /* 19665 * Function: sd_log_dev_status_event 19666 * 19667 * Description: Log EC_dev_status sysevent 19668 * 19669 * Context: Never called from interrupt context 19670 */ 19671 static void 19672 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag) 19673 { 19674 int err; 19675 char *path; 19676 nvlist_t *attr_list; 19677 19678 /* Allocate and build sysevent attribute list */ 19679 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19680 if (err != 0) { 19681 SD_ERROR(SD_LOG_ERROR, un, 19682 "sd_log_dev_status_event: fail to allocate space\n"); 19683 return; 19684 } 19685 19686 path = kmem_alloc(MAXPATHLEN, km_flag); 19687 if (path == NULL) { 19688 nvlist_free(attr_list); 19689 SD_ERROR(SD_LOG_ERROR, un, 19690 "sd_log_dev_status_event: fail to allocate space\n"); 19691 return; 19692 } 19693 /* 19694 * Add path attribute to identify the lun. 19695 * We are using minor node 'a' as the sysevent attribute. 19696 */ 19697 (void) snprintf(path, MAXPATHLEN, "/devices"); 19698 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19699 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19700 ":a"); 19701 19702 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path); 19703 if (err != 0) { 19704 nvlist_free(attr_list); 19705 kmem_free(path, MAXPATHLEN); 19706 SD_ERROR(SD_LOG_ERROR, un, 19707 "sd_log_dev_status_event: fail to add attribute\n"); 19708 return; 19709 } 19710 19711 /* Log dynamic lun expansion sysevent */ 19712 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19713 esc, attr_list, NULL, km_flag); 19714 if (err != DDI_SUCCESS) { 19715 SD_ERROR(SD_LOG_ERROR, un, 19716 "sd_log_dev_status_event: fail to log sysevent\n"); 19717 } 19718 19719 nvlist_free(attr_list); 19720 kmem_free(path, MAXPATHLEN); 19721 } 19722 19723 19724 /* 19725 * Function: sd_log_lun_expansion_event 19726 * 19727 * Description: Log lun expansion sys event 19728 * 19729 * Context: Never called from interrupt context 19730 */ 19731 static void 19732 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19733 { 19734 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag); 19735 } 19736 19737 19738 /* 19739 * Function: sd_log_eject_request_event 19740 * 19741 * Description: Log eject request sysevent 19742 * 19743 * Context: Never called from interrupt context 19744 */ 19745 static void 19746 sd_log_eject_request_event(struct sd_lun *un, int km_flag) 19747 { 19748 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag); 19749 } 19750 19751 19752 /* 19753 * Function: sd_media_change_task 19754 * 19755 * Description: Recovery action for CDROM to become available. 
19756 * 19757 * Context: Executes in a taskq() thread context 19758 */ 19759 19760 static void 19761 sd_media_change_task(void *arg) 19762 { 19763 struct scsi_pkt *pktp = arg; 19764 struct sd_lun *un; 19765 struct buf *bp; 19766 struct sd_xbuf *xp; 19767 int err = 0; 19768 int retry_count = 0; 19769 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19770 struct sd_sense_info si; 19771 19772 ASSERT(pktp != NULL); 19773 bp = (struct buf *)pktp->pkt_private; 19774 ASSERT(bp != NULL); 19775 xp = SD_GET_XBUF(bp); 19776 ASSERT(xp != NULL); 19777 un = SD_GET_UN(bp); 19778 ASSERT(un != NULL); 19779 ASSERT(!mutex_owned(SD_MUTEX(un))); 19780 ASSERT(un->un_f_monitor_media_state); 19781 19782 si.ssi_severity = SCSI_ERR_INFO; 19783 si.ssi_pfa_flag = FALSE; 19784 19785 /* 19786 * When a reset is issued on a CDROM, it takes a long time to 19787 * recover. The first few attempts to read capacity and other things 19788 * related to handling unit attention fail (with an ASC of 0x4 and 19789 * an ASCQ of 0x1). In that case we want to retry enough times, while 19790 * limiting the retries in other cases of genuine failure, such as 19791 * no media in the drive. 19792 */ 19793 while (retry_count++ < retry_limit) { 19794 if ((err = sd_handle_mchange(un)) == 0) { 19795 break; 19796 } 19797 if (err == EAGAIN) { 19798 retry_limit = SD_UNIT_ATTENTION_RETRY; 19799 } 19800 /* Sleep for 0.5 sec. & try again */ 19801 delay(drv_usectohz(500000)); 19802 } 19803 19804 /* 19805 * Dispatch (retry or fail) the original command here, 19806 * along with appropriate console messages.... 19807 * 19808 * Must grab the mutex before calling sd_retry_command, 19809 * sd_print_sense_msg and sd_return_failed_command. 19810 */ 19811 mutex_enter(SD_MUTEX(un)); 19812 if (err != SD_CMD_SUCCESS) { 19813 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19814 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19815 si.ssi_severity = SCSI_ERR_FATAL; 19816 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19817 sd_return_failed_command(un, bp, EIO); 19818 } else { 19819 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg, 19820 &si, EIO, (clock_t)0, NULL); 19821 } 19822 mutex_exit(SD_MUTEX(un)); 19823 } 19824 19825 19826 19827 /* 19828 * Function: sd_handle_mchange 19829 * 19830 * Description: Perform geometry validation & other recovery when CDROM 19831 * has been removed from drive.
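 *		Re-reads the capacity, updates the block info and error
 *		kstats, revalidates the label through cmlb, and finally
 *		attempts to re-lock the door.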
19832 * 19833 * Return Code: 0 for success 19834 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19835 * sd_send_scsi_READ_CAPACITY() 19836 * 19837 * Context: Executes in a taskq() thread context 19838 */ 19839 19840 static int 19841 sd_handle_mchange(struct sd_lun *un) 19842 { 19843 uint64_t capacity; 19844 uint32_t lbasize; 19845 int rval; 19846 sd_ssc_t *ssc; 19847 19848 ASSERT(!mutex_owned(SD_MUTEX(un))); 19849 ASSERT(un->un_f_monitor_media_state); 19850 19851 ssc = sd_ssc_init(un); 19852 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19853 SD_PATH_DIRECT_PRIORITY); 19854 19855 if (rval != 0) 19856 goto failed; 19857 19858 mutex_enter(SD_MUTEX(un)); 19859 sd_update_block_info(un, lbasize, capacity); 19860 19861 if (un->un_errstats != NULL) { 19862 struct sd_errstats *stp = 19863 (struct sd_errstats *)un->un_errstats->ks_data; 19864 stp->sd_capacity.value.ui64 = (uint64_t) 19865 ((uint64_t)un->un_blockcount * 19866 (uint64_t)un->un_tgt_blocksize); 19867 } 19868 19869 /* 19870 * Check if the media in the device is writable or not 19871 */ 19872 if (ISCD(un)) { 19873 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19874 } 19875 19876 /* 19877 * Note: Maybe let the strategy/partitioning chain worry about getting 19878 * valid geometry. 19879 */ 19880 mutex_exit(SD_MUTEX(un)); 19881 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19882 19883 19884 if (cmlb_validate(un->un_cmlbhandle, 0, 19885 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19886 sd_ssc_fini(ssc); 19887 return (EIO); 19888 } else { 19889 if (un->un_f_pkstats_enabled) { 19890 sd_set_pstats(un); 19891 SD_TRACE(SD_LOG_IO_PARTITION, un, 19892 "sd_handle_mchange: un:0x%p pstats created and " 19893 "set\n", un); 19894 } 19895 } 19896 19897 /* 19898 * Try to lock the door 19899 */ 19900 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19901 SD_PATH_DIRECT_PRIORITY); 19902 failed: 19903 if (rval != 0) 19904 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19905 sd_ssc_fini(ssc); 19906 return (rval); 19907 } 19908 19909 19910 /* 19911 * Function: sd_send_scsi_DOORLOCK 19912 * 19913 * Description: Issue the scsi DOOR LOCK command 19914 * 19915 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19916 * structure for this target. 19917 * flag - SD_REMOVAL_ALLOW 19918 * SD_REMOVAL_PREVENT 19919 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19920 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19921 * to use the USCSI "direct" chain and bypass the normal 19922 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19923 * command is issued as part of an error recovery action. 19924 * 19925 * Return Code: 0 - Success 19926 * errno return code from sd_ssc_send() 19927 * 19928 * Context: Can sleep. 19929 */ 19930 19931 static int 19932 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19933 { 19934 struct scsi_extended_sense sense_buf; 19935 union scsi_cdb cdb; 19936 struct uscsi_cmd ucmd_buf; 19937 int status; 19938 struct sd_lun *un; 19939 19940 ASSERT(ssc != NULL); 19941 un = ssc->ssc_un; 19942 ASSERT(un != NULL); 19943 ASSERT(!mutex_owned(SD_MUTEX(un))); 19944 19945 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19946 19947 /* already determined doorlock is not supported, fake success */ 19948 if (un->un_f_doorlock_supported == FALSE) { 19949 return (0); 19950 } 19951 19952 /* 19953 * If we are ejecting and see an SD_REMOVAL_PREVENT 19954 * ignore the command so we can complete the eject 19955 * operation. 
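 *
 * (EAGAIN is returned in that case, so the caller can tell the
 * skipped doorlock apart from a real failure.)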
19956 */ 19957 if (flag == SD_REMOVAL_PREVENT) { 19958 mutex_enter(SD_MUTEX(un)); 19959 if (un->un_f_ejecting == TRUE) { 19960 mutex_exit(SD_MUTEX(un)); 19961 return (EAGAIN); 19962 } 19963 mutex_exit(SD_MUTEX(un)); 19964 } 19965 19966 bzero(&cdb, sizeof (cdb)); 19967 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19968 19969 cdb.scc_cmd = SCMD_DOORLOCK; 19970 cdb.cdb_opaque[4] = (uchar_t)flag; 19971 19972 ucmd_buf.uscsi_cdb = (char *)&cdb; 19973 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19974 ucmd_buf.uscsi_bufaddr = NULL; 19975 ucmd_buf.uscsi_buflen = 0; 19976 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19977 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19978 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19979 ucmd_buf.uscsi_timeout = 15; 19980 19981 SD_TRACE(SD_LOG_IO, un, 19982 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19983 19984 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19985 UIO_SYSSPACE, path_flag); 19986 19987 if (status == 0) 19988 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19989 19990 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19991 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19992 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19993 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19994 19995 /* fake success and skip subsequent doorlock commands */ 19996 un->un_f_doorlock_supported = FALSE; 19997 return (0); 19998 } 19999 20000 return (status); 20001 } 20002 20003 /* 20004 * Function: sd_send_scsi_READ_CAPACITY 20005 * 20006 * Description: This routine uses the scsi READ CAPACITY command to determine 20007 * the device capacity in number of blocks and the device native 20008 * block size. If this function returns a failure, then the 20009 * values in *capp and *lbap are undefined. If the capacity 20010 * returned is 0xffffffff then the lun is too large for a 20011 * normal READ CAPACITY command and the results of a 20012 * READ CAPACITY 16 will be used instead. 20013 * 20014 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20015 * capp - ptr to unsigned 64-bit variable to receive the 20016 * capacity value from the command. 20017 * lbap - ptr to unsigned 32-bit variable to receive the 20018 * block size value from the command 20019 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20020 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20021 * to use the USCSI "direct" chain and bypass the normal 20022 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20023 * command is issued as part of an error recovery action. 20024 * 20025 * Return Code: 0 - Success 20026 * EIO - IO error 20027 * EACCES - Reservation conflict detected 20028 * EAGAIN - Device is becoming ready 20029 * errno return code from sd_ssc_send() 20030 * 20031 * Context: Can sleep. Blocks until command completes.
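 *
 *		For reference, the 8 bytes of READ CAPACITY (10) data
 *		decoded below are:
 *		  bytes 0-3: last logical block address (big-endian)
 *		  bytes 4-7: block length in bytes (big-endian)
 *		A returned LBA of 0xffffffff means the capacity will not
 *		fit, and READ CAPACITY (16) is issued instead.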
20032 */ 20033 20034 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 20035 20036 static int 20037 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20038 int path_flag) 20039 { 20040 struct scsi_extended_sense sense_buf; 20041 struct uscsi_cmd ucmd_buf; 20042 union scsi_cdb cdb; 20043 uint32_t *capacity_buf; 20044 uint64_t capacity; 20045 uint32_t lbasize; 20046 uint32_t pbsize; 20047 int status; 20048 struct sd_lun *un; 20049 20050 ASSERT(ssc != NULL); 20051 20052 un = ssc->ssc_un; 20053 ASSERT(un != NULL); 20054 ASSERT(!mutex_owned(SD_MUTEX(un))); 20055 ASSERT(capp != NULL); 20056 ASSERT(lbap != NULL); 20057 20058 SD_TRACE(SD_LOG_IO, un, 20059 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20060 20061 /* 20062 * First send a READ_CAPACITY command to the target. 20063 * (This command is mandatory under SCSI-2.) 20064 * 20065 * Set up the CDB for the READ_CAPACITY command. The Partial 20066 * Medium Indicator bit is cleared. The address field must be 20067 * zero if the PMI bit is zero. 20068 */ 20069 bzero(&cdb, sizeof (cdb)); 20070 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20071 20072 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 20073 20074 cdb.scc_cmd = SCMD_READ_CAPACITY; 20075 20076 ucmd_buf.uscsi_cdb = (char *)&cdb; 20077 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20078 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 20079 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 20080 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20081 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20082 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20083 ucmd_buf.uscsi_timeout = 60; 20084 20085 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20086 UIO_SYSSPACE, path_flag); 20087 20088 switch (status) { 20089 case 0: 20090 /* Return failure if we did not get valid capacity data. */ 20091 if (ucmd_buf.uscsi_resid != 0) { 20092 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20093 "sd_send_scsi_READ_CAPACITY received invalid " 20094 "capacity data"); 20095 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20096 return (EIO); 20097 } 20098 /* 20099 * Read capacity and block size from the READ CAPACITY 10 data. 20100 * This data may be adjusted later due to device specific 20101 * issues. 20102 * 20103 * According to the SCSI spec, the READ CAPACITY 10 20104 * command returns the following: 20105 * 20106 * bytes 0-3: Maximum logical block address available. 20107 * (MSB in byte:0 & LSB in byte:3) 20108 * 20109 * bytes 4-7: Block length in bytes 20110 * (MSB in byte:4 & LSB in byte:7) 20111 * 20112 */ 20113 capacity = BE_32(capacity_buf[0]); 20114 lbasize = BE_32(capacity_buf[1]); 20115 20116 /* 20117 * Done with capacity_buf 20118 */ 20119 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20120 20121 /* 20122 * if the reported capacity is set to all 0xf's, then 20123 * this disk is too large and requires SBC-2 commands. 20124 * Reissue the request using READ CAPACITY 16. 20125 */ 20126 if (capacity == 0xffffffff) { 20127 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20128 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 20129 &lbasize, &pbsize, path_flag); 20130 if (status != 0) { 20131 return (status); 20132 } else { 20133 goto rc16_done; 20134 } 20135 } 20136 break; /* Success! 
*/ 20137 case EIO: 20138 switch (ucmd_buf.uscsi_status) { 20139 case STATUS_RESERVATION_CONFLICT: 20140 status = EACCES; 20141 break; 20142 case STATUS_CHECK: 20143 /* 20144 * Check condition; look for ASC/ASCQ of 0x04/0x01 20145 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20146 */ 20147 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20148 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20149 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20150 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20151 return (EAGAIN); 20152 } 20153 break; 20154 default: 20155 break; 20156 } 20157 /* FALLTHRU */ 20158 default: 20159 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20160 return (status); 20161 } 20162 20163 /* 20164 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20165 * (2352 and 0 are common) so for these devices always force the value 20166 * to 2048 as required by the ATAPI specs. 20167 */ 20168 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20169 lbasize = 2048; 20170 } 20171 20172 /* 20173 * Get the maximum LBA value from the READ CAPACITY data. 20174 * Here we assume that the Partial Medium Indicator (PMI) bit 20175 * was cleared when issuing the command. This means that the LBA 20176 * returned from the device is the LBA of the last logical block 20177 * on the logical unit. The actual logical block count will be 20178 * this value plus one. 20179 */ 20180 capacity += 1; 20181 20182 /* 20183 * Currently, for removable media, the capacity is saved in terms 20184 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20185 */ 20186 if (un->un_f_has_removable_media) 20187 capacity *= (lbasize / un->un_sys_blocksize); 20188 20189 rc16_done: 20190 20191 /* 20192 * Copy the values from the READ CAPACITY command into the space 20193 * provided by the caller. 20194 */ 20195 *capp = capacity; 20196 *lbap = lbasize; 20197 20198 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20199 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20200 20201 /* 20202 * Both the lbasize and capacity from the device must be nonzero, 20203 * otherwise we assume that the values are not valid and return 20204 * failure to the caller. (4203735) 20205 */ 20206 if ((capacity == 0) || (lbasize == 0)) { 20207 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20208 "sd_send_scsi_READ_CAPACITY received invalid value " 20209 "capacity %llu lbasize %d", capacity, lbasize); 20210 return (EIO); 20211 } 20212 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20213 return (0); 20214 } 20215 20216 /* 20217 * Function: sd_send_scsi_READ_CAPACITY_16 20218 * 20219 * Description: This routine uses the scsi READ CAPACITY 16 command to 20220 * determine the device capacity in number of blocks and the 20221 * device native block size. If this function returns a failure, 20222 * then the values in *capp and *lbap are undefined. 20223 * This routine should be called by sd_send_scsi_READ_CAPACITY 20224 * which will apply any device specific adjustments to capacity 20225 * and lbasize. One exception is that it is also called by 20226 * sd_get_media_info_ext. In that function, there is no need to 20227 * adjust the capacity and lbasize. 20228 * 20229 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20230 * capp - ptr to unsigned 64-bit variable to receive the 20231 * capacity value from the command. 20232 * lbap - ptr to unsigned 32-bit variable to receive the 20233 * block size value from the command 20234 * psp - ptr to unsigned 32-bit variable to receive the 20235 * physical block size value from the command 20236 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20237 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20238 * to use the USCSI "direct" chain and bypass the normal 20239 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 20240 * this command is issued as part of an error recovery 20241 * action. 20242 * 20243 * Return Code: 0 - Success 20244 * EIO - IO error 20245 * EACCES - Reservation conflict detected 20246 * EAGAIN - Device is becoming ready 20247 * errno return code from sd_ssc_send() 20248 * 20249 * Context: Can sleep. Blocks until command completes. 20250 */ 20251 20252 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20253 20254 static int 20255 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 20256 uint32_t *lbap, uint32_t *psp, int path_flag) 20257 { 20258 struct scsi_extended_sense sense_buf; 20259 struct uscsi_cmd ucmd_buf; 20260 union scsi_cdb cdb; 20261 uint64_t *capacity16_buf; 20262 uint64_t capacity; 20263 uint32_t lbasize; 20264 uint32_t pbsize; 20265 uint32_t lbpb_exp; 20266 int status; 20267 struct sd_lun *un; 20268 20269 ASSERT(ssc != NULL); 20270 20271 un = ssc->ssc_un; 20272 ASSERT(un != NULL); 20273 ASSERT(!mutex_owned(SD_MUTEX(un))); 20274 ASSERT(capp != NULL); 20275 ASSERT(lbap != NULL); 20276 20277 SD_TRACE(SD_LOG_IO, un, 20278 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 20279 20280 /* 20281 * First send a READ_CAPACITY_16 command to the target. 20282 * 20283 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20284 * Medium Indicator bit is cleared. The address field must be 20285 * zero if the PMI bit is zero. 20286 */ 20287 bzero(&cdb, sizeof (cdb)); 20288 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20289 20290 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20291 20292 ucmd_buf.uscsi_cdb = (char *)&cdb; 20293 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20294 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20295 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20296 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20297 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20298 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20299 ucmd_buf.uscsi_timeout = 60; 20300 20301 /* 20302 * Read Capacity (16) is a Service Action In command. One 20303 * command byte (0x9E) is overloaded for multiple operations, 20304 * with the second CDB byte specifying the desired operation 20305 */ 20306 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20307 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20308 20309 /* 20310 * Fill in allocation length field 20311 */ 20312 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20313 20314 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20315 UIO_SYSSPACE, path_flag); 20316 20317 switch (status) { 20318 case 0: 20319 /* Return failure if we did not get valid capacity data. */ 20320 if (ucmd_buf.uscsi_resid > 20) { 20321 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20322 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20323 "capacity data"); 20324 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20325 return (EIO); 20326 } 20327 20328 /* 20329 * Read capacity and block size from the READ CAPACITY 16 data. 20330 * This data may be adjusted later due to device specific 20331 * issues. 20332
20332 * 20333 * According to the SCSI spec, the READ CAPACITY 16 20334 * command returns the following: 20335 * 20336 * bytes 0-7: Maximum logical block address available. 20337 * (MSB in byte:0 & LSB in byte:7) 20338 * 20339 * bytes 8-11: Block length in bytes 20340 * (MSB in byte:8 & LSB in byte:11) 20341 * 20342 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20343 */ 20344 capacity = BE_64(capacity16_buf[0]); 20345 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20346 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20347 20348 pbsize = lbasize << lbpb_exp; 20349 20350 /* 20351 * Done with capacity16_buf 20352 */ 20353 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20354 20355 /* 20356 * if the reported capacity is set to all 0xf's, then 20357 * this disk is too large. This could only happen with 20358 * a device that supports LBAs larger than 64 bits which 20359 * are not defined by any current T10 standards. 20360 */ 20361 if (capacity == 0xffffffffffffffff) { 20362 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20363 "disk is too large"); 20364 return (EIO); 20365 } 20366 break; /* Success! */ 20367 case EIO: 20368 switch (ucmd_buf.uscsi_status) { 20369 case STATUS_RESERVATION_CONFLICT: 20370 status = EACCES; 20371 break; 20372 case STATUS_CHECK: 20373 /* 20374 * Check condition; look for ASC/ASCQ of 0x04/0x01 20375 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20376 */ 20377 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20378 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20379 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20380 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20381 return (EAGAIN); 20382 } 20383 break; 20384 default: 20385 break; 20386 } 20387 /* FALLTHRU */ 20388 default: 20389 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20390 return (status); 20391 } 20392 20393 /* 20394 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20395 * (2352 and 0 are common) so for these devices always force the value 20396 * to 2048 as required by the ATAPI specs. 20397 */ 20398 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20399 lbasize = 2048; 20400 } 20401 20402 /* 20403 * Get the maximum LBA value from the READ CAPACITY 16 data. 20404 * Here we assume that the Partial Medium Indicator (PMI) bit 20405 * was cleared when issuing the command. This means that the LBA 20406 * returned from the device is the LBA of the last logical block 20407 * on the logical unit. The actual logical block count will be 20408 * this value plus one. 20409 */ 20410 capacity += 1; 20411 20412 /* 20413 * Currently, for removable media, the capacity is saved in terms 20414 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
20415 */ 20416 if (un->un_f_has_removable_media) 20417 capacity *= (lbasize / un->un_sys_blocksize); 20418 20419 *capp = capacity; 20420 *lbap = lbasize; 20421 *psp = pbsize; 20422 20423 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20424 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20425 capacity, lbasize, pbsize); 20426 20427 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) { 20428 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20429 "sd_send_scsi_READ_CAPACITY_16 received invalid value " 20430 "capacity %llu lbasize %d pbsize %d", capacity, lbasize); 20431 return (EIO); 20432 } 20433 20434 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20435 return (0); 20436 } 20437 20438 20439 /* 20440 * Function: sd_send_scsi_START_STOP_UNIT 20441 * 20442 * Description: Issue a scsi START STOP UNIT command to the target. 20443 * 20444 * Arguments: ssc - ssc contatins pointer to driver soft state (unit) 20445 * structure for this target. 20446 * pc_flag - SD_POWER_CONDITION 20447 * SD_START_STOP 20448 * flag - SD_TARGET_START 20449 * SD_TARGET_STOP 20450 * SD_TARGET_EJECT 20451 * SD_TARGET_CLOSE 20452 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20453 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20454 * to use the USCSI "direct" chain and bypass the normal 20455 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20456 * command is issued as part of an error recovery action. 20457 * 20458 * Return Code: 0 - Success 20459 * EIO - IO error 20460 * EACCES - Reservation conflict detected 20461 * ENXIO - Not Ready, medium not present 20462 * errno return code from sd_ssc_send() 20463 * 20464 * Context: Can sleep. 20465 */ 20466 20467 static int 20468 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20469 int path_flag) 20470 { 20471 struct scsi_extended_sense sense_buf; 20472 union scsi_cdb cdb; 20473 struct uscsi_cmd ucmd_buf; 20474 int status; 20475 struct sd_lun *un; 20476 20477 ASSERT(ssc != NULL); 20478 un = ssc->ssc_un; 20479 ASSERT(un != NULL); 20480 ASSERT(!mutex_owned(SD_MUTEX(un))); 20481 20482 SD_TRACE(SD_LOG_IO, un, 20483 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20484 20485 if (un->un_f_check_start_stop && 20486 (pc_flag == SD_START_STOP) && 20487 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20488 (un->un_f_start_stop_supported != TRUE)) { 20489 return (0); 20490 } 20491 20492 /* 20493 * If we are performing an eject operation and 20494 * we receive any command other than SD_TARGET_EJECT 20495 * we should immediately return. 20496 */ 20497 if (flag != SD_TARGET_EJECT) { 20498 mutex_enter(SD_MUTEX(un)); 20499 if (un->un_f_ejecting == TRUE) { 20500 mutex_exit(SD_MUTEX(un)); 20501 return (EAGAIN); 20502 } 20503 mutex_exit(SD_MUTEX(un)); 20504 } 20505 20506 bzero(&cdb, sizeof (cdb)); 20507 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20508 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20509 20510 cdb.scc_cmd = SCMD_START_STOP; 20511 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ? 
20512 (uchar_t)(flag << 4) : (uchar_t)flag; 20513 20514 ucmd_buf.uscsi_cdb = (char *)&cdb; 20515 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20516 ucmd_buf.uscsi_bufaddr = NULL; 20517 ucmd_buf.uscsi_buflen = 0; 20518 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20519 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20520 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20521 ucmd_buf.uscsi_timeout = 200; 20522 20523 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20524 UIO_SYSSPACE, path_flag); 20525 20526 switch (status) { 20527 case 0: 20528 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20529 break; /* Success! */ 20530 case EIO: 20531 switch (ucmd_buf.uscsi_status) { 20532 case STATUS_RESERVATION_CONFLICT: 20533 status = EACCES; 20534 break; 20535 case STATUS_CHECK: 20536 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20537 switch (scsi_sense_key( 20538 (uint8_t *)&sense_buf)) { 20539 case KEY_ILLEGAL_REQUEST: 20540 status = ENOTSUP; 20541 break; 20542 case KEY_NOT_READY: 20543 if (scsi_sense_asc( 20544 (uint8_t *)&sense_buf) 20545 == 0x3A) { 20546 status = ENXIO; 20547 } 20548 break; 20549 default: 20550 break; 20551 } 20552 } 20553 break; 20554 default: 20555 break; 20556 } 20557 break; 20558 default: 20559 break; 20560 } 20561 20562 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20563 20564 return (status); 20565 } 20566 20567 20568 /* 20569 * Function: sd_start_stop_unit_callback 20570 * 20571 * Description: timeout(9F) callback to begin recovery process for a 20572 * device that has spun down. 20573 * 20574 * Arguments: arg - pointer to associated softstate struct. 20575 * 20576 * Context: Executes in a timeout(9F) thread context 20577 */ 20578 20579 static void 20580 sd_start_stop_unit_callback(void *arg) 20581 { 20582 struct sd_lun *un = arg; 20583 ASSERT(un != NULL); 20584 ASSERT(!mutex_owned(SD_MUTEX(un))); 20585 20586 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20587 20588 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20589 } 20590 20591 20592 /* 20593 * Function: sd_start_stop_unit_task 20594 * 20595 * Description: Recovery procedure when a drive is spun down. 20596 * 20597 * Arguments: arg - pointer to associated softstate struct. 20598 * 20599 * Context: Executes in a taskq() thread context 20600 */ 20601 20602 static void 20603 sd_start_stop_unit_task(void *arg) 20604 { 20605 struct sd_lun *un = arg; 20606 sd_ssc_t *ssc; 20607 int power_level; 20608 int rval; 20609 20610 ASSERT(un != NULL); 20611 ASSERT(!mutex_owned(SD_MUTEX(un))); 20612 20613 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20614 20615 /* 20616 * Some unformatted drives report not ready error, no need to 20617 * restart if format has been initiated. 20618 */ 20619 mutex_enter(SD_MUTEX(un)); 20620 if (un->un_f_format_in_progress == TRUE) { 20621 mutex_exit(SD_MUTEX(un)); 20622 return; 20623 } 20624 mutex_exit(SD_MUTEX(un)); 20625 20626 ssc = sd_ssc_init(un); 20627 /* 20628 * When a START STOP command is issued from here, it is part of a 20629 * failure recovery operation and must be issued before any other 20630 * commands, including any pending retries. Thus it must be sent 20631 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20632 * succeeds or not, we will start I/O after the attempt. 20633 * If power condition is supported and the current power level 20634 * is capable of performing I/O, we should set the power condition 20635 * to that level. Otherwise, set the power condition to ACTIVE. 
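 *
 * (Illustrative, per the SBC START STOP UNIT CDB layout: with power
 * conditions the condition code is shifted into the upper nibble of
 * CDB byte 4, so ACTIVE (1h) yields 0x10, while without power
 * conditions byte 4 carries the START bit directly, so
 * SD_TARGET_START yields 0x01.)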
20636 */ 20637 if (un->un_f_power_condition_supported) { 20638 mutex_enter(SD_MUTEX(un)); 20639 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level)); 20640 power_level = sd_pwr_pc.ran_perf[un->un_power_level] 20641 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE; 20642 mutex_exit(SD_MUTEX(un)); 20643 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 20644 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY); 20645 } else { 20646 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 20647 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY); 20648 } 20649 20650 if (rval != 0) 20651 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20652 sd_ssc_fini(ssc); 20653 /* 20654 * The above call blocks until the START_STOP_UNIT command completes. 20655 * Now that it has completed, we must re-try the original IO that 20656 * received the NOT READY condition in the first place. There are 20657 * three possible conditions here: 20658 * 20659 * (1) The original IO is on un_retry_bp. 20660 * (2) The original IO is on the regular wait queue, and un_retry_bp 20661 * is NULL. 20662 * (3) The original IO is on the regular wait queue, and un_retry_bp 20663 * points to some other, unrelated bp. 20664 * 20665 * For each case, we must call sd_start_cmds() with un_retry_bp 20666 * as the argument. If un_retry_bp is NULL, this will initiate 20667 * processing of the regular wait queue. If un_retry_bp is not NULL, 20668 * then this will process the bp on un_retry_bp. That may or may not 20669 * be the original IO, but that does not matter: the important thing 20670 * is to keep the IO processing going at this point. 20671 * 20672 * Note: This is a very specific error recovery sequence associated 20673 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20674 * serialize the I/O with completion of the spin-up. 20675 */ 20676 mutex_enter(SD_MUTEX(un)); 20677 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20678 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20679 un, un->un_retry_bp); 20680 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20681 sd_start_cmds(un, un->un_retry_bp); 20682 mutex_exit(SD_MUTEX(un)); 20683 20684 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20685 } 20686 20687 20688 /* 20689 * Function: sd_send_scsi_INQUIRY 20690 * 20691 * Description: Issue the scsi INQUIRY command. 20692 * 20693 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20694 * structure for this target. 20695 * bufaddr 20696 * buflen 20697 * evpd 20698 * page_code 20699 * page_length 20700 * 20701 * Return Code: 0 - Success 20702 * errno return code from sd_ssc_send() 20703 * 20704 * Context: Can sleep. Does not return until command is completed. 
20705 */ 20706 20707 static int 20708 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20709 uchar_t evpd, uchar_t page_code, size_t *residp) 20710 { 20711 union scsi_cdb cdb; 20712 struct uscsi_cmd ucmd_buf; 20713 int status; 20714 struct sd_lun *un; 20715 20716 ASSERT(ssc != NULL); 20717 un = ssc->ssc_un; 20718 ASSERT(un != NULL); 20719 ASSERT(!mutex_owned(SD_MUTEX(un))); 20720 ASSERT(bufaddr != NULL); 20721 20722 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20723 20724 bzero(&cdb, sizeof (cdb)); 20725 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20726 bzero(bufaddr, buflen); 20727 20728 cdb.scc_cmd = SCMD_INQUIRY; 20729 cdb.cdb_opaque[1] = evpd; 20730 cdb.cdb_opaque[2] = page_code; 20731 FORMG0COUNT(&cdb, buflen); 20732 20733 ucmd_buf.uscsi_cdb = (char *)&cdb; 20734 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20735 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20736 ucmd_buf.uscsi_buflen = buflen; 20737 ucmd_buf.uscsi_rqbuf = NULL; 20738 ucmd_buf.uscsi_rqlen = 0; 20739 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20740 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20741 20742 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20743 UIO_SYSSPACE, SD_PATH_DIRECT); 20744 20745 /* 20746 * Only handle status == 0, the upper-level caller 20747 * will put different assessment based on the context. 20748 */ 20749 if (status == 0) 20750 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20751 20752 if ((status == 0) && (residp != NULL)) { 20753 *residp = ucmd_buf.uscsi_resid; 20754 } 20755 20756 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20757 20758 return (status); 20759 } 20760 20761 20762 /* 20763 * Function: sd_send_scsi_TEST_UNIT_READY 20764 * 20765 * Description: Issue the scsi TEST UNIT READY command. 20766 * This routine can be told to set the flag USCSI_DIAGNOSE to 20767 * prevent retrying failed commands. Use this when the intent 20768 * is either to check for device readiness, to clear a Unit 20769 * Attention, or to clear any outstanding sense data. 20770 * However under specific conditions the expected behavior 20771 * is for retries to bring a device ready, so use the flag 20772 * with caution. 20773 * 20774 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20775 * structure for this target. 20776 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20777 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20778 * 0: dont check for media present, do retries on cmd. 20779 * 20780 * Return Code: 0 - Success 20781 * EIO - IO error 20782 * EACCES - Reservation conflict detected 20783 * ENXIO - Not Ready, medium not present 20784 * errno return code from sd_ssc_send() 20785 * 20786 * Context: Can sleep. Does not return until command is completed. 20787 */ 20788 20789 static int 20790 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20791 { 20792 struct scsi_extended_sense sense_buf; 20793 union scsi_cdb cdb; 20794 struct uscsi_cmd ucmd_buf; 20795 int status; 20796 struct sd_lun *un; 20797 20798 ASSERT(ssc != NULL); 20799 un = ssc->ssc_un; 20800 ASSERT(un != NULL); 20801 ASSERT(!mutex_owned(SD_MUTEX(un))); 20802 20803 SD_TRACE(SD_LOG_IO, un, 20804 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20805 20806 /* 20807 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20808 * timeouts when they receive a TUR and the queue is not empty. 
Check 20809 * the configuration flag set during attach (indicating the drive has 20810 * this firmware bug) and un_ncmds_in_transport before issuing the 20811 * TUR. If there are 20812 * pending commands, return success; this is a bit arbitrary but is ok 20813 * for non-removables (i.e. the eliteI disks) and non-clustering 20814 * configurations. 20815 */ 20816 if (un->un_f_cfg_tur_check == TRUE) { 20817 mutex_enter(SD_MUTEX(un)); 20818 if (un->un_ncmds_in_transport != 0) { 20819 mutex_exit(SD_MUTEX(un)); 20820 return (0); 20821 } 20822 mutex_exit(SD_MUTEX(un)); 20823 } 20824 20825 bzero(&cdb, sizeof (cdb)); 20826 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20827 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20828 20829 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20830 20831 ucmd_buf.uscsi_cdb = (char *)&cdb; 20832 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20833 ucmd_buf.uscsi_bufaddr = NULL; 20834 ucmd_buf.uscsi_buflen = 0; 20835 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20836 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20837 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20838 20839 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20840 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20841 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20842 } 20843 ucmd_buf.uscsi_timeout = 60; 20844 20845 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20846 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20847 SD_PATH_STANDARD)); 20848 20849 switch (status) { 20850 case 0: 20851 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20852 break; /* Success! */ 20853 case EIO: 20854 switch (ucmd_buf.uscsi_status) { 20855 case STATUS_RESERVATION_CONFLICT: 20856 status = EACCES; 20857 break; 20858 case STATUS_CHECK: 20859 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20860 break; 20861 } 20862 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20863 (scsi_sense_key((uint8_t *)&sense_buf) == 20864 KEY_NOT_READY) && 20865 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20866 status = ENXIO; 20867 } 20868 break; 20869 default: 20870 break; 20871 } 20872 break; 20873 default: 20874 break; 20875 } 20876 20877 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20878 20879 return (status); 20880 } 20881 20882 /* 20883 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20884 * 20885 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20886 * 20887 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20888 * structure for this target. 20889 * 20890 * Return Code: 0 - Success 20891 * EACCES 20892 * ENOTSUP 20893 * errno return code from sd_ssc_send() 20894 * 20895 * Context: Can sleep. Does not return until command is completed.
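 *
 * Illustrative usage (parameter names from the signature below):
 * passing SD_READ_KEYS with a caller-supplied buffer retrieves the
 * registered reservation keys,
 *
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    len, bufp);
 *
 * while a NULL data_bufp (with data_len == 0) makes the routine
 * allocate a minimal MHIOC_RESV_KEY_SIZE buffer internally.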
20896 */ 20897 20898 static int 20899 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20900 uint16_t data_len, uchar_t *data_bufp) 20901 { 20902 struct scsi_extended_sense sense_buf; 20903 union scsi_cdb cdb; 20904 struct uscsi_cmd ucmd_buf; 20905 int status; 20906 int no_caller_buf = FALSE; 20907 struct sd_lun *un; 20908 20909 ASSERT(ssc != NULL); 20910 un = ssc->ssc_un; 20911 ASSERT(un != NULL); 20912 ASSERT(!mutex_owned(SD_MUTEX(un))); 20913 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20914 20915 SD_TRACE(SD_LOG_IO, un, 20916 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20917 20918 bzero(&cdb, sizeof (cdb)); 20919 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20920 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20921 if (data_bufp == NULL) { 20922 /* Allocate a default buf if the caller did not give one */ 20923 ASSERT(data_len == 0); 20924 data_len = MHIOC_RESV_KEY_SIZE; 20925 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20926 no_caller_buf = TRUE; 20927 } 20928 20929 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20930 cdb.cdb_opaque[1] = usr_cmd; 20931 FORMG1COUNT(&cdb, data_len); 20932 20933 ucmd_buf.uscsi_cdb = (char *)&cdb; 20934 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20935 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20936 ucmd_buf.uscsi_buflen = data_len; 20937 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20938 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20939 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20940 ucmd_buf.uscsi_timeout = 60; 20941 20942 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20943 UIO_SYSSPACE, SD_PATH_STANDARD); 20944 20945 switch (status) { 20946 case 0: 20947 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20948 20949 break; /* Success! */ 20950 case EIO: 20951 switch (ucmd_buf.uscsi_status) { 20952 case STATUS_RESERVATION_CONFLICT: 20953 status = EACCES; 20954 break; 20955 case STATUS_CHECK: 20956 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20957 (scsi_sense_key((uint8_t *)&sense_buf) == 20958 KEY_ILLEGAL_REQUEST)) { 20959 status = ENOTSUP; 20960 } 20961 break; 20962 default: 20963 break; 20964 } 20965 break; 20966 default: 20967 break; 20968 } 20969 20970 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20971 20972 if (no_caller_buf == TRUE) { 20973 kmem_free(data_bufp, data_len); 20974 } 20975 20976 return (status); 20977 } 20978 20979 20980 /* 20981 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20982 * 20983 * Description: This routine is the driver entry point for handling CD-ROM 20984 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20985 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20986 * device. 20987 * 20988 * Arguments: ssc - ssc contains un - pointer to soft state struct 20989 * for the target. 20990 * usr_cmd SCSI-3 reservation facility command (one of 20991 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20992 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR) 20993 * usr_bufp - user provided pointer register, reserve descriptor or 20994 * preempt and abort structure (mhioc_register_t, 20995 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20996 * 20997 * Return Code: 0 - Success 20998 * EACCES 20999 * ENOTSUP 21000 * errno return code from sd_ssc_send() 21001 * 21002 * Context: Can sleep. Does not return until command is completed. 
21003 */ 21004 21005 static int 21006 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 21007 uchar_t *usr_bufp) 21008 { 21009 struct scsi_extended_sense sense_buf; 21010 union scsi_cdb cdb; 21011 struct uscsi_cmd ucmd_buf; 21012 int status; 21013 uchar_t data_len = sizeof (sd_prout_t); 21014 sd_prout_t *prp; 21015 struct sd_lun *un; 21016 21017 ASSERT(ssc != NULL); 21018 un = ssc->ssc_un; 21019 ASSERT(un != NULL); 21020 ASSERT(!mutex_owned(SD_MUTEX(un))); 21021 ASSERT(data_len == 24); /* required by scsi spec */ 21022 21023 SD_TRACE(SD_LOG_IO, un, 21024 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 21025 21026 if (usr_bufp == NULL) { 21027 return (EINVAL); 21028 } 21029 21030 bzero(&cdb, sizeof (cdb)); 21031 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21032 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21033 prp = kmem_zalloc(data_len, KM_SLEEP); 21034 21035 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 21036 cdb.cdb_opaque[1] = usr_cmd; 21037 FORMG1COUNT(&cdb, data_len); 21038 21039 ucmd_buf.uscsi_cdb = (char *)&cdb; 21040 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21041 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 21042 ucmd_buf.uscsi_buflen = data_len; 21043 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21044 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21045 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21046 ucmd_buf.uscsi_timeout = 60; 21047 21048 switch (usr_cmd) { 21049 case SD_SCSI3_REGISTER: { 21050 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 21051 21052 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21053 bcopy(ptr->newkey.key, prp->service_key, 21054 MHIOC_RESV_KEY_SIZE); 21055 prp->aptpl = ptr->aptpl; 21056 break; 21057 } 21058 case SD_SCSI3_CLEAR: { 21059 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21060 21061 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21062 break; 21063 } 21064 case SD_SCSI3_RESERVE: 21065 case SD_SCSI3_RELEASE: { 21066 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21067 21068 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21069 prp->scope_address = BE_32(ptr->scope_specific_addr); 21070 cdb.cdb_opaque[2] = ptr->type; 21071 break; 21072 } 21073 case SD_SCSI3_PREEMPTANDABORT: { 21074 mhioc_preemptandabort_t *ptr = 21075 (mhioc_preemptandabort_t *)usr_bufp; 21076 21077 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21078 bcopy(ptr->victim_key.key, prp->service_key, 21079 MHIOC_RESV_KEY_SIZE); 21080 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 21081 cdb.cdb_opaque[2] = ptr->resvdesc.type; 21082 ucmd_buf.uscsi_flags |= USCSI_HEAD; 21083 break; 21084 } 21085 case SD_SCSI3_REGISTERANDIGNOREKEY: 21086 { 21087 mhioc_registerandignorekey_t *ptr; 21088 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 21089 bcopy(ptr->newkey.key, 21090 prp->service_key, MHIOC_RESV_KEY_SIZE); 21091 prp->aptpl = ptr->aptpl; 21092 break; 21093 } 21094 default: 21095 ASSERT(FALSE); 21096 break; 21097 } 21098 21099 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21100 UIO_SYSSPACE, SD_PATH_STANDARD); 21101 21102 switch (status) { 21103 case 0: 21104 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21105 break; /* Success! 
*/ 21106 case EIO: 21107 switch (ucmd_buf.uscsi_status) { 21108 case STATUS_RESERVATION_CONFLICT: 21109 status = EACCES; 21110 break; 21111 case STATUS_CHECK: 21112 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21113 (scsi_sense_key((uint8_t *)&sense_buf) == 21114 KEY_ILLEGAL_REQUEST)) { 21115 status = ENOTSUP; 21116 } 21117 break; 21118 default: 21119 break; 21120 } 21121 break; 21122 default: 21123 break; 21124 } 21125 21126 kmem_free(prp, data_len); 21127 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21128 return (status); 21129 } 21130 21131 21132 /* 21133 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21134 * 21135 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21136 * 21137 * Arguments: un - pointer to the target's soft state struct 21138 * dkc - pointer to the callback structure 21139 * 21140 * Return Code: 0 - success 21141 * errno-type error code 21142 * 21143 * Context: kernel thread context only. 21144 * 21145 * _______________________________________________________________ 21146 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21147 * |FLUSH_VOLATILE| | operation | 21148 * |______________|______________|_________________________________| 21149 * | 0 | NULL | Synchronous flush on both | 21150 * | | | volatile and non-volatile cache | 21151 * |______________|______________|_________________________________| 21152 * | 1 | NULL | Synchronous flush on volatile | 21153 * | | | cache; disk drivers may suppress| 21154 * | | | flush if disk table indicates | 21155 * | | | non-volatile cache | 21156 * |______________|______________|_________________________________| 21157 * | 0 | !NULL | Asynchronous flush on both | 21158 * | | | volatile and non-volatile cache;| 21159 * |______________|______________|_________________________________| 21160 * | 1 | !NULL | Asynchronous flush on volatile | 21161 * | | | cache; disk drivers may suppress| 21162 * | | | flush if disk table indicates | 21163 * | | | non-volatile cache | 21164 * |______________|______________|_________________________________| 21165 * 21166 */ 21167 21168 static int 21169 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21170 { 21171 struct sd_uscsi_info *uip; 21172 struct uscsi_cmd *uscmd; 21173 union scsi_cdb *cdb; 21174 struct buf *bp; 21175 int rval = 0; 21176 int is_async; 21177 21178 SD_TRACE(SD_LOG_IO, un, 21179 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21180 21181 ASSERT(un != NULL); 21182 ASSERT(!mutex_owned(SD_MUTEX(un))); 21183 21184 if (dkc == NULL || dkc->dkc_callback == NULL) { 21185 is_async = FALSE; 21186 } else { 21187 is_async = TRUE; 21188 } 21189 21190 mutex_enter(SD_MUTEX(un)); 21191 /* check whether cache flush should be suppressed */ 21192 if (un->un_f_suppress_cache_flush == TRUE) { 21193 mutex_exit(SD_MUTEX(un)); 21194 /* 21195 * suppress the cache flush if the device is told to do 21196 * so by sd.conf or disk table 21197 */ 21198 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21199 skip the cache flush since suppress_cache_flush is %d!\n", 21200 un->un_f_suppress_cache_flush); 21201 21202 if (is_async == TRUE) { 21203 /* invoke callback for asynchronous flush */ 21204 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21205 } 21206 return (rval); 21207 } 21208 mutex_exit(SD_MUTEX(un)); 21209 21210 /* 21211 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21212 * set properly 21213 */ 21214 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21215 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21216 21217 
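	/*
	 * Illustrative note: SD_SYNC_NV_BIT is set in CDB byte 1
	 * (cdb_un.tag) of SYNCHRONIZE CACHE(10); per SBC-2, SYNC_NV
	 * asks the device to flush only its volatile cache.
	 */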
mutex_enter(SD_MUTEX(un)); 21218 if (dkc != NULL && un->un_f_sync_nv_supported && 21219 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21220 /* 21221 * if the device supports SYNC_NV bit, turn on 21222 * the SYNC_NV bit to only flush volatile cache 21223 */ 21224 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21225 } 21226 mutex_exit(SD_MUTEX(un)); 21227 21228 /* 21229 * First get some memory for the uscsi_cmd struct and cdb 21230 * and initialize for SYNCHRONIZE_CACHE cmd. 21231 */ 21232 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21233 uscmd->uscsi_cdblen = CDB_GROUP1; 21234 uscmd->uscsi_cdb = (caddr_t)cdb; 21235 uscmd->uscsi_bufaddr = NULL; 21236 uscmd->uscsi_buflen = 0; 21237 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21238 uscmd->uscsi_rqlen = SENSE_LENGTH; 21239 uscmd->uscsi_rqresid = SENSE_LENGTH; 21240 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21241 uscmd->uscsi_timeout = sd_io_time; 21242 21243 /* 21244 * Allocate an sd_uscsi_info struct and fill it with the info 21245 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21246 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21247 * since we allocate the buf here in this function, we do not 21248 * need to preserve the prior contents of b_private. 21249 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21250 */ 21251 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21252 uip->ui_flags = SD_PATH_DIRECT; 21253 uip->ui_cmdp = uscmd; 21254 21255 bp = getrbuf(KM_SLEEP); 21256 bp->b_private = uip; 21257 21258 /* 21259 * Setup buffer to carry uscsi request. 21260 */ 21261 bp->b_flags = B_BUSY; 21262 bp->b_bcount = 0; 21263 bp->b_blkno = 0; 21264 21265 if (is_async == TRUE) { 21266 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21267 uip->ui_dkc = *dkc; 21268 } 21269 21270 bp->b_edev = SD_GET_DEV(un); 21271 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21272 21273 /* 21274 * Unset un_f_sync_cache_required flag 21275 */ 21276 mutex_enter(SD_MUTEX(un)); 21277 un->un_f_sync_cache_required = FALSE; 21278 mutex_exit(SD_MUTEX(un)); 21279 21280 (void) sd_uscsi_strategy(bp); 21281 21282 /* 21283 * If synchronous request, wait for completion 21284 * If async just return and let b_iodone callback 21285 * cleanup. 21286 * NOTE: On return, u_ncmds_in_driver will be decremented, 21287 * but it was also incremented in sd_uscsi_strategy(), so 21288 * we should be ok. 21289 */ 21290 if (is_async == FALSE) { 21291 (void) biowait(bp); 21292 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21293 } 21294 21295 return (rval); 21296 } 21297 21298 21299 static int 21300 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21301 { 21302 struct sd_uscsi_info *uip; 21303 struct uscsi_cmd *uscmd; 21304 uint8_t *sense_buf; 21305 struct sd_lun *un; 21306 int status; 21307 union scsi_cdb *cdb; 21308 21309 uip = (struct sd_uscsi_info *)(bp->b_private); 21310 ASSERT(uip != NULL); 21311 21312 uscmd = uip->ui_cmdp; 21313 ASSERT(uscmd != NULL); 21314 21315 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21316 ASSERT(sense_buf != NULL); 21317 21318 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21319 ASSERT(un != NULL); 21320 21321 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21322 21323 status = geterror(bp); 21324 switch (status) { 21325 case 0: 21326 break; /* Success! 
*/ 21327 case EIO: 21328 switch (uscmd->uscsi_status) { 21329 case STATUS_RESERVATION_CONFLICT: 21330 /* Ignore reservation conflict */ 21331 status = 0; 21332 goto done; 21333 21334 case STATUS_CHECK: 21335 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21336 (scsi_sense_key(sense_buf) == 21337 KEY_ILLEGAL_REQUEST)) { 21338 /* Ignore Illegal Request error */ 21339 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 21340 mutex_enter(SD_MUTEX(un)); 21341 un->un_f_sync_nv_supported = FALSE; 21342 mutex_exit(SD_MUTEX(un)); 21343 status = 0; 21344 SD_TRACE(SD_LOG_IO, un, 21345 "un_f_sync_nv_supported \ 21346 is set to false.\n"); 21347 goto done; 21348 } 21349 21350 mutex_enter(SD_MUTEX(un)); 21351 un->un_f_sync_cache_supported = FALSE; 21352 mutex_exit(SD_MUTEX(un)); 21353 SD_TRACE(SD_LOG_IO, un, 21354 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21355 un_f_sync_cache_supported set to false \ 21356 with asc = %x, ascq = %x\n", 21357 scsi_sense_asc(sense_buf), 21358 scsi_sense_ascq(sense_buf)); 21359 status = ENOTSUP; 21360 goto done; 21361 } 21362 break; 21363 default: 21364 break; 21365 } 21366 /* FALLTHRU */ 21367 default: 21368 /* 21369 * Turn on the un_f_sync_cache_required flag 21370 * since the SYNC CACHE command failed 21371 */ 21372 mutex_enter(SD_MUTEX(un)); 21373 un->un_f_sync_cache_required = TRUE; 21374 mutex_exit(SD_MUTEX(un)); 21375 21376 /* 21377 * Don't log an error message if this device 21378 * has removable media. 21379 */ 21380 if (!un->un_f_has_removable_media) { 21381 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21382 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21383 } 21384 break; 21385 } 21386 21387 done: 21388 if (uip->ui_dkc.dkc_callback != NULL) { 21389 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21390 } 21391 21392 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21393 freerbuf(bp); 21394 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21395 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21396 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21397 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21398 21399 return (status); 21400 } 21401 21402 21403 /* 21404 * Function: sd_send_scsi_GET_CONFIGURATION 21405 * 21406 * Description: Issues the get configuration command to the device. 21407 * Called from sd_check_for_writable_cd & sd_get_media_info. 21408 * The caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 21409 * Arguments: ssc 21410 * ucmdbuf 21411 * rqbuf 21412 * rqbuflen 21413 * bufaddr 21414 * buflen 21415 * path_flag 21416 * 21417 * Return Code: 0 - Success 21418 * errno return code from sd_ssc_send() 21419 * 21420 * Context: Can sleep. Does not return until command is completed. 21421 * 21422 */ 21423 21424 static int 21425 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21426 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21427 int path_flag) 21428 { 21429 char cdb[CDB_GROUP1]; 21430 int status; 21431 struct sd_lun *un; 21432 21433 ASSERT(ssc != NULL); 21434 un = ssc->ssc_un; 21435 ASSERT(un != NULL); 21436 ASSERT(!mutex_owned(SD_MUTEX(un))); 21437 ASSERT(bufaddr != NULL); 21438 ASSERT(ucmdbuf != NULL); 21439 ASSERT(rqbuf != NULL); 21440 21441 SD_TRACE(SD_LOG_IO, un, 21442 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21443 21444 bzero(cdb, sizeof (cdb)); 21445 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21446 bzero(rqbuf, rqbuflen); 21447 bzero(bufaddr, buflen); 21448 21449 /* 21450 * Set up cdb field for the get configuration command.
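 * (Illustrative, per MMC: setting cdb[1] to 0x02 selects RT = 10b,
 * which returns the feature header plus only the feature named in
 * the Starting Feature Number field; cdb[7-8] carry the allocation
 * length, and only cdb[8] is needed for the small transfer here.)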
21451 */ 21452 cdb[0] = SCMD_GET_CONFIGURATION; 21453 cdb[1] = 0x02; /* Requested Type */ 21454 cdb[8] = SD_PROFILE_HEADER_LEN; 21455 ucmdbuf->uscsi_cdb = cdb; 21456 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21457 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21458 ucmdbuf->uscsi_buflen = buflen; 21459 ucmdbuf->uscsi_timeout = sd_io_time; 21460 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21461 ucmdbuf->uscsi_rqlen = rqbuflen; 21462 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21463 21464 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21465 UIO_SYSSPACE, path_flag); 21466 21467 switch (status) { 21468 case 0: 21469 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21470 break; /* Success! */ 21471 case EIO: 21472 switch (ucmdbuf->uscsi_status) { 21473 case STATUS_RESERVATION_CONFLICT: 21474 status = EACCES; 21475 break; 21476 default: 21477 break; 21478 } 21479 break; 21480 default: 21481 break; 21482 } 21483 21484 if (status == 0) { 21485 SD_DUMP_MEMORY(un, SD_LOG_IO, 21486 "sd_send_scsi_GET_CONFIGURATION: data", 21487 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21488 } 21489 21490 SD_TRACE(SD_LOG_IO, un, 21491 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21492 21493 return (status); 21494 } 21495 21496 /* 21497 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21498 * 21499 * Description: Issues the get configuration command to the device to 21500 * retrieve a specific feature. Called from 21501 * sd_check_for_writable_cd & sd_set_mmc_caps. 21502 * Arguments: ssc 21503 * ucmdbuf 21504 * rqbuf 21505 * rqbuflen 21506 * bufaddr 21507 * buflen 21508 * feature 21509 * 21510 * Return Code: 0 - Success 21511 * errno return code from sd_ssc_send() 21512 * 21513 * Context: Can sleep. Does not return until command is completed. 21514 * 21515 */ 21516 static int 21517 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 21518 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 21519 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 21520 { 21521 char cdb[CDB_GROUP1]; 21522 int status; 21523 struct sd_lun *un; 21524 21525 ASSERT(ssc != NULL); 21526 un = ssc->ssc_un; 21527 ASSERT(un != NULL); 21528 ASSERT(!mutex_owned(SD_MUTEX(un))); 21529 ASSERT(bufaddr != NULL); 21530 ASSERT(ucmdbuf != NULL); 21531 ASSERT(rqbuf != NULL); 21532 21533 SD_TRACE(SD_LOG_IO, un, 21534 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21535 21536 bzero(cdb, sizeof (cdb)); 21537 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21538 bzero(rqbuf, rqbuflen); 21539 bzero(bufaddr, buflen); 21540 21541 /* 21542 * Set up cdb field for the get configuration command. 21543 */ 21544 cdb[0] = SCMD_GET_CONFIGURATION; 21545 cdb[1] = 0x02; /* Requested Type */ 21546 cdb[3] = feature; 21547 cdb[8] = buflen; 21548 ucmdbuf->uscsi_cdb = cdb; 21549 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21550 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21551 ucmdbuf->uscsi_buflen = buflen; 21552 ucmdbuf->uscsi_timeout = sd_io_time; 21553 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21554 ucmdbuf->uscsi_rqlen = rqbuflen; 21555 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21556 21557 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21558 UIO_SYSSPACE, path_flag); 21559 21560 switch (status) { 21561 case 0: 21562 21563 break; /* Success! 
*/ 21564 case EIO: 21565 switch (ucmdbuf->uscsi_status) { 21566 case STATUS_RESERVATION_CONFLICT: 21567 status = EACCES; 21568 break; 21569 default: 21570 break; 21571 } 21572 break; 21573 default: 21574 break; 21575 } 21576 21577 if (status == 0) { 21578 SD_DUMP_MEMORY(un, SD_LOG_IO, 21579 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21580 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21581 } 21582 21583 SD_TRACE(SD_LOG_IO, un, 21584 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21585 21586 return (status); 21587 } 21588 21589 21590 /* 21591 * Function: sd_send_scsi_MODE_SENSE 21592 * 21593 * Description: Utility function for issuing a scsi MODE SENSE command. 21594 * Note: This routine uses a consistent implementation for Group0, 21595 * Group1, and Group2 commands across all platforms. ATAPI devices 21596 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21597 * 21598 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21599 * structure for this target. 21600 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 21601 * CDB_GROUP[1|2] (10 byte)). 21602 * bufaddr - buffer for page data retrieved from the target. 21603 * buflen - size of page to be retrieved. 21604 * page_code - page code of data to be retrieved from the target. 21605 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21606 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21607 * to use the USCSI "direct" chain and bypass the normal 21608 * command waitq. 21609 * 21610 * Return Code: 0 - Success 21611 * errno return code from sd_ssc_send() 21612 * 21613 * Context: Can sleep. Does not return until command is completed. 21614 */ 21615 21616 static int 21617 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21618 size_t buflen, uchar_t page_code, int path_flag) 21619 { 21620 struct scsi_extended_sense sense_buf; 21621 union scsi_cdb cdb; 21622 struct uscsi_cmd ucmd_buf; 21623 int status; 21624 int headlen; 21625 struct sd_lun *un; 21626 21627 ASSERT(ssc != NULL); 21628 un = ssc->ssc_un; 21629 ASSERT(un != NULL); 21630 ASSERT(!mutex_owned(SD_MUTEX(un))); 21631 ASSERT(bufaddr != NULL); 21632 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21633 (cdbsize == CDB_GROUP2)); 21634 21635 SD_TRACE(SD_LOG_IO, un, 21636 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21637 21638 bzero(&cdb, sizeof (cdb)); 21639 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21640 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21641 bzero(bufaddr, buflen); 21642 21643 if (cdbsize == CDB_GROUP0) { 21644 cdb.scc_cmd = SCMD_MODE_SENSE; 21645 cdb.cdb_opaque[2] = page_code; 21646 FORMG0COUNT(&cdb, buflen); 21647 headlen = MODE_HEADER_LENGTH; 21648 } else { 21649 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21650 cdb.cdb_opaque[2] = page_code; 21651 FORMG1COUNT(&cdb, buflen); 21652 headlen = MODE_HEADER_LENGTH_GRP2; 21653 } 21654 21655 ASSERT(headlen <= buflen); 21656 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21657 21658 ucmd_buf.uscsi_cdb = (char *)&cdb; 21659 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21660 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21661 ucmd_buf.uscsi_buflen = buflen; 21662 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21663 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21664 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21665 ucmd_buf.uscsi_timeout = 60; 21666 21667 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21668 UIO_SYSSPACE, path_flag); 21669 21670 switch (status) { 21671 case 0: 21672 /* 21673 * sr_check_wp() uses
0x3f page code and check the header of 21674 * mode page to determine if target device is write-protected. 21675 * But some USB devices return 0 bytes for 0x3f page code. For 21676 * this case, make sure that at least the mode page header is 21677 * returned. 21678 */ 21679 if (buflen - ucmd_buf.uscsi_resid < headlen) { 21680 status = EIO; 21681 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 21682 "mode page header is not returned"); 21683 } 21684 break; /* Success! */ 21685 case EIO: 21686 switch (ucmd_buf.uscsi_status) { 21687 case STATUS_RESERVATION_CONFLICT: 21688 status = EACCES; 21689 break; 21690 default: 21691 break; 21692 } 21693 break; 21694 default: 21695 break; 21696 } 21697 21698 if (status == 0) { 21699 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 21700 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21701 } 21702 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 21703 21704 return (status); 21705 } 21706 21707 21708 /* 21709 * Function: sd_send_scsi_MODE_SELECT 21710 * 21711 * Description: Utility function for issuing a scsi MODE SELECT command. 21712 * Note: This routine uses a consistent implementation for Group0, 21713 * Group1, and Group2 commands across all platforms. ATAPI devices 21714 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21715 * 21716 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21717 * structure for this target. 21718 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 21719 * CDB_GROUP[1|2] (10 byte)). 21720 * bufaddr - buffer for page data retrieved from the target. 21721 * buflen - size of page to be retrieved. 21722 * save_page - boolean to determine if SP bit should be set. 21723 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21724 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21725 * to use the USCSI "direct" chain and bypass the normal 21726 * command waitq. 21727 * 21728 * Return Code: 0 - Success 21729 * errno return code from sd_ssc_send() 21730 * 21731 * Context: Can sleep. Does not return until command is completed.
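 *
 * Illustrative CDB byte 1 values built below: 0x10 (PF only) for a
 * non-persistent change, or 0x11 (PF | SP) when save_page ==
 * SD_SAVE_PAGE asks the device to save the page across power cycles.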
21732 */ 21733 21734 static int 21735 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21736 size_t buflen, uchar_t save_page, int path_flag) 21737 { 21738 struct scsi_extended_sense sense_buf; 21739 union scsi_cdb cdb; 21740 struct uscsi_cmd ucmd_buf; 21741 int status; 21742 struct sd_lun *un; 21743 21744 ASSERT(ssc != NULL); 21745 un = ssc->ssc_un; 21746 ASSERT(un != NULL); 21747 ASSERT(!mutex_owned(SD_MUTEX(un))); 21748 ASSERT(bufaddr != NULL); 21749 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21750 (cdbsize == CDB_GROUP2)); 21751 21752 SD_TRACE(SD_LOG_IO, un, 21753 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21754 21755 bzero(&cdb, sizeof (cdb)); 21756 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21757 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21758 21759 /* Set the PF bit for many third party drives */ 21760 cdb.cdb_opaque[1] = 0x10; 21761 21762 /* Set the savepage(SP) bit if given */ 21763 if (save_page == SD_SAVE_PAGE) { 21764 cdb.cdb_opaque[1] |= 0x01; 21765 } 21766 21767 if (cdbsize == CDB_GROUP0) { 21768 cdb.scc_cmd = SCMD_MODE_SELECT; 21769 FORMG0COUNT(&cdb, buflen); 21770 } else { 21771 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21772 FORMG1COUNT(&cdb, buflen); 21773 } 21774 21775 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21776 21777 ucmd_buf.uscsi_cdb = (char *)&cdb; 21778 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21779 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21780 ucmd_buf.uscsi_buflen = buflen; 21781 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21782 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21783 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21784 ucmd_buf.uscsi_timeout = 60; 21785 21786 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21787 UIO_SYSSPACE, path_flag); 21788 21789 switch (status) { 21790 case 0: 21791 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21792 break; /* Success! */ 21793 case EIO: 21794 switch (ucmd_buf.uscsi_status) { 21795 case STATUS_RESERVATION_CONFLICT: 21796 status = EACCES; 21797 break; 21798 default: 21799 break; 21800 } 21801 break; 21802 default: 21803 break; 21804 } 21805 21806 if (status == 0) { 21807 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21808 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21809 } 21810 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21811 21812 return (status); 21813 } 21814 21815 21816 /* 21817 * Function: sd_send_scsi_RDWR 21818 * 21819 * Description: Issue a scsi READ or WRITE command with the given parameters. 21820 * 21821 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21822 * structure for this target. 21823 * cmd: SCMD_READ or SCMD_WRITE 21824 * bufaddr: Address of caller's buffer to receive the RDWR data 21825 * buflen: Length of caller's buffer receive the RDWR data. 21826 * start_block: Block number for the start of the RDWR operation. 21827 * (Assumes target-native block size.) 21828 * residp: Pointer to variable to receive the redisual of the 21829 * RDWR operation (may be NULL of no residual requested). 21830 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21831 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21832 * to use the USCSI "direct" chain and bypass the normal 21833 * command waitq. 21834 * 21835 * Return Code: 0 - Success 21836 * errno return code from sd_ssc_send() 21837 * 21838 * Context: Can sleep. Does not return until command is completed. 
/*
 *    Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer holding the RDWR data
 *		buflen:  Length of caller's buffer for the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	uint32_t			block_count;
	int				status;
	int				cdbsize;
	uchar_t				flag;
	struct sd_lun			*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Compute CDB size to use */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
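/*
 * Illustrative sketch (not part of the driver): the CDB-size selection
 * above in isolation. Group0 CDBs carry a 21-bit LBA, so any start block
 * at or above 0x200000 (the bits covered by the 0xFFE00000 mask) needs a
 * Group1 (10-byte) CDB, and anything above 32 bits needs Group4 (16-byte).
 * ATAPI targets always use at least Group1. The helper name is
 * hypothetical.
 */
#if 0
static int
sd_rdwr_cdbsize_sketch(daddr_t start_block, boolean_t is_atapi)
{
	if (start_block > 0xffffffff)
		return (CDB_GROUP4);	/* e.g. LBA 0x100000000 */
	if ((start_block & 0xFFE00000) || is_atapi)
		return (CDB_GROUP1);	/* e.g. LBA 0x200000..0xffffffff */
	return (CDB_GROUP0);		/* e.g. LBA 0..0x1FFFFF */
}
#endif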
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		bufaddr - buffer for the retrieved log page data.
 *		buflen - size of the buffer.
 *		page_code - log page to retrieve.
 *		page_control - page control (PC) field for the CDB.
 *		param_ptr - parameter pointer field for the CDB.
 *		path_flag - SD_PATH_DIRECT or SD_PATH_DIRECT_PRIORITY, as
 *			for the other sd_send_scsi_* routines.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
    uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
    int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;
	struct sd_lun			*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter
					 * is implemented as page 0xE. To
					 * handle this case, if an attempt to
					 * read log page 0xE fails, we retry
					 * using page 0x31.
					 *
					 * The network storage BU has committed
					 * to maintaining page 0x31 for this
					 * purpose and will not implement any
					 * other page with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
 *
 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		bufaddr - buffer for the returned event status data.
 *		buflen - size of the buffer.
 *		class_req - requested event class bitmask.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
    size_t buflen, uchar_t class_req)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
	cdb.cdb_opaque[1] = 1;	/* polled */
	cdb.cdb_opaque[4] = class_req;
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = NULL;
	ucmd_buf.uscsi_rqlen = 0;
	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Only handle status == 0 here; the upper-level caller
	 * will make a different assessment based on the context.
	 */
	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		if (ucmd_buf.uscsi_resid != 0) {
			status = EIO;
		}
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");

	return (status);
}


static boolean_t
sd_gesn_media_data_valid(uchar_t *data)
{
	uint16_t	len;

	len = (data[1] << 8) | data[0];
	return ((len >= 6) &&
	    ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
	    ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
	    ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
}
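/*
 * Illustrative sketch (not from a system header): the GESN event header
 * bytes that sd_gesn_media_data_valid() inspects, described as that
 * routine reads them. data[0] and data[1] form the event data length,
 * data[2] carries the NEA bit and the notification class, and data[3] is
 * the supported event class bitmask; the media event data checked by
 * sd_media_watch_cb() follows in data[4] and data[5].
 */
#if 0
	struct sd_gesn_header_sketch {
		uchar_t	len0;		/* event data length, combined with */
		uchar_t	len1;		/*   len1 by sd_gesn_media_data_valid */
		uchar_t	nea_class;	/* bit 7: NEA; bits 0-2: class */
		uchar_t	supported;	/* supported event class bitmask */
		uchar_t	event_code;	/* data[4]: SD_GESN_MEDIA_EVENT_CODE */
		uchar_t	media_status;	/* data[5]: ..._STATUS_PRESENT bit */
	};
#endif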
/*
 *    Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
 *
 *   Arguments: dev     - device number
 *		cmd     - ioctl operation to be performed
 *		arg     - user argument, contains data to be set or reference
 *			  parameter for get
 *		flag    - bit flag, indicating open settings, 32/64 bit type
 *		cred_p  - user credential pointer
 *		rval_p  - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 *     Context: Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	boolean_t	is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go through sdstrategy, where we check on
	 * suspend status.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter increment, a power down, for example, could slip in
	 * after the above state check is made and before execution
	 * reaches the top of sd_uscsi_strategy, which would cause
	 * problems.
	 */
	un->un_ncmds_in_driver++;

	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
		case DKIOCGVTOC:
		case DKIOCGEXTVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCEXTPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
#if defined(__i386) || defined(__amd64)
		case DKIOCSETEXTPART:
#endif
			/* let cmlb handle it */
			goto skip_ready_valid;

		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = EIO;
				goto done_quick_assess;
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case DKIOCGMEDIAINFOEXT:
		case DKIOCSOLIDSTATE:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_CLEAR:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
			goto skip_ready_valid;
		default:
			break;
		}

		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(ssc, SDPART(dev));
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:	/* for eject command */
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
					/*
					 * Do not map SD_RESERVED_BY_OTHERS
					 * to EIO.
					 */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));

				goto done_without_assess;
			}
		}
	}

skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFOEXT:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
		err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGEXTVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCEXTPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
#if defined(__i386) || defined(__amd64)
	case DKIOCSETEXTPART:
#endif
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(ssc,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (err != 0)
			goto done_with_assess;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
		    cmd == DKIOCSEXTVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(ssc, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(ssc) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCSTATE: {
		enum dkio_state state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCSOLIDSTATE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
		i = un->un_f_is_solid_state ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCREADONLY:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
		i = 0;
		if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
		    (sr_check_wp(dev) != 0)) {
			i = 1;
		}
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				err = EIO;
				goto done_with_assess;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;
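	/*
	 * Illustrative sketch (not part of the driver): how a privileged
	 * userland caller might probe reservation state via MHIOCSTATUS.
	 * The MHIOCSTATUS case above returns 0 through *rval_p when the
	 * device is accessible and 1 when it is reserved by another host.
	 * The device path is hypothetical; error handling is elided.
	 */
#if 0
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
	int status = ioctl(fd, MHIOCSTATUS, 0);

	if (status == 0)
		(void) printf("no reservation conflict\n");
	else if (status == 1)
		(void) printf("reserved by another host\n");
#endif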
	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_CLEAR:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_CLEAR,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg uioseg;

			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}

			err = sd_ssc_send(ssc,
			    (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
			if (err != 0)
				goto done_with_assess;
			else
				sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_STOP, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_START, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_CLOSE, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;
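	/*
	 * Illustrative sketch (not part of the driver): ejecting media
	 * from userland. FDEJECT, DKIOCEJECT and CDROMEJECT, handled in
	 * the cases below, all end up in sr_eject(). The device path is
	 * hypothetical.
	 */
#if 0
	int fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY | O_NDELAY);

	if (ioctl(fd, DKIOCEJECT, 0) != 0)
		perror("DKIOCEJECT");	/* ENOTTY if eject is unsupported */
#endif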
	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor.
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size for ATAPI
		 * drives, so return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1 << SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive, so return the target block
			 * size; we cannot change the blocksize on ATAPI
			 * drives. Used primarily to detect if an ATAPI cdrom
			 * is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as a single drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */
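	/*
	 * Illustrative sketch (not part of the driver): a synchronous cache
	 * flush from userland. The dk_callback argument handled in the case
	 * below is honored only for kernel callers (FKIOCTL); a userland
	 * caller simply blocks until the SYNCHRONIZE CACHE completes.
	 */
#if 0
	if (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) != 0)
		perror("DKIOCFLUSHWRITECACHE"); /* ENOTSUP if unsupported */
#endif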
	case DKIOCFLUSHWRITECACHE:
	{
		struct dk_callback *dkc = (struct dk_callback *)arg;

		mutex_enter(SD_MUTEX(un));
		if (!un->un_f_sync_cache_supported ||
		    !un->un_f_write_cache_enabled) {
			err = un->un_f_sync_cache_supported ?
			    0 : ENOTSUP;
			mutex_exit(SD_MUTEX(un));
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				(*dkc->dkc_callback)(dkc->dkc_cookie,
				    err);
				/*
				 * Did callback and reported error.
				 * Since we did a callback, ioctl
				 * should return 0.
				 */
				err = 0;
			}
			break;
		}
		mutex_exit(SD_MUTEX(un));

		if ((flag & FKIOCTL) && dkc != NULL &&
		    dkc->dkc_callback != NULL) {
			/* async SYNC CACHE request */
			err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
		} else {
			/* synchronous SYNC CACHE request */
			err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
		}
	}
	break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}
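	/*
	 * Illustrative sketch (not part of the driver): querying the write
	 * cache enable (WCE) state from userland. The DKIOCGETWCE case above
	 * copies a single int out to the caller.
	 */
#if 0
	int wce;

	if (ioctl(fd, DKIOCGETWCE, &wce) == 0)
		(void) printf("write cache %s\n",
		    wce ? "enabled" : "disabled");
#endif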
	case DKIOCSETWCE: {

		int wce, sync_supported;
		int cur_wce = 0;

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Synchronize multiple threads trying to enable
		 * or disable the cache via the un_f_wcc_cv
		 * condition variable.
		 */
		mutex_enter(SD_MUTEX(un));

		/*
		 * Don't allow the cache to be enabled if the
		 * config file has it disabled.
		 */
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		/*
		 * Wait for write cache change in progress
		 * bit to be clear before proceeding.
		 */
		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		mutex_exit(SD_MUTEX(un));

		/*
		 * Get the current write cache state
		 */
		if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_f_wcc_inprog = 0;
			cv_broadcast(&un->un_wcc_cv);
			mutex_exit(SD_MUTEX(un));
			break;
		}

		mutex_enter(SD_MUTEX(un));
		un->un_f_write_cache_enabled = (cur_wce != 0);

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache. Don't clear
			 * un_f_write_cache_enabled until after
			 * the mode select and flush are complete.
			 */
			sync_supported = un->un_f_sync_cache_supported;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * disable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				if ((err = sd_cache_control(ssc,
				    SD_CACHE_NOCHANGE,
				    SD_CACHE_DISABLE)) == 0 &&
				    sync_supported) {
					err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
				}
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Set un_f_write_cache_enabled first, so there is
			 * no window where the cache is enabled, but the
			 * bit says it isn't.
			 */
			un->un_f_write_cache_enabled = 1;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * enable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
				    SD_CACHE_ENABLE);
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));


done_without_assess:
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);

done_with_assess:
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

done_quick_assess:
	if (err != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}
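/*
 * Illustrative sketch (not part of the driver): toggling the write cache
 * from userland with DKIOCSETWCE, which funnels into the serialization
 * logic in sdioctl() above. EINVAL is returned when enabling is attempted
 * but the cache has been disabled via the driver config file.
 */
#if 0
	int on = 1;

	if (ioctl(fd, DKIOCSETWCE, &on) != 0)
		perror("DKIOCSETWCE");
#endif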
/*
 *    Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 *   Arguments: dev  - the device number
 *		arg  - pointer to user provided dk_cinfo structure
 *		       specifying the controller type and attributes.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_cinfo	*info;
	dev_info_t	*pdip;
	int		lun, tgt;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	info = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	switch (un->un_ctype) {
	case CTYPE_CDROM:
		info->dki_ctype = DKC_CDROM;
		break;
	default:
		info->dki_ctype = DKC_SCSI_CCS;
		break;
	}
	pdip = ddi_get_parent(SD_DEVINFO(un));
	info->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(info->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(info->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);

	/* Unit Information */
	info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	info->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);
	info->dki_flags = DKI_FMTVOL;
	info->dki_partition = SDPART(dev);

	/* Max Transfer size of this device in blocks */
	info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
	info->dki_addr = 0;
	info->dki_space = 0;
	info->dki_prio = 0;
	info->dki_vec = 0;

	if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}
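/*
 * Illustrative sketch (not part of the driver): reading the controller
 * information that sd_dkio_ctrl_info() assembles. Field usage matches the
 * assignments above; the output format is hypothetical.
 */
#if 0
	struct dk_cinfo ci;

	if (ioctl(fd, DKIOCINFO, &ci) == 0)
		(void) printf("%s%d on %s%d, partition %d, maxxfer %u blks\n",
		    ci.dki_dname, ci.dki_unit, ci.dki_cname, ci.dki_cnum,
		    ci.dki_partition, ci.dki_maxtransfer);
#endif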
/*
 *    Function: sd_get_media_info_com
 *
 * Description: This routine returns the information required to populate
 *		the fields for the dk_minfo/dk_minfo_ext structures.
 *
 *   Arguments: dev		- the device number
 *		dki_media_type	- media type
 *		dki_lbsize	- logical block size
 *		dki_capacity	- capacity in blocks
 *		dki_pbsize	- physical block size (if requested)
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */
static int
sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
    diskaddr_t *dki_capacity, uint_t *dki_pbsize)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uint_t			pbsize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		*dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before processing code
				 * to avoid missing assessment for FMA.
				 */
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				*dki_media_type = out_data[6];
				*dki_media_type <<= 8;
				*dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			*dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					*dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					*dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			*dki_media_type = DK_UNKNOWN;
		}
	}

	/*
	 * Now read the capacity so we can provide the lbasize,
	 * pbsize and capacity.
	 */
	if (dki_pbsize && un->un_f_descr_format_supported) {
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);

		/*
		 * Override the physical blocksize if the instance already
		 * has a larger value.
		 */
		pbsize = MAX(pbsize, un->un_phy_blocksize);
	}

	if (dki_pbsize == NULL || rval != 0 ||
	    !un->un_f_descr_format_supported) {
		rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
		    SD_PATH_DIRECT);

		switch (rval) {
		case 0:
			if (un->un_f_enable_rmw &&
			    un->un_phy_blocksize != 0) {
				pbsize = un->un_phy_blocksize;
			} else {
				pbsize = lbasize;
			}
			media_capacity = capacity;

			/*
			 * sd_send_scsi_READ_CAPACITY() reports capacity in
			 * un->un_sys_blocksize chunks. So we need to convert
			 * it into cap.lbsize chunks.
			 */
			if (un->un_f_has_removable_media) {
				media_capacity *= un->un_sys_blocksize;
				media_capacity /= lbasize;
			}
			break;
		case EACCES:
			rval = EACCES;
			goto done;
		default:
			rval = EIO;
			goto done;
		}
	} else {
		if (un->un_f_enable_rmw &&
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = SSD_SECSIZE;
		} else if (!ISP2(lbasize % DEV_BSIZE) ||
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = lbasize = DEV_BSIZE;
		}
		media_capacity = capacity;
	}
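	/*
	 * Illustrative worked example (numbers are hypothetical): the
	 * removable-media conversion above for a device with 512-byte
	 * system blocks and 2048-byte media blocks. READ CAPACITY reports
	 * 1000000 system blocks; the value handed back through
	 * dki_capacity becomes 1000000 * 512 / 2048 = 250000 media blocks.
	 */
#if 0
	u_longlong_t cap = 1000000;	/* from sd_send_scsi_READ_CAPACITY() */

	cap *= 512;			/* un->un_sys_blocksize */
	cap /= 2048;			/* lbasize reported by the media */
	ASSERT(cap == 250000);		/* value returned via dki_capacity */
#endif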
	/*
	 * If the LUN is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		un->un_f_expnevent = B_FALSE;
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	*dki_lbsize = lbasize;
	*dki_capacity = media_capacity;
	if (dki_pbsize)
		*dki_pbsize = pbsize;

done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}

/*
 *    Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 *   Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo		mi;
	int			rval;

	rval = sd_get_media_info_com(dev, &mi.dki_media_type,
	    &mi.dki_lbsize, &mi.dki_capacity, NULL);

	if (rval)
		return (rval);
	if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
		rval = EFAULT;
	return (rval);
}

/*
 *    Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference between this ioctl and DKIOCGMEDIAINFO is that the
 *		return value of this ioctl contains both the logical block size
 *		and the physical block size.
 *
 *   Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo_ext structure
 *			  specifying the media type, logical block size,
 *			  physical block size and disk capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo_ext	mie;
	int			rval = 0;

	rval = sd_get_media_info_com(dev, &mie.dki_media_type,
	    &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);

	if (rval)
		return (rval);
	if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
		rval = EFAULT;
	return (rval);

}
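/*
 * Illustrative sketch (not part of the driver): fetching the extended
 * media information from userland. dk_minfo_ext adds dki_pbsize to the
 * fields returned by DKIOCGMEDIAINFO.
 */
#if 0
	struct dk_minfo_ext mie;

	if (ioctl(fd, DKIOCGMEDIAINFOEXT, &mie) == 0)
		(void) printf("lbsize %u pbsize %u capacity %llu\n",
		    mie.dki_lbsize, mie.dki_pbsize,
		    (u_longlong_t)mie.dki_capacity);
#endif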
23671 */ 23672 static opaque_t 23673 sd_watch_request_submit(struct sd_lun *un) 23674 { 23675 dev_t dev; 23676 23677 /* All submissions are unified to use same device number */ 23678 dev = sd_make_device(SD_DEVINFO(un)); 23679 23680 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 23681 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un), 23682 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23683 (caddr_t)dev)); 23684 } else { 23685 return (scsi_watch_request_submit(SD_SCSI_DEVP(un), 23686 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23687 (caddr_t)dev)); 23688 } 23689 } 23690 23691 23692 /* 23693 * Function: sd_check_media 23694 * 23695 * Description: This utility routine implements the functionality for the 23696 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23697 * driver state changes from that specified by the user 23698 * (inserted or ejected). For example, if the user specifies 23699 * DKIO_EJECTED and the current media state is inserted this 23700 * routine will immediately return DKIO_INSERTED. However, if the 23701 * current media state is not inserted the user thread will be 23702 * blocked until the drive state changes. If DKIO_NONE is specified 23703 * the user thread will block until a drive state change occurs. 23704 * 23705 * Arguments: dev - the device number 23706 * state - user pointer to a dkio_state, updated with the current 23707 * drive state at return. 23708 * 23709 * Return Code: ENXIO 23710 * EIO 23711 * EAGAIN 23712 * EINTR 23713 */ 23714 23715 static int 23716 sd_check_media(dev_t dev, enum dkio_state state) 23717 { 23718 struct sd_lun *un = NULL; 23719 enum dkio_state prev_state; 23720 opaque_t token = NULL; 23721 int rval = 0; 23722 sd_ssc_t *ssc; 23723 23724 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23725 return (ENXIO); 23726 } 23727 23728 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23729 23730 ssc = sd_ssc_init(un); 23731 23732 mutex_enter(SD_MUTEX(un)); 23733 23734 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23735 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23736 23737 prev_state = un->un_mediastate; 23738 23739 /* is there anything to do? */ 23740 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23741 /* 23742 * submit the request to the scsi_watch service; 23743 * scsi_media_watch_cb() does the real work 23744 */ 23745 mutex_exit(SD_MUTEX(un)); 23746 23747 /* 23748 * This change handles the case where a scsi watch request is 23749 * added to a device that is powered down. To accomplish this 23750 * we power up the device before adding the scsi watch request, 23751 * since the scsi watch sends a TUR directly to the device 23752 * which the device cannot handle if it is powered down. 23753 */ 23754 if (sd_pm_entry(un) != DDI_SUCCESS) { 23755 mutex_enter(SD_MUTEX(un)); 23756 goto done; 23757 } 23758 23759 token = sd_watch_request_submit(un); 23760 23761 sd_pm_exit(un); 23762 23763 mutex_enter(SD_MUTEX(un)); 23764 if (token == NULL) { 23765 rval = EAGAIN; 23766 goto done; 23767 } 23768 23769 /* 23770 * This is a special case IOCTL that doesn't return 23771 * until the media state changes. Routine sdpower 23772 * knows about and handles this so don't count it 23773 * as an active cmd in the driver, which would 23774 * keep the device busy to the pm framework. 23775 * If the count isn't decremented the device can't 23776 * be powered down. 
23777 */ 23778 un->un_ncmds_in_driver--; 23779 ASSERT(un->un_ncmds_in_driver >= 0); 23780 23781 /* 23782 * if a prior request had been made, this will be the same 23783 * token, as scsi_watch was designed that way. 23784 */ 23785 un->un_swr_token = token; 23786 un->un_specified_mediastate = state; 23787 23788 /* 23789 * now wait for media change 23790 * we will not be signalled unless mediastate == state but it is 23791 * still better to test for this condition, since there is a 23792 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23793 */ 23794 SD_TRACE(SD_LOG_COMMON, un, 23795 "sd_check_media: waiting for media state change\n"); 23796 while (un->un_mediastate == state) { 23797 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23798 SD_TRACE(SD_LOG_COMMON, un, 23799 "sd_check_media: waiting for media state " 23800 "was interrupted\n"); 23801 un->un_ncmds_in_driver++; 23802 rval = EINTR; 23803 goto done; 23804 } 23805 SD_TRACE(SD_LOG_COMMON, un, 23806 "sd_check_media: received signal, state=%x\n", 23807 un->un_mediastate); 23808 } 23809 /* 23810 * Inc the counter to indicate the device once again 23811 * has an active outstanding cmd. 23812 */ 23813 un->un_ncmds_in_driver++; 23814 } 23815 23816 /* invalidate geometry */ 23817 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23818 sr_ejected(un); 23819 } 23820 23821 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23822 uint64_t capacity; 23823 uint_t lbasize; 23824 23825 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23826 mutex_exit(SD_MUTEX(un)); 23827 /* 23828 * Since the following routines use SD_PATH_DIRECT, we must 23829 * call PM directly before the upcoming disk accesses. This 23830 * may cause the disk to be power/spin up. 23831 */ 23832 23833 if (sd_pm_entry(un) == DDI_SUCCESS) { 23834 rval = sd_send_scsi_READ_CAPACITY(ssc, 23835 &capacity, &lbasize, SD_PATH_DIRECT); 23836 if (rval != 0) { 23837 sd_pm_exit(un); 23838 if (rval == EIO) 23839 sd_ssc_assessment(ssc, 23840 SD_FMT_STATUS_CHECK); 23841 else 23842 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23843 mutex_enter(SD_MUTEX(un)); 23844 goto done; 23845 } 23846 } else { 23847 rval = EIO; 23848 mutex_enter(SD_MUTEX(un)); 23849 goto done; 23850 } 23851 mutex_enter(SD_MUTEX(un)); 23852 23853 sd_update_block_info(un, lbasize, capacity); 23854 23855 /* 23856 * Check if the media in the device is writable or not 23857 */ 23858 if (ISCD(un)) { 23859 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 23860 } 23861 23862 mutex_exit(SD_MUTEX(un)); 23863 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 23864 if ((cmlb_validate(un->un_cmlbhandle, 0, 23865 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 23866 sd_set_pstats(un); 23867 SD_TRACE(SD_LOG_IO_PARTITION, un, 23868 "sd_check_media: un:0x%p pstats created and " 23869 "set\n", un); 23870 } 23871 23872 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 23873 SD_PATH_DIRECT); 23874 23875 sd_pm_exit(un); 23876 23877 if (rval != 0) { 23878 if (rval == EIO) 23879 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23880 else 23881 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23882 } 23883 23884 mutex_enter(SD_MUTEX(un)); 23885 } 23886 done: 23887 sd_ssc_fini(ssc); 23888 un->un_f_watcht_stopped = FALSE; 23889 if (token != NULL && un->un_swr_token != NULL) { 23890 /* 23891 * Use of this local token and the mutex ensures that we avoid 23892 * some race conditions associated with terminating the 23893 * scsi watch. 
23894 */ 23895 token = un->un_swr_token; 23896 mutex_exit(SD_MUTEX(un)); 23897 (void) scsi_watch_request_terminate(token, 23898 SCSI_WATCH_TERMINATE_WAIT); 23899 if (scsi_watch_get_ref_count(token) == 0) { 23900 mutex_enter(SD_MUTEX(un)); 23901 un->un_swr_token = (opaque_t)NULL; 23902 } else { 23903 mutex_enter(SD_MUTEX(un)); 23904 } 23905 } 23906 23907 /* 23908 * Update the capacity kstat value, if no media previously 23909 * (capacity kstat is 0) and a media has been inserted 23910 * (un_f_blockcount_is_valid == TRUE) 23911 */ 23912 if (un->un_errstats) { 23913 struct sd_errstats *stp = NULL; 23914 23915 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23916 if ((stp->sd_capacity.value.ui64 == 0) && 23917 (un->un_f_blockcount_is_valid == TRUE)) { 23918 stp->sd_capacity.value.ui64 = 23919 (uint64_t)((uint64_t)un->un_blockcount * 23920 un->un_sys_blocksize); 23921 } 23922 } 23923 mutex_exit(SD_MUTEX(un)); 23924 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23925 return (rval); 23926 } 23927 23928 23929 /* 23930 * Function: sd_delayed_cv_broadcast 23931 * 23932 * Description: Delayed cv_broadcast to allow for target to recover from media 23933 * insertion. 23934 * 23935 * Arguments: arg - driver soft state (unit) structure 23936 */ 23937 23938 static void 23939 sd_delayed_cv_broadcast(void *arg) 23940 { 23941 struct sd_lun *un = arg; 23942 23943 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23944 23945 mutex_enter(SD_MUTEX(un)); 23946 un->un_dcvb_timeid = NULL; 23947 cv_broadcast(&un->un_state_cv); 23948 mutex_exit(SD_MUTEX(un)); 23949 } 23950 23951 23952 /* 23953 * Function: sd_media_watch_cb 23954 * 23955 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 23956 * routine processes the TUR sense data and updates the driver 23957 * state if a transition has occurred. The user thread 23958 * (sd_check_media) is then signalled. 
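 *
 * For illustration only (not part of this driver), a user-level
 * consumer of DKIOCSTATE typically loops along these lines, with fd
 * and error handling assumed and revalidate_media() a hypothetical
 * helper:
 *
 *	enum dkio_state state = DKIO_NONE;
 *	for (;;) {
 *		if (ioctl(fd, DKIOCSTATE, &state) != 0)
 *			break;		-- blocks until a state change
 *		if (state == DKIO_INSERTED)
 *			revalidate_media();
 *	}
 *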
23959 *
23960 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23961 * among multiple watches that share this callback function
23962 * resultp - scsi watch facility result packet containing scsi
23963 * packet, status byte and sense data
23964 *
23965 * Return Code: 0 for success, -1 for failure
23966 */
23967
23968 static int
23969 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23970 {
23971 struct sd_lun *un;
23972 struct scsi_status *statusp = resultp->statusp;
23973 uint8_t *sensep = (uint8_t *)resultp->sensep;
23974 enum dkio_state state = DKIO_NONE;
23975 dev_t dev = (dev_t)arg;
23976 uchar_t actual_sense_length;
23977 uint8_t skey, asc, ascq;
23978
23979 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23980 return (-1);
23981 }
23982 actual_sense_length = resultp->actual_sense_length;
23983
23984 mutex_enter(SD_MUTEX(un));
23985 SD_TRACE(SD_LOG_COMMON, un,
23986 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23987 *((char *)statusp), (void *)sensep, actual_sense_length);
23988
23989 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23990 un->un_mediastate = DKIO_DEV_GONE;
23991 cv_broadcast(&un->un_state_cv);
23992 mutex_exit(SD_MUTEX(un));
23993
23994 return (0);
23995 }
23996
23997 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23998 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
23999 if ((resultp->mmc_data[5] &
24000 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
24001 state = DKIO_INSERTED;
24002 } else {
24003 state = DKIO_EJECTED;
24004 }
24005 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
24006 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
24007 sd_log_eject_request_event(un, KM_NOSLEEP);
24008 }
24009 }
24010 } else if (sensep != NULL) {
24011 /*
24012 * If there was a check condition then sensep points to valid
24013 * sense data. If status was not a check condition but a
24014 * reservation or busy status then the new state is DKIO_NONE.
24015 */
24016 skey = scsi_sense_key(sensep);
24017 asc = scsi_sense_asc(sensep);
24018 ascq = scsi_sense_ascq(sensep);
24019
24020 SD_INFO(SD_LOG_COMMON, un,
24021 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24022 skey, asc, ascq);
24023 /* This routine only uses up to 13 bytes of sense data. */
24024 if (actual_sense_length >= 13) {
24025 if (skey == KEY_UNIT_ATTENTION) {
24026 if (asc == 0x28) {
24027 state = DKIO_INSERTED;
24028 }
24029 } else if (skey == KEY_NOT_READY) {
24030 /*
24031 * Sense data of 02/06/00 means that the
24032 * drive could not read the media (no
24033 * reference position found). In this case,
24034 * to prevent a hang on the DKIOCSTATE ioctl,
24035 * we set the media state to DKIO_INSERTED.
24036 */
24037 if (asc == 0x06 && ascq == 0x00)
24038 state = DKIO_INSERTED;
24039
24040 /*
24041 * Sense data of 02/04/02 means that the host
24042 * should send a start command. Explicitly
24043 * leave the media state as is
24044 * (inserted), since the media is present
24045 * and the host has stopped the device for PM
24046 * reasons. The next real read/write
24047 * to the media will bring the
24048 * device back to the proper state for
24049 * media access.
24050 */
24051 if (asc == 0x3a) {
24052 state = DKIO_EJECTED;
24053 } else {
24054 /*
24055 * If the drive is busy with an
24056 * operation or long write, keep the
24057 * media in an inserted state.
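 * (For reference: ASC 0x04 with ASCQ 0x02, 0x07 or 0x08 corresponds
 * to "logical unit not ready" with "initializing command required",
 * "operation in progress" and "long write in progress" respectively,
 * which is why the check below treats the media as present.)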
24058 */ 24059 24060 if ((asc == 0x04) && 24061 ((ascq == 0x02) || 24062 (ascq == 0x07) || 24063 (ascq == 0x08))) { 24064 state = DKIO_INSERTED; 24065 } 24066 } 24067 } else if (skey == KEY_NO_SENSE) { 24068 if ((asc == 0x00) && (ascq == 0x00)) { 24069 /* 24070 * Sense Data 00/00/00 does not provide 24071 * any information about the state of 24072 * the media. Ignore it. 24073 */ 24074 mutex_exit(SD_MUTEX(un)); 24075 return (0); 24076 } 24077 } 24078 } 24079 } else if ((*((char *)statusp) == STATUS_GOOD) && 24080 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 24081 state = DKIO_INSERTED; 24082 } 24083 24084 SD_TRACE(SD_LOG_COMMON, un, 24085 "sd_media_watch_cb: state=%x, specified=%x\n", 24086 state, un->un_specified_mediastate); 24087 24088 /* 24089 * now signal the waiting thread if this is *not* the specified state; 24090 * delay the signal if the state is DKIO_INSERTED to allow the target 24091 * to recover 24092 */ 24093 if (state != un->un_specified_mediastate) { 24094 un->un_mediastate = state; 24095 if (state == DKIO_INSERTED) { 24096 /* 24097 * delay the signal to give the drive a chance 24098 * to do what it apparently needs to do 24099 */ 24100 SD_TRACE(SD_LOG_COMMON, un, 24101 "sd_media_watch_cb: delayed cv_broadcast\n"); 24102 if (un->un_dcvb_timeid == NULL) { 24103 un->un_dcvb_timeid = 24104 timeout(sd_delayed_cv_broadcast, un, 24105 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24106 } 24107 } else { 24108 SD_TRACE(SD_LOG_COMMON, un, 24109 "sd_media_watch_cb: immediate cv_broadcast\n"); 24110 cv_broadcast(&un->un_state_cv); 24111 } 24112 } 24113 mutex_exit(SD_MUTEX(un)); 24114 return (0); 24115 } 24116 24117 24118 /* 24119 * Function: sd_dkio_get_temp 24120 * 24121 * Description: This routine is the driver entry point for handling ioctl 24122 * requests to get the disk temperature. 24123 * 24124 * Arguments: dev - the device number 24125 * arg - pointer to user provided dk_temperature structure. 24126 * flag - this argument is a pass through to ddi_copyxxx() 24127 * directly from the mode argument of ioctl(). 24128 * 24129 * Return Code: 0 24130 * EFAULT 24131 * ENXIO 24132 * EAGAIN 24133 */ 24134 24135 static int 24136 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24137 { 24138 struct sd_lun *un = NULL; 24139 struct dk_temperature *dktemp = NULL; 24140 uchar_t *temperature_page; 24141 int rval = 0; 24142 int path_flag = SD_PATH_STANDARD; 24143 sd_ssc_t *ssc; 24144 24145 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24146 return (ENXIO); 24147 } 24148 24149 ssc = sd_ssc_init(un); 24150 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24151 24152 /* copyin the disk temp argument to get the user flags */ 24153 if (ddi_copyin((void *)arg, dktemp, 24154 sizeof (struct dk_temperature), flag) != 0) { 24155 rval = EFAULT; 24156 goto done; 24157 } 24158 24159 /* Initialize the temperature to invalid. */ 24160 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24161 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24162 24163 /* 24164 * Note: Investigate removing the "bypass pm" semantic. 24165 * Can we just bypass PM always? 24166 */ 24167 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24168 path_flag = SD_PATH_DIRECT; 24169 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24170 mutex_enter(&un->un_pm_mutex); 24171 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24172 /* 24173 * If DKT_BYPASS_PM is set, and the drive happens to be 24174 * in low power mode, we can not wake it up, Need to 24175 * return EAGAIN. 
24176 */
24177 mutex_exit(&un->un_pm_mutex);
24178 rval = EAGAIN;
24179 goto done;
24180 } else {
24181 /*
24182 * Indicate to PM that the device is busy. This is required
24183 * to avoid a race - i.e. the ioctl issues a
24184 * command while the pm framework brings down the device
24185 * to low power mode (possible power cut-off on some
24186 * platforms).
24187 */
24188 mutex_exit(&un->un_pm_mutex);
24189 if (sd_pm_entry(un) != DDI_SUCCESS) {
24190 rval = EAGAIN;
24191 goto done;
24192 }
24193 }
24194 }
24195
24196 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24197
24198 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24199 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24200 if (rval != 0)
24201 goto done2;
24202
24203 /*
24204 * For the current temperature verify that the parameter length is 0x02
24205 * and the parameter code is 0x00
24206 */
24207 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24208 (temperature_page[5] == 0x00)) {
24209 if (temperature_page[9] == 0xFF) {
24210 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24211 } else {
24212 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24213 }
24214 }
24215
24216 /*
24217 * For the reference temperature verify that the parameter
24218 * length is 0x02 and the parameter code is 0x01
24219 */
24220 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24221 (temperature_page[11] == 0x01)) {
24222 if (temperature_page[15] == 0xFF) {
24223 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24224 } else {
24225 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24226 }
24227 }
24228
24229 /* Do the copyout regardless of the temperature command's status. */
24230 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24231 flag) != 0) {
24232 rval = EFAULT;
24233 goto done1;
24234 }
24235
24236 done2:
24237 if (rval != 0) {
24238 if (rval == EIO)
24239 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24240 else
24241 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24242 }
24243 done1:
24244 if (path_flag == SD_PATH_DIRECT) {
24245 sd_pm_exit(un);
24246 }
24247
24248 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24249 done:
24250 sd_ssc_fini(ssc);
24251 if (dktemp != NULL) {
24252 kmem_free(dktemp, sizeof (struct dk_temperature));
24253 }
24254
24255 return (rval);
24256 }
24257
24258
24259 /*
24260 * Function: sd_log_page_supported
24261 *
24262 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24263 * supported log pages.
24264 *
24265 * Arguments: ssc - ssc contains a pointer to driver soft state (unit)
24266 * structure for this target.
24267 * log_page - the log page to look for in the list of supported
24268 * log pages.
24269 * Return Code: -1 - on error (log sense is optional and may not be supported).
24270 * 0 - log page not found.
24271 * 1 - log page found.
24272 */
24273
24274 static int
24275 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24276 {
24277 uchar_t *log_page_data;
24278 int i;
24279 int match = 0;
24280 int log_size;
24281 int status = 0;
24282 struct sd_lun *un;
24283
24284 ASSERT(ssc != NULL);
24285 un = ssc->ssc_un;
24286 ASSERT(un != NULL);
24287
24288 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24289
24290 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24291 SD_PATH_DIRECT);
24292
24293 if (status != 0) {
24294 if (status == EIO) {
24295 /*
24296 * Some disks do not support log sense; we
24297 * should ignore this kind of error (sense key
24298 * 0x5 - illegal request).
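 * (That is the KEY_ILLEGAL_REQUEST case tested below; any other
 * failure is assessed as a real fault.)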
24299 */ 24300 uint8_t *sensep; 24301 int senlen; 24302 24303 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 24304 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 24305 ssc->ssc_uscsi_cmd->uscsi_rqresid); 24306 24307 if (senlen > 0 && 24308 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 24309 sd_ssc_assessment(ssc, 24310 SD_FMT_IGNORE_COMPROMISE); 24311 } else { 24312 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24313 } 24314 } else { 24315 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24316 } 24317 24318 SD_ERROR(SD_LOG_COMMON, un, 24319 "sd_log_page_supported: failed log page retrieval\n"); 24320 kmem_free(log_page_data, 0xFF); 24321 return (-1); 24322 } 24323 24324 log_size = log_page_data[3]; 24325 24326 /* 24327 * The list of supported log pages start from the fourth byte. Check 24328 * until we run out of log pages or a match is found. 24329 */ 24330 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24331 if (log_page_data[i] == log_page) { 24332 match++; 24333 } 24334 } 24335 kmem_free(log_page_data, 0xFF); 24336 return (match); 24337 } 24338 24339 24340 /* 24341 * Function: sd_mhdioc_failfast 24342 * 24343 * Description: This routine is the driver entry point for handling ioctl 24344 * requests to enable/disable the multihost failfast option. 24345 * (MHIOCENFAILFAST) 24346 * 24347 * Arguments: dev - the device number 24348 * arg - user specified probing interval. 24349 * flag - this argument is a pass through to ddi_copyxxx() 24350 * directly from the mode argument of ioctl(). 24351 * 24352 * Return Code: 0 24353 * EFAULT 24354 * ENXIO 24355 */ 24356 24357 static int 24358 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24359 { 24360 struct sd_lun *un = NULL; 24361 int mh_time; 24362 int rval = 0; 24363 24364 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24365 return (ENXIO); 24366 } 24367 24368 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24369 return (EFAULT); 24370 24371 if (mh_time) { 24372 mutex_enter(SD_MUTEX(un)); 24373 un->un_resvd_status |= SD_FAILFAST; 24374 mutex_exit(SD_MUTEX(un)); 24375 /* 24376 * If mh_time is INT_MAX, then this ioctl is being used for 24377 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24378 */ 24379 if (mh_time != INT_MAX) { 24380 rval = sd_check_mhd(dev, mh_time); 24381 } 24382 } else { 24383 (void) sd_check_mhd(dev, 0); 24384 mutex_enter(SD_MUTEX(un)); 24385 un->un_resvd_status &= ~SD_FAILFAST; 24386 mutex_exit(SD_MUTEX(un)); 24387 } 24388 return (rval); 24389 } 24390 24391 24392 /* 24393 * Function: sd_mhdioc_takeown 24394 * 24395 * Description: This routine is the driver entry point for handling ioctl 24396 * requests to forcefully acquire exclusive access rights to the 24397 * multihost disk (MHIOCTKOWN). 24398 * 24399 * Arguments: dev - the device number 24400 * arg - user provided structure specifying the delay 24401 * parameters in milliseconds 24402 * flag - this argument is a pass through to ddi_copyxxx() 24403 * directly from the mode argument of ioctl(). 
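 *		(arg points to a struct mhioctkown carrying
 *		min_ownership_delay and max_ownership_delay; see
 *		sd_take_ownership() below)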
24404 * 24405 * Return Code: 0 24406 * EFAULT 24407 * ENXIO 24408 */ 24409 24410 static int 24411 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24412 { 24413 struct sd_lun *un = NULL; 24414 struct mhioctkown *tkown = NULL; 24415 int rval = 0; 24416 24417 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24418 return (ENXIO); 24419 } 24420 24421 if (arg != NULL) { 24422 tkown = (struct mhioctkown *) 24423 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24424 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24425 if (rval != 0) { 24426 rval = EFAULT; 24427 goto error; 24428 } 24429 } 24430 24431 rval = sd_take_ownership(dev, tkown); 24432 mutex_enter(SD_MUTEX(un)); 24433 if (rval == 0) { 24434 un->un_resvd_status |= SD_RESERVE; 24435 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24436 sd_reinstate_resv_delay = 24437 tkown->reinstate_resv_delay * 1000; 24438 } else { 24439 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24440 } 24441 /* 24442 * Give the scsi_watch routine interval set by 24443 * the MHIOCENFAILFAST ioctl precedence here. 24444 */ 24445 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24446 mutex_exit(SD_MUTEX(un)); 24447 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24448 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24449 "sd_mhdioc_takeown : %d\n", 24450 sd_reinstate_resv_delay); 24451 } else { 24452 mutex_exit(SD_MUTEX(un)); 24453 } 24454 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24455 sd_mhd_reset_notify_cb, (caddr_t)un); 24456 } else { 24457 un->un_resvd_status &= ~SD_RESERVE; 24458 mutex_exit(SD_MUTEX(un)); 24459 } 24460 24461 error: 24462 if (tkown != NULL) { 24463 kmem_free(tkown, sizeof (struct mhioctkown)); 24464 } 24465 return (rval); 24466 } 24467 24468 24469 /* 24470 * Function: sd_mhdioc_release 24471 * 24472 * Description: This routine is the driver entry point for handling ioctl 24473 * requests to release exclusive access rights to the multihost 24474 * disk (MHIOCRELEASE). 24475 * 24476 * Arguments: dev - the device number 24477 * 24478 * Return Code: 0 24479 * ENXIO 24480 */ 24481 24482 static int 24483 sd_mhdioc_release(dev_t dev) 24484 { 24485 struct sd_lun *un = NULL; 24486 timeout_id_t resvd_timeid_save; 24487 int resvd_status_save; 24488 int rval = 0; 24489 24490 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24491 return (ENXIO); 24492 } 24493 24494 mutex_enter(SD_MUTEX(un)); 24495 resvd_status_save = un->un_resvd_status; 24496 un->un_resvd_status &= 24497 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24498 if (un->un_resvd_timeid) { 24499 resvd_timeid_save = un->un_resvd_timeid; 24500 un->un_resvd_timeid = NULL; 24501 mutex_exit(SD_MUTEX(un)); 24502 (void) untimeout(resvd_timeid_save); 24503 } else { 24504 mutex_exit(SD_MUTEX(un)); 24505 } 24506 24507 /* 24508 * destroy any pending timeout thread that may be attempting to 24509 * reinstate reservation on this device. 
24510 */
24511 sd_rmv_resv_reclaim_req(dev);
24512
24513 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24514 mutex_enter(SD_MUTEX(un));
24515 if ((un->un_mhd_token) &&
24516 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24517 mutex_exit(SD_MUTEX(un));
24518 (void) sd_check_mhd(dev, 0);
24519 } else {
24520 mutex_exit(SD_MUTEX(un));
24521 }
24522 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24523 sd_mhd_reset_notify_cb, (caddr_t)un);
24524 } else {
24525 /*
24526 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24527 */
24528 mutex_enter(SD_MUTEX(un));
24529 un->un_resvd_status = resvd_status_save;
24530 mutex_exit(SD_MUTEX(un));
24531 }
24532 return (rval);
24533 }
24534
24535
24536 /*
24537 * Function: sd_mhdioc_register_devid
24538 *
24539 * Description: This routine is the driver entry point for handling ioctl
24540 * requests to register the device id (MHIOCREREGISTERDEVID).
24541 *
24542 * Note: The implementation for this ioctl has been updated to
24543 * be consistent with the original PSARC case (1999/357)
24544 * (4375899, 4241671, 4220005)
24545 *
24546 * Arguments: dev - the device number
24547 *
24548 * Return Code: 0
24549 * ENXIO
24550 */
24551
24552 static int
24553 sd_mhdioc_register_devid(dev_t dev)
24554 {
24555 struct sd_lun *un = NULL;
24556 int rval = 0;
24557 sd_ssc_t *ssc;
24558
24559 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24560 return (ENXIO);
24561 }
24562
24563 ASSERT(!mutex_owned(SD_MUTEX(un)));
24564
24565 mutex_enter(SD_MUTEX(un));
24566
24567 /* If a devid already exists, de-register it */
24568 if (un->un_devid != NULL) {
24569 ddi_devid_unregister(SD_DEVINFO(un));
24570 /*
24571 * After unregistering the devid, free the devid memory.
24572 */
24573 ddi_devid_free(un->un_devid);
24574 un->un_devid = NULL;
24575 }
24576
24577 /* Check for reservation conflict */
24578 mutex_exit(SD_MUTEX(un));
24579 ssc = sd_ssc_init(un);
24580 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24581 mutex_enter(SD_MUTEX(un));
24582
24583 switch (rval) {
24584 case 0:
24585 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24586 break;
24587 case EACCES:
24588 break;
24589 default:
24590 rval = EIO;
24591 }
24592
24593 mutex_exit(SD_MUTEX(un));
24594 if (rval != 0) {
24595 if (rval == EIO)
24596 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24597 else
24598 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24599 }
24600 sd_ssc_fini(ssc);
24601 return (rval);
24602 }
24603
24604
24605 /*
24606 * Function: sd_mhdioc_inkeys
24607 *
24608 * Description: This routine is the driver entry point for handling ioctl
24609 * requests to issue the SCSI-3 Persistent Reserve In (Read Keys)
24610 * command to the device (MHIOCGRP_INKEYS).
24611 *
24612 * Arguments: dev - the device number
24613 * arg - user provided in_keys structure
24614 * flag - this argument is a pass through to ddi_copyxxx()
24615 * directly from the mode argument of ioctl().
24616 *
24617 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24618 * ENXIO
24619 * EFAULT
24620 */
24621
24622 static int
24623 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24624 {
24625 struct sd_lun *un;
24626 mhioc_inkeys_t inkeys;
24627 int rval = 0;
24628
24629 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24630 return (ENXIO);
24631 }
24632
24633 #ifdef _MULTI_DATAMODEL
24634 switch (ddi_model_convert_from(flag & FMODELS)) {
24635 case DDI_MODEL_ILP32: {
24636 struct mhioc_inkeys32 inkeys32;
24637
24638 if (ddi_copyin(arg, &inkeys32,
24639 sizeof (struct mhioc_inkeys32), flag) != 0) {
24640 return (EFAULT);
24641 }
24642 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24643 if ((rval = sd_persistent_reservation_in_read_keys(un,
24644 &inkeys, flag)) != 0) {
24645 return (rval);
24646 }
24647 inkeys32.generation = inkeys.generation;
24648 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24649 flag) != 0) {
24650 return (EFAULT);
24651 }
24652 break;
24653 }
24654 case DDI_MODEL_NONE:
24655 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24656 flag) != 0) {
24657 return (EFAULT);
24658 }
24659 if ((rval = sd_persistent_reservation_in_read_keys(un,
24660 &inkeys, flag)) != 0) {
24661 return (rval);
24662 }
24663 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24664 flag) != 0) {
24665 return (EFAULT);
24666 }
24667 break;
24668 }
24669
24670 #else /* ! _MULTI_DATAMODEL */
24671
24672 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24673 return (EFAULT);
24674 }
24675 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24676 if (rval != 0) {
24677 return (rval);
24678 }
24679 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24680 return (EFAULT);
24681 }
24682
24683 #endif /* _MULTI_DATAMODEL */
24684
24685 return (rval);
24686 }
24687
24688
24689 /*
24690 * Function: sd_mhdioc_inresv
24691 *
24692 * Description: This routine is the driver entry point for handling ioctl
24693 * requests to issue the SCSI-3 Persistent Reserve In (Read
24694 * Reservations) command to the device (MHIOCGRP_INRESV).
24695 *
24696 * Arguments: dev - the device number
24697 * arg - user provided in_resv structure
24698 * flag - this argument is a pass through to ddi_copyxxx()
24699 * directly from the mode argument of ioctl().
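 *	  (for 32-bit callers the structure is converted from the
 *	   mhioc_inresvs32 form via ddi_model_convert_from(), and only
 *	   the generation field is copied back out, as with
 *	   MHIOCGRP_INKEYS above)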
24700 *
24701 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24702 * ENXIO
24703 * EFAULT
24704 */
24705
24706 static int
24707 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24708 {
24709 struct sd_lun *un;
24710 mhioc_inresvs_t inresvs;
24711 int rval = 0;
24712
24713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24714 return (ENXIO);
24715 }
24716
24717 #ifdef _MULTI_DATAMODEL
24718
24719 switch (ddi_model_convert_from(flag & FMODELS)) {
24720 case DDI_MODEL_ILP32: {
24721 struct mhioc_inresvs32 inresvs32;
24722
24723 if (ddi_copyin(arg, &inresvs32,
24724 sizeof (struct mhioc_inresvs32), flag) != 0) {
24725 return (EFAULT);
24726 }
24727 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24728 if ((rval = sd_persistent_reservation_in_read_resv(un,
24729 &inresvs, flag)) != 0) {
24730 return (rval);
24731 }
24732 inresvs32.generation = inresvs.generation;
24733 if (ddi_copyout(&inresvs32, arg,
24734 sizeof (struct mhioc_inresvs32), flag) != 0) {
24735 return (EFAULT);
24736 }
24737 break;
24738 }
24739 case DDI_MODEL_NONE:
24740 if (ddi_copyin(arg, &inresvs,
24741 sizeof (mhioc_inresvs_t), flag) != 0) {
24742 return (EFAULT);
24743 }
24744 if ((rval = sd_persistent_reservation_in_read_resv(un,
24745 &inresvs, flag)) != 0) {
24746 return (rval);
24747 }
24748 if (ddi_copyout(&inresvs, arg,
24749 sizeof (mhioc_inresvs_t), flag) != 0) {
24750 return (EFAULT);
24751 }
24752 break;
24753 }
24754
24755 #else /* ! _MULTI_DATAMODEL */
24756
24757 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24758 return (EFAULT);
24759 }
24760 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24761 if (rval != 0) {
24762 return (rval);
24763 }
24764 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag) != 0) {
24765 return (EFAULT);
24766 }
24767
24768 #endif /* ! _MULTI_DATAMODEL */
24769
24770 return (rval);
24771 }
24772
24773
24774 /*
24775 * The following routines support the clustering functionality described below
24776 * and implement lost reservation reclaim functionality.
24777 *
24778 * Clustering
24779 * ----------
24780 * The clustering code uses two different, independent forms of SCSI
24781 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3
24782 * Persistent Group Reservations. For any particular disk, it will use either
24783 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24784 *
24785 * SCSI-2
24786 * The cluster software takes ownership of a multi-hosted disk by issuing the
24787 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24788 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
24789 * cluster node, just after taking ownership of the disk with the MHIOCTKOWN
24790 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast"
24791 * in the driver. The meaning of failfast is that if the driver (on this host)
24792 * ever encounters the scsi error return code RESERVATION_CONFLICT from the
24793 * device, it should immediately panic the host. The motivation for this ioctl
24794 * is that if this host does encounter reservation conflict, the underlying
24795 * cause is that some other host of the cluster has decided that this host is
24796 * no longer in the cluster and has seized control of the disks for itself.
24797 * Since this host is no longer in the cluster, it ought to panic itself.
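 * (Illustration only, not part of this driver: a cluster node would
 * typically issue the pair
 *
 *	(void) ioctl(fd, MHIOCTKOWN, &tkown);
 *	(void) ioctl(fd, MHIOCENFAILFAST, &interval_ms);
 *
 * where tkown is a struct mhioctkown and interval_ms is the probing
 * interval, in milliseconds, handled by sd_mhdioc_failfast() above.)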
24798 * The MHIOCENFAILFAST ioctl does two things:
24799 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24800 * error to panic the host
24801 * (b) it sets up a periodic timer to test whether this host still has
24802 * "access" (in that no other host has reserved the device): if the
24803 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24804 * purpose of that periodic timer is to handle scenarios where the host is
24805 * otherwise temporarily quiescent, temporarily doing no real i/o.
24806 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
24807 * host by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve
24808 * for the device itself.
24809 *
24810 * SCSI-3 PGR
24811 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24812 * facility is supported through the shared multihost disk ioctls
24813 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24814 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR).
24815 *
24816 * Reservation Reclaim:
24817 * --------------------
24818 * To support the lost reservation reclaim operations, this driver creates a
24819 * single thread to handle reinstating reservations on all devices that have
24820 * lost them. sd_resv_reclaim_requests are logged for all devices that have
24821 * LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb,
24822 * and the reservation reclaim thread loops through the requests to regain
24823 * the lost reservations.
24824 */
24825
24826 /*
24827 * Function: sd_check_mhd()
24828 *
24829 * Description: This function sets up and submits a scsi watch request or
24830 * terminates an existing watch request. This routine is used in
24831 * support of reservation reclaim.
24832 *
24833 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24834 * among multiple watches that share the callback function
24835 * interval - the number of milliseconds specifying the watch
24836 * interval for issuing TEST UNIT READY commands. If
24837 * set to 0 the watch should be terminated. If the
24838 * interval is set to 0 and if the device is required
24839 * to hold reservation while disabling failfast, the
24840 * watch is restarted with an interval of
24841 * reinstate_resv_delay.
24842 *
24843 * Return Code: 0 - Successful submit/terminate of scsi watch request
24844 * ENXIO - Indicates an invalid device was specified
24845 * EAGAIN - Unable to submit the scsi watch request
24846 */
24847
24848 static int
24849 sd_check_mhd(dev_t dev, int interval)
24850 {
24851 struct sd_lun *un;
24852 opaque_t token;
24853
24854 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24855 return (ENXIO);
24856 }
24857
24858 /* is this a watch termination request? */
24859 if (interval == 0) {
24860 mutex_enter(SD_MUTEX(un));
24861 /* if there is an existing watch task then terminate it */
24862 if (un->un_mhd_token) {
24863 token = un->un_mhd_token;
24864 un->un_mhd_token = NULL;
24865 mutex_exit(SD_MUTEX(un));
24866 (void) scsi_watch_request_terminate(token,
24867 SCSI_WATCH_TERMINATE_ALL_WAIT);
24868 mutex_enter(SD_MUTEX(un));
24869 } else {
24870 mutex_exit(SD_MUTEX(un));
24871 /*
24872 * Note: If we return here we don't check for the
24873 * failfast case. This is the original legacy
24874 * implementation, but perhaps we should be checking
24875 * the failfast case.
24876 */
24877 return (0);
24878 }
24879 /*
24880 * If the device is required to hold reservation while
24881 * disabling failfast, we need to restart the scsi_watch
24882 * routine with an interval of reinstate_resv_delay.
24883 */
24884 if (un->un_resvd_status & SD_RESERVE) {
24885 interval = sd_reinstate_resv_delay/1000;
24886 } else {
24887 /* no failfast so bail */
24888 mutex_exit(SD_MUTEX(un));
24889 return (0);
24890 }
24891 mutex_exit(SD_MUTEX(un));
24892 }
24893
24894 /*
24895 * adjust minimum time interval to 1 second,
24896 * and convert from msecs to usecs
24897 */
24898 if (interval > 0 && interval < 1000) {
24899 interval = 1000;
24900 }
24901 interval *= 1000;
24902
24903 /*
24904 * submit the request to the scsi_watch service
24905 */
24906 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
24907 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
24908 if (token == NULL) {
24909 return (EAGAIN);
24910 }
24911
24912 /*
24913 * save token for termination later on
24914 */
24915 mutex_enter(SD_MUTEX(un));
24916 un->un_mhd_token = token;
24917 mutex_exit(SD_MUTEX(un));
24918 return (0);
24919 }
24920
24921
24922 /*
24923 * Function: sd_mhd_watch_cb()
24924 *
24925 * Description: This function is the call back function used by the scsi watch
24926 * facility. The scsi watch facility sends the "Test Unit Ready"
24927 * and processes the status. If applicable (i.e. a "Unit Attention"
24928 * status and automatic "Request Sense" not used) the scsi watch
24929 * facility will send a "Request Sense" and retrieve the sense data
24930 * to be passed to this callback function. In either case -- whether
24931 * the "Request Sense" was automatic or submitted by the facility --
24932 * this callback is passed the status and sense data.
24933 *
24934 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24935 * among multiple watches that share this callback function
24936 * resultp - scsi watch facility result packet containing scsi
24937 * packet, status byte and sense data
24938 *
24939 * Return Code: 0 - continue the watch task
24940 * non-zero - terminate the watch task
24941 */
24942
24943 static int
24944 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24945 {
24946 struct sd_lun *un;
24947 struct scsi_status *statusp;
24948 uint8_t *sensep;
24949 struct scsi_pkt *pkt;
24950 uchar_t actual_sense_length;
24951 dev_t dev = (dev_t)arg;
24952
24953 ASSERT(resultp != NULL);
24954 statusp = resultp->statusp;
24955 sensep = (uint8_t *)resultp->sensep;
24956 pkt = resultp->pkt;
24957 actual_sense_length = resultp->actual_sense_length;
24958
24959 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24960 return (ENXIO);
24961 }
24962
24963 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24964 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24965 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));
24966
24967 /* Begin processing of the status and/or sense data */
24968 if (pkt->pkt_reason != CMD_CMPLT) {
24969 /* Handle the incomplete packet */
24970 sd_mhd_watch_incomplete(un, pkt);
24971 return (0);
24972 } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
24973 if (*((unsigned char *)statusp)
24974 == STATUS_RESERVATION_CONFLICT) {
24975 /*
24976 * Handle a reservation conflict by panicking if
24977 * configured for failfast or by logging the conflict
24978 * and updating the reservation status
24979 */
24980 mutex_enter(SD_MUTEX(un));
24981 if ((un->un_resvd_status & SD_FAILFAST) &&
24982 (sd_failfast_enable)) {
24983
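/*
 * Failfast is armed and the device reported a reservation
 * conflict: panic this host now. sd_panic_for_res_conflict()
 * does not return.
 */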
sd_panic_for_res_conflict(un); 24984 /*NOTREACHED*/ 24985 } 24986 SD_INFO(SD_LOG_IOCTL_MHD, un, 24987 "sd_mhd_watch_cb: Reservation Conflict\n"); 24988 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24989 mutex_exit(SD_MUTEX(un)); 24990 } 24991 } 24992 24993 if (sensep != NULL) { 24994 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24995 mutex_enter(SD_MUTEX(un)); 24996 if ((scsi_sense_asc(sensep) == 24997 SD_SCSI_RESET_SENSE_CODE) && 24998 (un->un_resvd_status & SD_RESERVE)) { 24999 /* 25000 * The additional sense code indicates a power 25001 * on or bus device reset has occurred; update 25002 * the reservation status. 25003 */ 25004 un->un_resvd_status |= 25005 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25006 SD_INFO(SD_LOG_IOCTL_MHD, un, 25007 "sd_mhd_watch_cb: Lost Reservation\n"); 25008 } 25009 } else { 25010 return (0); 25011 } 25012 } else { 25013 mutex_enter(SD_MUTEX(un)); 25014 } 25015 25016 if ((un->un_resvd_status & SD_RESERVE) && 25017 (un->un_resvd_status & SD_LOST_RESERVE)) { 25018 if (un->un_resvd_status & SD_WANT_RESERVE) { 25019 /* 25020 * A reset occurred in between the last probe and this 25021 * one so if a timeout is pending cancel it. 25022 */ 25023 if (un->un_resvd_timeid) { 25024 timeout_id_t temp_id = un->un_resvd_timeid; 25025 un->un_resvd_timeid = NULL; 25026 mutex_exit(SD_MUTEX(un)); 25027 (void) untimeout(temp_id); 25028 mutex_enter(SD_MUTEX(un)); 25029 } 25030 un->un_resvd_status &= ~SD_WANT_RESERVE; 25031 } 25032 if (un->un_resvd_timeid == 0) { 25033 /* Schedule a timeout to handle the lost reservation */ 25034 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25035 (void *)dev, 25036 drv_usectohz(sd_reinstate_resv_delay)); 25037 } 25038 } 25039 mutex_exit(SD_MUTEX(un)); 25040 return (0); 25041 } 25042 25043 25044 /* 25045 * Function: sd_mhd_watch_incomplete() 25046 * 25047 * Description: This function is used to find out why a scsi pkt sent by the 25048 * scsi watch facility was not completed. Under some scenarios this 25049 * routine will return. Otherwise it will send a bus reset to see 25050 * if the drive is still online. 25051 * 25052 * Arguments: un - driver soft state (unit) structure 25053 * pkt - incomplete scsi pkt 25054 */ 25055 25056 static void 25057 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25058 { 25059 int be_chatty; 25060 int perr; 25061 25062 ASSERT(pkt != NULL); 25063 ASSERT(un != NULL); 25064 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25065 perr = (pkt->pkt_statistics & STAT_PERR); 25066 25067 mutex_enter(SD_MUTEX(un)); 25068 if (un->un_state == SD_STATE_DUMPING) { 25069 mutex_exit(SD_MUTEX(un)); 25070 return; 25071 } 25072 25073 switch (pkt->pkt_reason) { 25074 case CMD_UNX_BUS_FREE: 25075 /* 25076 * If we had a parity error that caused the target to drop BSY*, 25077 * don't be chatty about it. 25078 */ 25079 if (perr && be_chatty) { 25080 be_chatty = 0; 25081 } 25082 break; 25083 case CMD_TAG_REJECT: 25084 /* 25085 * The SCSI-2 spec states that a tag reject will be sent by the 25086 * target if tagged queuing is not supported. A tag reject may 25087 * also be sent during certain initialization periods or to 25088 * control internal resources. For the latter case the target 25089 * may also return Queue Full. 25090 * 25091 * If this driver receives a tag reject from a target that is 25092 * going through an init period or controlling internal 25093 * resources tagged queuing will be disabled. 
This is less
25094 * than optimal behavior, but the driver is unable to determine
25095 * the target state and assumes tagged queuing is not supported.
25096 */
25097 pkt->pkt_flags = 0;
25098 un->un_tagflags = 0;
25099
25100 if (un->un_f_opt_queueing == TRUE) {
25101 un->un_throttle = min(un->un_throttle, 3);
25102 } else {
25103 un->un_throttle = 1;
25104 }
25105 mutex_exit(SD_MUTEX(un));
25106 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25107 mutex_enter(SD_MUTEX(un));
25108 break;
25109 case CMD_INCOMPLETE:
25110 /*
25111 * The transport stopped with an abnormal state; fall through and
25112 * reset the target and/or bus, unless selection did not complete
25113 * (indicated by STATE_GOT_BUS), in which case we don't want to
25114 * go through a target/bus reset.
25115 */
25116 if (pkt->pkt_state == STATE_GOT_BUS) {
25117 break;
25118 }
25119 /*FALLTHROUGH*/
25120
25121 case CMD_TIMEOUT:
25122 default:
25123 /*
25124 * The lun may still be running the command, so a lun reset
25125 * should be attempted. If the lun reset fails or cannot be
25126 * issued, then try a target reset. Lastly try a bus reset.
25127 */
25128 if ((pkt->pkt_statistics &
25129 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25130 int reset_retval = 0;
25131 mutex_exit(SD_MUTEX(un));
25132 if (un->un_f_allow_bus_device_reset == TRUE) {
25133 if (un->un_f_lun_reset_enabled == TRUE) {
25134 reset_retval =
25135 scsi_reset(SD_ADDRESS(un),
25136 RESET_LUN);
25137 }
25138 if (reset_retval == 0) {
25139 reset_retval =
25140 scsi_reset(SD_ADDRESS(un),
25141 RESET_TARGET);
25142 }
25143 }
25144 if (reset_retval == 0) {
25145 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25146 }
25147 mutex_enter(SD_MUTEX(un));
25148 }
25149 break;
25150 }
25151
25152 /* A device/bus reset has occurred; update the reservation status. */
25153 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25154 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25155 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25156 un->un_resvd_status |=
25157 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25158 SD_INFO(SD_LOG_IOCTL_MHD, un,
25159 "sd_mhd_watch_incomplete: Lost Reservation\n");
25160 }
25161 }
25162
25163 /*
25164 * The disk has been turned off; update the device state.
25165 *
25166 * Note: Should we be offlining the disk here?
25167 */
25168 if (pkt->pkt_state == STATE_GOT_BUS) {
25169 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25170 "Disk not responding to selection\n");
25171 if (un->un_state != SD_STATE_OFFLINE) {
25172 New_state(un, SD_STATE_OFFLINE);
25173 }
25174 } else if (be_chatty) {
25175 /*
25176 * suppress messages if they are all the same pkt reason;
25177 * with TQ, many (up to 256) are returned with the same
25178 * pkt_reason
25179 */
25180 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25181 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25182 "sd_mhd_watch_incomplete: "
25183 "SCSI transport failed: reason '%s'\n",
25184 scsi_rname(pkt->pkt_reason));
25185 }
25186 }
25187 un->un_last_pkt_reason = pkt->pkt_reason;
25188 mutex_exit(SD_MUTEX(un));
25189 }
25190
25191
25192 /*
25193 * Function: sd_sname()
25194 *
25195 * Description: This is a simple little routine to return a string containing
25196 * a printable description of the command status byte, for use in
25197 * logging.
25198 *
25199 * Arguments: status - the status byte
25200 *
25201 * Return Code: char * - string containing status description.
25202 */ 25203 25204 static char * 25205 sd_sname(uchar_t status) 25206 { 25207 switch (status & STATUS_MASK) { 25208 case STATUS_GOOD: 25209 return ("good status"); 25210 case STATUS_CHECK: 25211 return ("check condition"); 25212 case STATUS_MET: 25213 return ("condition met"); 25214 case STATUS_BUSY: 25215 return ("busy"); 25216 case STATUS_INTERMEDIATE: 25217 return ("intermediate"); 25218 case STATUS_INTERMEDIATE_MET: 25219 return ("intermediate - condition met"); 25220 case STATUS_RESERVATION_CONFLICT: 25221 return ("reservation_conflict"); 25222 case STATUS_TERMINATED: 25223 return ("command terminated"); 25224 case STATUS_QFULL: 25225 return ("queue full"); 25226 default: 25227 return ("<unknown status>"); 25228 } 25229 } 25230 25231 25232 /* 25233 * Function: sd_mhd_resvd_recover() 25234 * 25235 * Description: This function adds a reservation entry to the 25236 * sd_resv_reclaim_request list and signals the reservation 25237 * reclaim thread that there is work pending. If the reservation 25238 * reclaim thread has not been previously created this function 25239 * will kick it off. 25240 * 25241 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25242 * among multiple watches that share this callback function 25243 * 25244 * Context: This routine is called by timeout() and is run in interrupt 25245 * context. It must not sleep or call other functions which may 25246 * sleep. 25247 */ 25248 25249 static void 25250 sd_mhd_resvd_recover(void *arg) 25251 { 25252 dev_t dev = (dev_t)arg; 25253 struct sd_lun *un; 25254 struct sd_thr_request *sd_treq = NULL; 25255 struct sd_thr_request *sd_cur = NULL; 25256 struct sd_thr_request *sd_prev = NULL; 25257 int already_there = 0; 25258 25259 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25260 return; 25261 } 25262 25263 mutex_enter(SD_MUTEX(un)); 25264 un->un_resvd_timeid = NULL; 25265 if (un->un_resvd_status & SD_WANT_RESERVE) { 25266 /* 25267 * There was a reset so don't issue the reserve, allow the 25268 * sd_mhd_watch_cb callback function to notice this and 25269 * reschedule the timeout for reservation. 25270 */ 25271 mutex_exit(SD_MUTEX(un)); 25272 return; 25273 } 25274 mutex_exit(SD_MUTEX(un)); 25275 25276 /* 25277 * Add this device to the sd_resv_reclaim_request list and the 25278 * sd_resv_reclaim_thread should take care of the rest. 25279 * 25280 * Note: We can't sleep in this context so if the memory allocation 25281 * fails allow the sd_mhd_watch_cb callback function to notice this and 25282 * reschedule the timeout for reservation. 
(4378460) 25283 */ 25284 sd_treq = (struct sd_thr_request *) 25285 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25286 if (sd_treq == NULL) { 25287 return; 25288 } 25289 25290 sd_treq->sd_thr_req_next = NULL; 25291 sd_treq->dev = dev; 25292 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25293 if (sd_tr.srq_thr_req_head == NULL) { 25294 sd_tr.srq_thr_req_head = sd_treq; 25295 } else { 25296 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25297 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25298 if (sd_cur->dev == dev) { 25299 /* 25300 * already in Queue so don't log 25301 * another request for the device 25302 */ 25303 already_there = 1; 25304 break; 25305 } 25306 sd_prev = sd_cur; 25307 } 25308 if (!already_there) { 25309 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25310 "logging request for %lx\n", dev); 25311 sd_prev->sd_thr_req_next = sd_treq; 25312 } else { 25313 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25314 } 25315 } 25316 25317 /* 25318 * Create a kernel thread to do the reservation reclaim and free up this 25319 * thread. We cannot block this thread while we go away to do the 25320 * reservation reclaim 25321 */ 25322 if (sd_tr.srq_resv_reclaim_thread == NULL) 25323 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25324 sd_resv_reclaim_thread, NULL, 25325 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25326 25327 /* Tell the reservation reclaim thread that it has work to do */ 25328 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25329 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25330 } 25331 25332 /* 25333 * Function: sd_resv_reclaim_thread() 25334 * 25335 * Description: This function implements the reservation reclaim operations 25336 * 25337 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25338 * among multiple watches that share this callback function 25339 */ 25340 25341 static void 25342 sd_resv_reclaim_thread() 25343 { 25344 struct sd_lun *un; 25345 struct sd_thr_request *sd_mhreq; 25346 25347 /* Wait for work */ 25348 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25349 if (sd_tr.srq_thr_req_head == NULL) { 25350 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25351 &sd_tr.srq_resv_reclaim_mutex); 25352 } 25353 25354 /* Loop while we have work */ 25355 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25356 un = ddi_get_soft_state(sd_state, 25357 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25358 if (un == NULL) { 25359 /* 25360 * softstate structure is NULL so just 25361 * dequeue the request and continue 25362 */ 25363 sd_tr.srq_thr_req_head = 25364 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25365 kmem_free(sd_tr.srq_thr_cur_req, 25366 sizeof (struct sd_thr_request)); 25367 continue; 25368 } 25369 25370 /* dequeue the request */ 25371 sd_mhreq = sd_tr.srq_thr_cur_req; 25372 sd_tr.srq_thr_req_head = 25373 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25374 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25375 25376 /* 25377 * Reclaim reservation only if SD_RESERVE is still set. There 25378 * may have been a call to MHIOCRELEASE before we got here. 25379 */ 25380 mutex_enter(SD_MUTEX(un)); 25381 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25382 /* 25383 * Note: The SD_LOST_RESERVE flag is cleared before 25384 * reclaiming the reservation. 
If this is done after the 25385 * call to sd_reserve_release a reservation loss in the 25386 * window between pkt completion of reserve cmd and 25387 * mutex_enter below may not be recognized 25388 */ 25389 un->un_resvd_status &= ~SD_LOST_RESERVE; 25390 mutex_exit(SD_MUTEX(un)); 25391 25392 if (sd_reserve_release(sd_mhreq->dev, 25393 SD_RESERVE) == 0) { 25394 mutex_enter(SD_MUTEX(un)); 25395 un->un_resvd_status |= SD_RESERVE; 25396 mutex_exit(SD_MUTEX(un)); 25397 SD_INFO(SD_LOG_IOCTL_MHD, un, 25398 "sd_resv_reclaim_thread: " 25399 "Reservation Recovered\n"); 25400 } else { 25401 mutex_enter(SD_MUTEX(un)); 25402 un->un_resvd_status |= SD_LOST_RESERVE; 25403 mutex_exit(SD_MUTEX(un)); 25404 SD_INFO(SD_LOG_IOCTL_MHD, un, 25405 "sd_resv_reclaim_thread: Failed " 25406 "Reservation Recovery\n"); 25407 } 25408 } else { 25409 mutex_exit(SD_MUTEX(un)); 25410 } 25411 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25412 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25413 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25414 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25415 /* 25416 * wakeup the destroy thread if anyone is waiting on 25417 * us to complete. 25418 */ 25419 cv_signal(&sd_tr.srq_inprocess_cv); 25420 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25421 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25422 } 25423 25424 /* 25425 * cleanup the sd_tr structure now that this thread will not exist 25426 */ 25427 ASSERT(sd_tr.srq_thr_req_head == NULL); 25428 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25429 sd_tr.srq_resv_reclaim_thread = NULL; 25430 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25431 thread_exit(); 25432 } 25433 25434 25435 /* 25436 * Function: sd_rmv_resv_reclaim_req() 25437 * 25438 * Description: This function removes any pending reservation reclaim requests 25439 * for the specified device. 25440 * 25441 * Arguments: dev - the device 'dev_t' 25442 */ 25443 25444 static void 25445 sd_rmv_resv_reclaim_req(dev_t dev) 25446 { 25447 struct sd_thr_request *sd_mhreq; 25448 struct sd_thr_request *sd_prev; 25449 25450 /* Remove a reservation reclaim request from the list */ 25451 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25452 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25453 /* 25454 * We are attempting to reinstate reservation for 25455 * this device. We wait for sd_reserve_release() 25456 * to return before we return. 25457 */ 25458 cv_wait(&sd_tr.srq_inprocess_cv, 25459 &sd_tr.srq_resv_reclaim_mutex); 25460 } else { 25461 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25462 if (sd_mhreq && sd_mhreq->dev == dev) { 25463 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25464 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25465 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25466 return; 25467 } 25468 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25469 if (sd_mhreq && sd_mhreq->dev == dev) { 25470 break; 25471 } 25472 sd_prev = sd_mhreq; 25473 } 25474 if (sd_mhreq != NULL) { 25475 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25476 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25477 } 25478 } 25479 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25480 } 25481 25482 25483 /* 25484 * Function: sd_mhd_reset_notify_cb() 25485 * 25486 * Description: This is a call back function for scsi_reset_notify. This 25487 * function updates the softstate reserved status and logs the 25488 * reset. The driver scsi watch facility callback function 25489 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25490 * will reclaim the reservation. 
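 *		(This callback is registered via scsi_reset_notify()
 *		when MHIOCTKOWN takes ownership and cancelled on
 *		MHIOCRELEASE; see sd_mhdioc_takeown() and
 *		sd_mhdioc_release() above.)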
25491 *
25492 * Arguments: arg - driver soft state (unit) structure
25493 */
25494
25495 static void
25496 sd_mhd_reset_notify_cb(caddr_t arg)
25497 {
25498 struct sd_lun *un = (struct sd_lun *)arg;
25499
25500 mutex_enter(SD_MUTEX(un));
25501 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25502 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25503 SD_INFO(SD_LOG_IOCTL_MHD, un,
25504 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25505 }
25506 mutex_exit(SD_MUTEX(un));
25507 }
25508
25509
25510 /*
25511 * Function: sd_take_ownership()
25512 *
25513 * Description: This routine implements an algorithm to achieve a stable
25514 * reservation on disks which don't implement priority reserve,
25515 * and makes sure that other hosts' re-reservation attempts fail.
25516 * The algorithm consists of a loop that keeps issuing the RESERVE
25517 * for some period of time (min_ownership_delay, default 6 seconds).
25518 * During that loop, it looks to see if there has been a bus device
25519 * reset or bus reset (both of which cause an existing reservation
25520 * to be lost). If the reservation is lost, the RESERVE is reissued
25521 * until a period of min_ownership_delay with no resets has gone by,
25522 * or until max_ownership_delay has expired. This loop ensures that
25523 * the host really did manage to reserve the device, in spite of
25524 * resets. The looping for min_ownership_delay (default six
25525 * seconds) is important to early generation clustering products,
25526 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25527 * MHIOCENFAILFAST periodic timer of two seconds. By having
25528 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25529 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25530 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25531 * have already noticed, via the MHIOCENFAILFAST polling, that it
25532 * no longer "owns" the disk and will have panicked itself. Thus,
25533 * the host issuing the MHIOCTKOWN is assured (with timing
25534 * dependencies) that by the time it actually starts to use the
25535 * disk for real work, the old owner is no longer accessing it.
25536 *
25537 * min_ownership_delay is the minimum amount of time for which the
25538 * disk must be reserved continuously devoid of resets before the
25539 * MHIOCTKOWN ioctl will return success.
25540 *
25541 * max_ownership_delay indicates the amount of time by which the
25542 * take ownership should succeed or time out with an error.
25543 *
25544 * Arguments: dev - the device 'dev_t'
25545 * *p - struct containing timing info.
25546 *
25547 * Return Code: 0 for success or error code
25548 */
25549
25550 static int
25551 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25552 {
25553 struct sd_lun *un;
25554 int rval;
25555 int err;
25556 int reservation_count = 0;
25557 int min_ownership_delay = 6000000; /* in usec */
25558 int max_ownership_delay = 30000000; /* in usec */
25559 clock_t start_time; /* starting time of this algorithm */
25560 clock_t end_time; /* time limit for giving up */
25561 clock_t ownership_time; /* time limit for stable ownership */
25562 clock_t current_time;
25563 clock_t previous_current_time;
25564
25565 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25566 return (ENXIO);
25567 }
25568
25569 /*
25570 * Attempt a device reservation. A priority reservation is requested.
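 * (With the defaults above, the loop below reissues the RESERVE
 * every 500 ms, succeeds only after min_ownership_delay free of
 * resets and at least four consecutive successful reservations,
 * and gives up with EACCES once max_ownership_delay expires.)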
25571 */ 25572 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25573 != SD_SUCCESS) { 25574 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25575 "sd_take_ownership: return(1)=%d\n", rval); 25576 return (rval); 25577 } 25578 25579 /* Update the softstate reserved status to indicate the reservation */ 25580 mutex_enter(SD_MUTEX(un)); 25581 un->un_resvd_status |= SD_RESERVE; 25582 un->un_resvd_status &= 25583 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25584 mutex_exit(SD_MUTEX(un)); 25585 25586 if (p != NULL) { 25587 if (p->min_ownership_delay != 0) { 25588 min_ownership_delay = p->min_ownership_delay * 1000; 25589 } 25590 if (p->max_ownership_delay != 0) { 25591 max_ownership_delay = p->max_ownership_delay * 1000; 25592 } 25593 } 25594 SD_INFO(SD_LOG_IOCTL_MHD, un, 25595 "sd_take_ownership: min, max delays: %d, %d\n", 25596 min_ownership_delay, max_ownership_delay); 25597 25598 start_time = ddi_get_lbolt(); 25599 current_time = start_time; 25600 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25601 end_time = start_time + drv_usectohz(max_ownership_delay); 25602 25603 while (current_time - end_time < 0) { 25604 delay(drv_usectohz(500000)); 25605 25606 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25607 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25608 mutex_enter(SD_MUTEX(un)); 25609 rval = (un->un_resvd_status & 25610 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25611 mutex_exit(SD_MUTEX(un)); 25612 break; 25613 } 25614 } 25615 previous_current_time = current_time; 25616 current_time = ddi_get_lbolt(); 25617 mutex_enter(SD_MUTEX(un)); 25618 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25619 ownership_time = ddi_get_lbolt() + 25620 drv_usectohz(min_ownership_delay); 25621 reservation_count = 0; 25622 } else { 25623 reservation_count++; 25624 } 25625 un->un_resvd_status |= SD_RESERVE; 25626 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25627 mutex_exit(SD_MUTEX(un)); 25628 25629 SD_INFO(SD_LOG_IOCTL_MHD, un, 25630 "sd_take_ownership: ticks for loop iteration=%ld, " 25631 "reservation=%s\n", (current_time - previous_current_time), 25632 reservation_count ? 
"ok" : "reclaimed"); 25633 25634 if (current_time - ownership_time >= 0 && 25635 reservation_count >= 4) { 25636 rval = 0; /* Achieved a stable ownership */ 25637 break; 25638 } 25639 if (current_time - end_time >= 0) { 25640 rval = EACCES; /* No ownership in max possible time */ 25641 break; 25642 } 25643 } 25644 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25645 "sd_take_ownership: return(2)=%d\n", rval); 25646 return (rval); 25647 } 25648 25649 25650 /* 25651 * Function: sd_reserve_release() 25652 * 25653 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25654 * PRIORITY RESERVE commands based on a user specified command type 25655 * 25656 * Arguments: dev - the device 'dev_t' 25657 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25658 * SD_RESERVE, SD_RELEASE 25659 * 25660 * Return Code: 0 or Error Code 25661 */ 25662 25663 static int 25664 sd_reserve_release(dev_t dev, int cmd) 25665 { 25666 struct uscsi_cmd *com = NULL; 25667 struct sd_lun *un = NULL; 25668 char cdb[CDB_GROUP0]; 25669 int rval; 25670 25671 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25672 (cmd == SD_PRIORITY_RESERVE)); 25673 25674 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25675 return (ENXIO); 25676 } 25677 25678 /* instantiate and initialize the command and cdb */ 25679 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25680 bzero(cdb, CDB_GROUP0); 25681 com->uscsi_flags = USCSI_SILENT; 25682 com->uscsi_timeout = un->un_reserve_release_time; 25683 com->uscsi_cdblen = CDB_GROUP0; 25684 com->uscsi_cdb = cdb; 25685 if (cmd == SD_RELEASE) { 25686 cdb[0] = SCMD_RELEASE; 25687 } else { 25688 cdb[0] = SCMD_RESERVE; 25689 } 25690 25691 /* Send the command. */ 25692 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25693 SD_PATH_STANDARD); 25694 25695 /* 25696 * "break" a reservation that is held by another host, by issuing a 25697 * reset if priority reserve is desired, and we could not get the 25698 * device. 25699 */ 25700 if ((cmd == SD_PRIORITY_RESERVE) && 25701 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25702 /* 25703 * First try to reset the LUN. If we cannot, then try a target 25704 * reset, followed by a bus reset if the target reset fails. 25705 */ 25706 int reset_retval = 0; 25707 if (un->un_f_lun_reset_enabled == TRUE) { 25708 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25709 } 25710 if (reset_retval == 0) { 25711 /* The LUN reset either failed or was not issued */ 25712 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25713 } 25714 if ((reset_retval == 0) && 25715 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25716 rval = EIO; 25717 kmem_free(com, sizeof (*com)); 25718 return (rval); 25719 } 25720 25721 bzero(com, sizeof (struct uscsi_cmd)); 25722 com->uscsi_flags = USCSI_SILENT; 25723 com->uscsi_cdb = cdb; 25724 com->uscsi_cdblen = CDB_GROUP0; 25725 com->uscsi_timeout = 5; 25726 25727 /* 25728 * Reissue the last reserve command, this time without request 25729 * sense. Assume that it is just a regular reserve command. 25730 */ 25731 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25732 SD_PATH_STANDARD); 25733 } 25734 25735 /* Return an error if still getting a reservation conflict. 
	 */
25736 	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25737 		rval = EACCES;
25738 	}
25739 
25740 	kmem_free(com, sizeof (*com));
25741 	return (rval);
25742 }
25743 
25744 
25745 #define	SD_NDUMP_RETRIES	12
25746 /*
25747  * System Crash Dump routine
25748  */
25749 
25750 static int
25751 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25752 {
25753 	int		instance;
25754 	int		partition;
25755 	int		i;
25756 	int		err;
25757 	struct sd_lun	*un;
25758 	struct scsi_pkt *wr_pktp;
25759 	struct buf	*wr_bp;
25760 	struct buf	wr_buf;
25761 	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
25762 	daddr_t		tgt_blkno;	/* rmw - blkno for target */
25763 	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
25764 	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
25765 	size_t		io_start_offset;
25766 	int		doing_rmw = FALSE;
25767 	int		rval;
25768 	ssize_t		dma_resid;
25769 	daddr_t		oblkno;
25770 	diskaddr_t	nblks = 0;
25771 	diskaddr_t	start_block;
25772 
25773 	instance = SDUNIT(dev);
25774 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25775 	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25776 		return (ENXIO);
25777 	}
25778 
25779 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25780 
25781 	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25782 
25783 	partition = SDPART(dev);
25784 	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25785 
25786 	if (!(NOT_DEVBSIZE(un))) {
25787 		int secmask = 0;
25788 		int blknomask = 0;
25789 
25790 		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25791 		secmask = un->un_tgt_blocksize - 1;
25792 
25793 		if (blkno & blknomask) {
25794 			SD_TRACE(SD_LOG_DUMP, un,
25795 			    "sddump: dump start block not modulo %d\n",
25796 			    un->un_tgt_blocksize);
25797 			return (EINVAL);
25798 		}
25799 
25800 		if ((nblk * DEV_BSIZE) & secmask) {
25801 			SD_TRACE(SD_LOG_DUMP, un,
25802 			    "sddump: dump length not modulo %d\n",
25803 			    un->un_tgt_blocksize);
25804 			return (EINVAL);
25805 		}
25806 
25807 	}
25808 
25809 	/* Validate blocks to dump against the partition size. */
25810 
25811 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
25812 	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25813 
25814 	if (NOT_DEVBSIZE(un)) {
25815 		if ((blkno + nblk) > nblks) {
25816 			SD_TRACE(SD_LOG_DUMP, un,
25817 			    "sddump: dump range larger than partition: "
25818 			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25819 			    blkno, nblk, nblks);
25820 			return (EINVAL);
25821 		}
25822 	} else {
25823 		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25824 		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25825 			SD_TRACE(SD_LOG_DUMP, un,
25826 			    "sddump: dump range larger than partition: "
25827 			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25828 			    blkno, nblk, nblks);
25829 			return (EINVAL);
25830 		}
25831 	}
25832 
25833 	mutex_enter(&un->un_pm_mutex);
25834 	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25835 		struct scsi_pkt *start_pktp;
25836 
25837 		mutex_exit(&un->un_pm_mutex);
25838 
25839 		/*
25840 		 * Use the pm framework to power on the HBA first.
25841 		 */
25842 		(void) pm_raise_power(SD_DEVINFO(un), 0,
25843 		    SD_PM_STATE_ACTIVE(un));
25844 
25845 		/*
25846 		 * Dump no longer uses sdpower to power on a device; it is
25847 		 * done in-line here so it can run in polled mode.
25848 		 */
25849 
25850 		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25851 
25852 		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25853 		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25854 
25855 		if (start_pktp == NULL) {
25856 			/* We were not given a SCSI packet, fail.
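			 * scsi_init_pkt() is called above with a NULL_FUNC
			 * callback, so it returns NULL instead of blocking
			 * when resources are unavailable; in a crash-dump
			 * context there is nothing to wait on, hence the
			 * immediate EIO.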
			 */
25857 			return (EIO);
25858 		}
25859 		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25860 		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25861 		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25862 		start_pktp->pkt_flags = FLAG_NOINTR;
25863 
25864 		mutex_enter(SD_MUTEX(un));
25865 		SD_FILL_SCSI1_LUN(un, start_pktp);
25866 		mutex_exit(SD_MUTEX(un));
25867 		/*
25868 		 * Scsi_poll returns 0 (success) if the command completes and
25869 		 * the status block is STATUS_GOOD.
25870 		 */
25871 		if (sd_scsi_poll(un, start_pktp) != 0) {
25872 			scsi_destroy_pkt(start_pktp);
25873 			return (EIO);
25874 		}
25875 		scsi_destroy_pkt(start_pktp);
25876 		(void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25877 		    SD_PM_STATE_CHANGE);
25878 	} else {
25879 		mutex_exit(&un->un_pm_mutex);
25880 	}
25881 
25882 	mutex_enter(SD_MUTEX(un));
25883 	un->un_throttle = 0;
25884 
25885 	/*
25886 	 * The first time through, reset the specific target device.
25887 	 * However, when cpr calls sddump we know that sd is in a
25888 	 * good state, so no bus reset is required.
25889 	 * Clear sense data via a Request Sense cmd.
25890 	 * In sddump we don't care about allow_bus_device_reset anymore.
25891 	 */
25892 
25893 	if ((un->un_state != SD_STATE_SUSPENDED) &&
25894 	    (un->un_state != SD_STATE_DUMPING)) {
25895 
25896 		New_state(un, SD_STATE_DUMPING);
25897 
25898 		if (un->un_f_is_fibre == FALSE) {
25899 			mutex_exit(SD_MUTEX(un));
25900 			/*
25901 			 * Attempt a bus reset for parallel scsi.
25902 			 *
25903 			 * Note: A bus reset is required because on some host
25904 			 * systems (i.e. E420R) a bus device reset is
25905 			 * insufficient to reset the state of the target.
25906 			 *
25907 			 * Note: Don't issue the reset for fibre-channel,
25908 			 * because this tends to hang the bus (loop) for
25909 			 * too long while everyone is logging out and in
25910 			 * and the deadman timer for dumping will fire
25911 			 * before the dump is complete.
25912 			 */
25913 			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25914 				mutex_enter(SD_MUTEX(un));
25915 				Restore_state(un);
25916 				mutex_exit(SD_MUTEX(un));
25917 				return (EIO);
25918 			}
25919 
25920 			/* Delay to give the device some recovery time. */
25921 			drv_usecwait(10000);
25922 
25923 			if (sd_send_polled_RQS(un) == SD_FAILURE) {
25924 				SD_INFO(SD_LOG_DUMP, un,
25925 				    "sddump: sd_send_polled_RQS failed\n");
25926 			}
25927 			mutex_enter(SD_MUTEX(un));
25928 		}
25929 	}
25930 
25931 	/*
25932 	 * Convert the partition-relative block number to a
25933 	 * disk physical block number.
25934 	 */
25935 	if (NOT_DEVBSIZE(un)) {
25936 		blkno += start_block;
25937 	} else {
25938 		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25939 		blkno += start_block;
25940 	}
25941 
25942 	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25943 
25944 
25945 	/*
25946 	 * Check if the device has a non-512 block size.
25947 	 */
25948 	wr_bp = NULL;
25949 	if (NOT_DEVBSIZE(un)) {
25950 		tgt_byte_offset = blkno * un->un_sys_blocksize;
25951 		tgt_byte_count = nblk * un->un_sys_blocksize;
25952 		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25953 		    (tgt_byte_count % un->un_tgt_blocksize)) {
25954 			doing_rmw = TRUE;
25955 			/*
25956 			 * Calculate the block number and number of blocks
25957 			 * in terms of the media block size.
25958 			 */
25959 			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25960 			tgt_nblk =
25961 			    ((tgt_byte_offset + tgt_byte_count +
25962 			    (un->un_tgt_blocksize - 1)) /
25963 			    un->un_tgt_blocksize) - tgt_blkno;
25964 
25965 			/*
25966 			 * Invoke the routine which is going to do the read
25967 			 * part of the read-modify-write.
25968 			 * Note that this routine returns a pointer to
25969 			 * a valid bp in wr_bp.
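			 *
			 * As a worked illustration with hypothetical numbers:
			 * for 512-byte system blocks on a 4096-byte-sector
			 * target, dumping nblk = 2 blocks at blkno = 7 gives
			 *
			 *	tgt_byte_offset = 7 * 512 = 3584
			 *	tgt_byte_count	= 2 * 512 = 1024
			 *	tgt_blkno	= 3584 / 4096 = 0
			 *	tgt_nblk	= ((3584 + 1024 + 4095) / 4096)
			 *			    - 0 = 2
			 *
			 * so two whole target blocks are read here, patched
			 * at io_start_offset = 3584 below, and written back.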
25970 */ 25971 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25972 &wr_bp); 25973 if (err) { 25974 mutex_exit(SD_MUTEX(un)); 25975 return (err); 25976 } 25977 /* 25978 * Offset is being calculated as - 25979 * (original block # * system block size) - 25980 * (new block # * target block size) 25981 */ 25982 io_start_offset = 25983 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25984 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25985 25986 ASSERT((io_start_offset >= 0) && 25987 (io_start_offset < un->un_tgt_blocksize)); 25988 /* 25989 * Do the modify portion of read modify write. 25990 */ 25991 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25992 (size_t)nblk * un->un_sys_blocksize); 25993 } else { 25994 doing_rmw = FALSE; 25995 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25996 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25997 } 25998 25999 /* Convert blkno and nblk to target blocks */ 26000 blkno = tgt_blkno; 26001 nblk = tgt_nblk; 26002 } else { 26003 wr_bp = &wr_buf; 26004 bzero(wr_bp, sizeof (struct buf)); 26005 wr_bp->b_flags = B_BUSY; 26006 wr_bp->b_un.b_addr = addr; 26007 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26008 wr_bp->b_resid = 0; 26009 } 26010 26011 mutex_exit(SD_MUTEX(un)); 26012 26013 /* 26014 * Obtain a SCSI packet for the write command. 26015 * It should be safe to call the allocator here without 26016 * worrying about being locked for DVMA mapping because 26017 * the address we're passed is already a DVMA mapping 26018 * 26019 * We are also not going to worry about semaphore ownership 26020 * in the dump buffer. Dumping is single threaded at present. 26021 */ 26022 26023 wr_pktp = NULL; 26024 26025 dma_resid = wr_bp->b_bcount; 26026 oblkno = blkno; 26027 26028 if (!(NOT_DEVBSIZE(un))) { 26029 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 26030 } 26031 26032 while (dma_resid != 0) { 26033 26034 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26035 wr_bp->b_flags &= ~B_ERROR; 26036 26037 if (un->un_partial_dma_supported == 1) { 26038 blkno = oblkno + 26039 ((wr_bp->b_bcount - dma_resid) / 26040 un->un_tgt_blocksize); 26041 nblk = dma_resid / un->un_tgt_blocksize; 26042 26043 if (wr_pktp) { 26044 /* 26045 * Partial DMA transfers after initial transfer 26046 */ 26047 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26048 blkno, nblk); 26049 } else { 26050 /* Initial transfer */ 26051 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26052 un->un_pkt_flags, NULL_FUNC, NULL, 26053 blkno, nblk); 26054 } 26055 } else { 26056 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26057 0, NULL_FUNC, NULL, blkno, nblk); 26058 } 26059 26060 if (rval == 0) { 26061 /* We were given a SCSI packet, continue. 
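			 *
			 * To recap the partial-DMA bookkeeping above: each
			 * pass re-derives its position from the running
			 * resid, advancing blkno by the target blocks already
			 * moved ((b_bcount - dma_resid) / un_tgt_blocksize)
			 * and shrinking nblk to what is still outstanding, so
			 * the next packet resumes exactly where the last one
			 * stopped.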
*/ 26062 break; 26063 } 26064 26065 if (i == 0) { 26066 if (wr_bp->b_flags & B_ERROR) { 26067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26068 "no resources for dumping; " 26069 "error code: 0x%x, retrying", 26070 geterror(wr_bp)); 26071 } else { 26072 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26073 "no resources for dumping; retrying"); 26074 } 26075 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26076 if (wr_bp->b_flags & B_ERROR) { 26077 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26078 "no resources for dumping; error code: " 26079 "0x%x, retrying\n", geterror(wr_bp)); 26080 } 26081 } else { 26082 if (wr_bp->b_flags & B_ERROR) { 26083 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26084 "no resources for dumping; " 26085 "error code: 0x%x, retries failed, " 26086 "giving up.\n", geterror(wr_bp)); 26087 } else { 26088 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26089 "no resources for dumping; " 26090 "retries failed, giving up.\n"); 26091 } 26092 mutex_enter(SD_MUTEX(un)); 26093 Restore_state(un); 26094 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26095 mutex_exit(SD_MUTEX(un)); 26096 scsi_free_consistent_buf(wr_bp); 26097 } else { 26098 mutex_exit(SD_MUTEX(un)); 26099 } 26100 return (EIO); 26101 } 26102 drv_usecwait(10000); 26103 } 26104 26105 if (un->un_partial_dma_supported == 1) { 26106 /* 26107 * save the resid from PARTIAL_DMA 26108 */ 26109 dma_resid = wr_pktp->pkt_resid; 26110 if (dma_resid != 0) 26111 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26112 wr_pktp->pkt_resid = 0; 26113 } else { 26114 dma_resid = 0; 26115 } 26116 26117 /* SunBug 1222170 */ 26118 wr_pktp->pkt_flags = FLAG_NOINTR; 26119 26120 err = EIO; 26121 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26122 26123 /* 26124 * Scsi_poll returns 0 (success) if the command completes and 26125 * the status block is STATUS_GOOD. We should only check 26126 * errors if this condition is not true. Even then we should 26127 * send our own request sense packet only if we have a check 26128 * condition and auto request sense has not been performed by 26129 * the hba. 26130 */ 26131 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26132 26133 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26134 (wr_pktp->pkt_resid == 0)) { 26135 err = SD_SUCCESS; 26136 break; 26137 } 26138 26139 /* 26140 * Check CMD_DEV_GONE 1st, give up if device is gone. 
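		 *
		 * The recovery ladder below, in order: a CHECK CONDITION
		 * gets a polled REQUEST SENSE (unless auto-sense already
		 * ran); BUSY gets a LUN reset, falling back to a target
		 * reset; any other failing status resets the target; and
		 * halfway through the retries the whole bus is reset as a
		 * last resort.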
26141 */ 26142 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26143 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26144 "Error while dumping state...Device is gone\n"); 26145 break; 26146 } 26147 26148 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26149 SD_INFO(SD_LOG_DUMP, un, 26150 "sddump: write failed with CHECK, try # %d\n", i); 26151 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26152 (void) sd_send_polled_RQS(un); 26153 } 26154 26155 continue; 26156 } 26157 26158 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26159 int reset_retval = 0; 26160 26161 SD_INFO(SD_LOG_DUMP, un, 26162 "sddump: write failed with BUSY, try # %d\n", i); 26163 26164 if (un->un_f_lun_reset_enabled == TRUE) { 26165 reset_retval = scsi_reset(SD_ADDRESS(un), 26166 RESET_LUN); 26167 } 26168 if (reset_retval == 0) { 26169 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26170 } 26171 (void) sd_send_polled_RQS(un); 26172 26173 } else { 26174 SD_INFO(SD_LOG_DUMP, un, 26175 "sddump: write failed with 0x%x, try # %d\n", 26176 SD_GET_PKT_STATUS(wr_pktp), i); 26177 mutex_enter(SD_MUTEX(un)); 26178 sd_reset_target(un, wr_pktp); 26179 mutex_exit(SD_MUTEX(un)); 26180 } 26181 26182 /* 26183 * If we are not getting anywhere with lun/target resets, 26184 * let's reset the bus. 26185 */ 26186 if (i == SD_NDUMP_RETRIES/2) { 26187 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26188 (void) sd_send_polled_RQS(un); 26189 } 26190 } 26191 } 26192 26193 scsi_destroy_pkt(wr_pktp); 26194 mutex_enter(SD_MUTEX(un)); 26195 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26196 mutex_exit(SD_MUTEX(un)); 26197 scsi_free_consistent_buf(wr_bp); 26198 } else { 26199 mutex_exit(SD_MUTEX(un)); 26200 } 26201 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26202 return (err); 26203 } 26204 26205 /* 26206 * Function: sd_scsi_poll() 26207 * 26208 * Description: This is a wrapper for the scsi_poll call. 26209 * 26210 * Arguments: sd_lun - The unit structure 26211 * scsi_pkt - The scsi packet being sent to the device. 26212 * 26213 * Return Code: 0 - Command completed successfully with good status 26214 * -1 - Command failed. This could indicate a check condition 26215 * or other status value requiring recovery action. 26216 * 26217 * NOTE: This code is only called off sddump(). 26218 */ 26219 26220 static int 26221 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26222 { 26223 int status; 26224 26225 ASSERT(un != NULL); 26226 ASSERT(!mutex_owned(SD_MUTEX(un))); 26227 ASSERT(pktp != NULL); 26228 26229 status = SD_SUCCESS; 26230 26231 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26232 pktp->pkt_flags |= un->un_tagflags; 26233 pktp->pkt_flags &= ~FLAG_NODISCON; 26234 } 26235 26236 status = sd_ddi_scsi_poll(pktp); 26237 /* 26238 * Scsi_poll returns 0 (success) if the command completes and the 26239 * status block is STATUS_GOOD. We should only check errors if this 26240 * condition is not true. Even then we should send our own request 26241 * sense packet only if we have a check condition and auto 26242 * request sense has not been performed by the hba. 26243 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26244 */ 26245 if ((status != SD_SUCCESS) && 26246 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26247 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26248 (pktp->pkt_reason != CMD_DEV_GONE)) 26249 (void) sd_send_polled_RQS(un); 26250 26251 return (status); 26252 } 26253 26254 /* 26255 * Function: sd_send_polled_RQS() 26256 * 26257 * Description: This sends the request sense command to a device. 
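 *
 *		The driver keeps one pre-allocated request-sense packet
 *		(un_rqs_pktp) and buffer (un_rqs_bp) per LUN; the
 *		un_sense_isbusy flag, tested under SD_MUTEX, ensures a
 *		single user at a time. If the packet is busy, this routine
 *		fails immediately rather than blocking, since it runs in
 *		polled mode.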
26258  *
26259  * Arguments: sd_lun - The unit structure
26260  *
26261  * Return Code: 0 - Command completed successfully with good status
26262  *             -1 - Command failed.
26263  *
26264  */
26265 
26266 static int
26267 sd_send_polled_RQS(struct sd_lun *un)
26268 {
26269 	int	ret_val;
26270 	struct	scsi_pkt	*rqs_pktp;
26271 	struct	buf		*rqs_bp;
26272 
26273 	ASSERT(un != NULL);
26274 	ASSERT(!mutex_owned(SD_MUTEX(un)));
26275 
26276 	ret_val = SD_SUCCESS;
26277 
26278 	rqs_pktp = un->un_rqs_pktp;
26279 	rqs_bp	 = un->un_rqs_bp;
26280 
26281 	mutex_enter(SD_MUTEX(un));
26282 
26283 	if (un->un_sense_isbusy) {
26284 		ret_val = SD_FAILURE;
26285 		mutex_exit(SD_MUTEX(un));
26286 		return (ret_val);
26287 	}
26288 
26289 	/*
26290 	 * If the request sense buffer (and packet) is not in use,
26291 	 * let's set the un_sense_isbusy and send our packet
26292 	 */
26293 	un->un_sense_isbusy = 1;
26294 	rqs_pktp->pkt_resid = 0;
26295 	rqs_pktp->pkt_reason = 0;
26296 	rqs_pktp->pkt_flags |= FLAG_NOINTR;
26297 	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26298 
26299 	mutex_exit(SD_MUTEX(un));
26300 
26301 	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26302 	    " 0x%p\n", rqs_bp->b_un.b_addr);
26303 
26304 	/*
26305 	 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26306 	 * axle - it has a call into us!
26307 	 */
26308 	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26309 		SD_INFO(SD_LOG_COMMON, un,
26310 		    "sd_send_polled_RQS: RQS failed\n");
26311 	}
26312 
26313 	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26314 	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26315 
26316 	mutex_enter(SD_MUTEX(un));
26317 	un->un_sense_isbusy = 0;
26318 	mutex_exit(SD_MUTEX(un));
26319 
26320 	return (ret_val);
26321 }
26322 
26323 /*
26324  * Defines needed for localized version of the scsi_poll routine.
26325  */
26326 #define	CSEC		10000	/* usecs */
26327 #define	SEC_TO_CSEC	(1000000/CSEC)
26328 
26329 /*
26330  * Function: sd_ddi_scsi_poll()
26331  *
26332  * Description: Localized version of the scsi_poll routine. The purpose is to
26333  *		send a scsi_pkt to a device as a polled command. This version
26334  *		is to ensure more robust handling of transport errors.
26335  *		Specifically, this routine cures the not-ready to ready
26336  *		transition seen on power-up and reset of Sonoma devices.
26337  *		This can take up to 45 seconds for power-on and 20 seconds
26338  *		for reset of a Sonoma LUN.
26339  *
26340  * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26341  *
26342  * Return Code: 0 - Command completed successfully with good status
26343  *             -1 - Command failed.
26344  *
26345  * NOTE: This code is almost identical to scsi_poll; however, before 6668774
26346  * can be fixed (removing this code), we need to determine how to handle the
26347  * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26348  *
26349  * NOTE: This code is only called off sddump().
26350  */
26351 static int
26352 sd_ddi_scsi_poll(struct scsi_pkt *pkt)
26353 {
26354 	int	rval = -1;
26355 	int	savef;
26356 	long	savet;
26357 	void	(*savec)();
26358 	int	timeout;
26359 	int	busy_count;
26360 	int	poll_delay;
26361 	int	rc;
26362 	uint8_t	*sensep;
26363 	struct scsi_arq_status *arqstat;
26364 	extern int do_polled_io;
26365 
26366 	ASSERT(pkt->pkt_scbp);
26367 
26368 	/*
26369 	 * Save the old flags...
26370 */ 26371 savef = pkt->pkt_flags; 26372 savec = pkt->pkt_comp; 26373 savet = pkt->pkt_time; 26374 26375 pkt->pkt_flags |= FLAG_NOINTR; 26376 26377 /* 26378 * XXX there is nothing in the SCSA spec that states that we should not 26379 * do a callback for polled cmds; however, removing this will break sd 26380 * and probably other target drivers 26381 */ 26382 pkt->pkt_comp = NULL; 26383 26384 /* 26385 * we don't like a polled command without timeout. 26386 * 60 seconds seems long enough. 26387 */ 26388 if (pkt->pkt_time == 0) 26389 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26390 26391 /* 26392 * Send polled cmd. 26393 * 26394 * We do some error recovery for various errors. Tran_busy, 26395 * queue full, and non-dispatched commands are retried every 10 msec. 26396 * as they are typically transient failures. Busy status and Not 26397 * Ready are retried every second as this status takes a while to 26398 * change. 26399 */ 26400 timeout = pkt->pkt_time * SEC_TO_CSEC; 26401 26402 for (busy_count = 0; busy_count < timeout; busy_count++) { 26403 /* 26404 * Initialize pkt status variables. 26405 */ 26406 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26407 26408 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26409 if (rc != TRAN_BUSY) { 26410 /* Transport failed - give up. */ 26411 break; 26412 } else { 26413 /* Transport busy - try again. */ 26414 poll_delay = 1 * CSEC; /* 10 msec. */ 26415 } 26416 } else { 26417 /* 26418 * Transport accepted - check pkt status. 26419 */ 26420 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26421 if ((pkt->pkt_reason == CMD_CMPLT) && 26422 (rc == STATUS_CHECK) && 26423 (pkt->pkt_state & STATE_ARQ_DONE)) { 26424 arqstat = 26425 (struct scsi_arq_status *)(pkt->pkt_scbp); 26426 sensep = (uint8_t *)&arqstat->sts_sensedata; 26427 } else { 26428 sensep = NULL; 26429 } 26430 26431 if ((pkt->pkt_reason == CMD_CMPLT) && 26432 (rc == STATUS_GOOD)) { 26433 /* No error - we're done */ 26434 rval = 0; 26435 break; 26436 26437 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26438 /* Lost connection - give up */ 26439 break; 26440 26441 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26442 (pkt->pkt_state == 0)) { 26443 /* Pkt not dispatched - try again. */ 26444 poll_delay = 1 * CSEC; /* 10 msec. */ 26445 26446 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26447 (rc == STATUS_QFULL)) { 26448 /* Queue full - try again. */ 26449 poll_delay = 1 * CSEC; /* 10 msec. */ 26450 26451 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26452 (rc == STATUS_BUSY)) { 26453 /* Busy - try again. */ 26454 poll_delay = 100 * CSEC; /* 1 sec. */ 26455 busy_count += (SEC_TO_CSEC - 1); 26456 26457 } else if ((sensep != NULL) && 26458 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26459 /* 26460 * Unit Attention - try again. 26461 * Pretend it took 1 sec. 26462 * NOTE: 'continue' avoids poll_delay 26463 */ 26464 busy_count += (SEC_TO_CSEC - 1); 26465 continue; 26466 26467 } else if ((sensep != NULL) && 26468 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26469 (scsi_sense_asc(sensep) == 0x04) && 26470 (scsi_sense_ascq(sensep) == 0x01)) { 26471 /* 26472 * Not ready -> ready - try again. 26473 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26474 * ...same as STATUS_BUSY 26475 */ 26476 poll_delay = 100 * CSEC; /* 1 sec. */ 26477 busy_count += (SEC_TO_CSEC - 1); 26478 26479 } else { 26480 /* BAD status - give up. 
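			 *
			 * To illustrate the retry accounting above: with
			 * pkt_time = 60s the budget is timeout = 60 *
			 * SEC_TO_CSEC = 6000 iterations of nominally 10 msec
			 * each. Retries that sleep a full second (BUSY, NOT
			 * READY) also add SEC_TO_CSEC - 1 = 99 to busy_count,
			 * so either way one second of wall time consumes one
			 * second of budget.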
*/ 26481 break; 26482 } 26483 } 26484 26485 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26486 !do_polled_io) { 26487 delay(drv_usectohz(poll_delay)); 26488 } else { 26489 /* we busy wait during cpr_dump or interrupt threads */ 26490 drv_usecwait(poll_delay); 26491 } 26492 } 26493 26494 pkt->pkt_flags = savef; 26495 pkt->pkt_comp = savec; 26496 pkt->pkt_time = savet; 26497 26498 /* return on error */ 26499 if (rval) 26500 return (rval); 26501 26502 /* 26503 * This is not a performance critical code path. 26504 * 26505 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26506 * issues associated with looking at DMA memory prior to 26507 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26508 */ 26509 scsi_sync_pkt(pkt); 26510 return (0); 26511 } 26512 26513 26514 26515 /* 26516 * Function: sd_persistent_reservation_in_read_keys 26517 * 26518 * Description: This routine is the driver entry point for handling CD-ROM 26519 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26520 * by sending the SCSI-3 PRIN commands to the device. 26521 * Processes the read keys command response by copying the 26522 * reservation key information into the user provided buffer. 26523 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26524 * 26525 * Arguments: un - Pointer to soft state struct for the target. 26526 * usrp - user provided pointer to multihost Persistent In Read 26527 * Keys structure (mhioc_inkeys_t) 26528 * flag - this argument is a pass through to ddi_copyxxx() 26529 * directly from the mode argument of ioctl(). 26530 * 26531 * Return Code: 0 - Success 26532 * EACCES 26533 * ENOTSUP 26534 * errno return code from sd_send_scsi_cmd() 26535 * 26536 * Context: Can sleep. Does not return until command is completed. 26537 */ 26538 26539 static int 26540 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26541 mhioc_inkeys_t *usrp, int flag) 26542 { 26543 #ifdef _MULTI_DATAMODEL 26544 struct mhioc_key_list32 li32; 26545 #endif 26546 sd_prin_readkeys_t *in; 26547 mhioc_inkeys_t *ptr; 26548 mhioc_key_list_t li; 26549 uchar_t *data_bufp; 26550 int data_len; 26551 int rval = 0; 26552 size_t copysz; 26553 sd_ssc_t *ssc; 26554 26555 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26556 return (EINVAL); 26557 } 26558 bzero(&li, sizeof (mhioc_key_list_t)); 26559 26560 ssc = sd_ssc_init(un); 26561 26562 /* 26563 * Get the listsize from user 26564 */ 26565 #ifdef _MULTI_DATAMODEL 26566 26567 switch (ddi_model_convert_from(flag & FMODELS)) { 26568 case DDI_MODEL_ILP32: 26569 copysz = sizeof (struct mhioc_key_list32); 26570 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26571 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26572 "sd_persistent_reservation_in_read_keys: " 26573 "failed ddi_copyin: mhioc_key_list32_t\n"); 26574 rval = EFAULT; 26575 goto done; 26576 } 26577 li.listsize = li32.listsize; 26578 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26579 break; 26580 26581 case DDI_MODEL_NONE: 26582 copysz = sizeof (mhioc_key_list_t); 26583 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26584 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26585 "sd_persistent_reservation_in_read_keys: " 26586 "failed ddi_copyin: mhioc_key_list_t\n"); 26587 rval = EFAULT; 26588 goto done; 26589 } 26590 break; 26591 } 26592 26593 #else /* ! 
_MULTI_DATAMODEL */ 26594 copysz = sizeof (mhioc_key_list_t); 26595 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26596 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26597 "sd_persistent_reservation_in_read_keys: " 26598 "failed ddi_copyin: mhioc_key_list_t\n"); 26599 rval = EFAULT; 26600 goto done; 26601 } 26602 #endif 26603 26604 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26605 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26606 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26607 26608 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26609 data_len, data_bufp); 26610 if (rval != 0) { 26611 if (rval == EIO) 26612 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26613 else 26614 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26615 goto done; 26616 } 26617 in = (sd_prin_readkeys_t *)data_bufp; 26618 ptr->generation = BE_32(in->generation); 26619 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26620 26621 /* 26622 * Return the min(listsize, listlen) keys 26623 */ 26624 #ifdef _MULTI_DATAMODEL 26625 26626 switch (ddi_model_convert_from(flag & FMODELS)) { 26627 case DDI_MODEL_ILP32: 26628 li32.listlen = li.listlen; 26629 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26630 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26631 "sd_persistent_reservation_in_read_keys: " 26632 "failed ddi_copyout: mhioc_key_list32_t\n"); 26633 rval = EFAULT; 26634 goto done; 26635 } 26636 break; 26637 26638 case DDI_MODEL_NONE: 26639 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26640 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26641 "sd_persistent_reservation_in_read_keys: " 26642 "failed ddi_copyout: mhioc_key_list_t\n"); 26643 rval = EFAULT; 26644 goto done; 26645 } 26646 break; 26647 } 26648 26649 #else /* ! _MULTI_DATAMODEL */ 26650 26651 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26652 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26653 "sd_persistent_reservation_in_read_keys: " 26654 "failed ddi_copyout: mhioc_key_list_t\n"); 26655 rval = EFAULT; 26656 goto done; 26657 } 26658 26659 #endif /* _MULTI_DATAMODEL */ 26660 26661 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26662 li.listsize * MHIOC_RESV_KEY_SIZE); 26663 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26664 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26665 "sd_persistent_reservation_in_read_keys: " 26666 "failed ddi_copyout: keylist\n"); 26667 rval = EFAULT; 26668 } 26669 done: 26670 sd_ssc_fini(ssc); 26671 kmem_free(data_bufp, data_len); 26672 return (rval); 26673 } 26674 26675 26676 /* 26677 * Function: sd_persistent_reservation_in_read_resv 26678 * 26679 * Description: This routine is the driver entry point for handling CD-ROM 26680 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26681 * by sending the SCSI-3 PRIN commands to the device. 26682 * Process the read persistent reservations command response by 26683 * copying the reservation information into the user provided 26684 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26685 * 26686 * Arguments: un - Pointer to soft state struct for the target. 26687 * usrp - user provided pointer to multihost Persistent In Read 26688 * Keys structure (mhioc_inkeys_t) 26689 * flag - this argument is a pass through to ddi_copyxxx() 26690 * directly from the mode argument of ioctl(). 26691 * 26692 * Return Code: 0 - Success 26693 * EACCES 26694 * ENOTSUP 26695 * errno return code from sd_send_scsi_cmd() 26696 * 26697 * Context: Can sleep. Does not return until command is completed. 
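 *
 *		As in the read-keys path above, at most
 *		min(listsize, listlen) descriptors are copied out, while the
 *		listlen returned to the user reflects the full count reported
 *		by the device, so a caller can detect truncation and retry
 *		with a larger list.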
26698  */
26699 
26700 static int
26701 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26702     mhioc_inresvs_t *usrp, int flag)
26703 {
26704 #ifdef _MULTI_DATAMODEL
26705 	struct mhioc_resv_desc_list32 resvlist32;
26706 #endif
26707 	sd_prin_readresv_t	*in;
26708 	mhioc_inresvs_t		*ptr;
26709 	sd_readresv_desc_t	*readresv_ptr;
26710 	mhioc_resv_desc_list_t	resvlist;
26711 	mhioc_resv_desc_t	resvdesc;
26712 	uchar_t			*data_bufp = NULL;
26713 	int			data_len;
26714 	int			rval = 0;
26715 	int			i;
26716 	size_t			copysz;
26717 	mhioc_resv_desc_t	*bufp;
26718 	sd_ssc_t		*ssc;
26719 
26720 	if ((ptr = usrp) == NULL) {
26721 		return (EINVAL);
26722 	}
26723 
26724 	ssc = sd_ssc_init(un);
26725 
26726 	/*
26727 	 * Get the listsize from user
26728 	 */
26729 #ifdef _MULTI_DATAMODEL
26730 	switch (ddi_model_convert_from(flag & FMODELS)) {
26731 	case DDI_MODEL_ILP32:
26732 		copysz = sizeof (struct mhioc_resv_desc_list32);
26733 		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26734 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26735 			    "sd_persistent_reservation_in_read_resv: "
26736 			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26737 			rval = EFAULT;
26738 			goto done;
26739 		}
26740 		resvlist.listsize = resvlist32.listsize;
26741 		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26742 		break;
26743 
26744 	case DDI_MODEL_NONE:
26745 		copysz = sizeof (mhioc_resv_desc_list_t);
26746 		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26747 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26748 			    "sd_persistent_reservation_in_read_resv: "
26749 			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26750 			rval = EFAULT;
26751 			goto done;
26752 		}
26753 		break;
26754 	}
26755 #else /* ! _MULTI_DATAMODEL */
26756 	copysz = sizeof (mhioc_resv_desc_list_t);
26757 	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26758 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26759 		    "sd_persistent_reservation_in_read_resv: "
26760 		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26761 		rval = EFAULT;
26762 		goto done;
26763 	}
26764 #endif /* ! _MULTI_DATAMODEL */
26765 
26766 	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26767 	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26768 	data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26769 
26770 	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26771 	    data_len, data_bufp);
26772 	if (rval != 0) {
26773 		if (rval == EIO)
26774 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26775 		else
26776 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26777 		goto done;
26778 	}
26779 	in = (sd_prin_readresv_t *)data_bufp;
26780 	ptr->generation = BE_32(in->generation);
26781 	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26782 
26783 	/*
26784 	 * Return the min(listsize, listlen) keys
26785 	 */
26786 #ifdef _MULTI_DATAMODEL
26787 
26788 	switch (ddi_model_convert_from(flag & FMODELS)) {
26789 	case DDI_MODEL_ILP32:
26790 		resvlist32.listlen = resvlist.listlen;
26791 		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26792 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26793 			    "sd_persistent_reservation_in_read_resv: "
26794 			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26795 			rval = EFAULT;
26796 			goto done;
26797 		}
26798 		break;
26799 
26800 	case DDI_MODEL_NONE:
26801 		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26802 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26803 			    "sd_persistent_reservation_in_read_resv: "
26804 			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26805 			rval = EFAULT;
26806 			goto done;
26807 		}
26808 		break;
26809 	}
26810 
26811 #else	/* !
_MULTI_DATAMODEL */ 26812 26813 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26814 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26815 "sd_persistent_reservation_in_read_resv: " 26816 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26817 rval = EFAULT; 26818 goto done; 26819 } 26820 26821 #endif /* ! _MULTI_DATAMODEL */ 26822 26823 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26824 bufp = resvlist.list; 26825 copysz = sizeof (mhioc_resv_desc_t); 26826 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26827 i++, readresv_ptr++, bufp++) { 26828 26829 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26830 MHIOC_RESV_KEY_SIZE); 26831 resvdesc.type = readresv_ptr->type; 26832 resvdesc.scope = readresv_ptr->scope; 26833 resvdesc.scope_specific_addr = 26834 BE_32(readresv_ptr->scope_specific_addr); 26835 26836 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26837 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26838 "sd_persistent_reservation_in_read_resv: " 26839 "failed ddi_copyout: resvlist\n"); 26840 rval = EFAULT; 26841 goto done; 26842 } 26843 } 26844 done: 26845 sd_ssc_fini(ssc); 26846 /* only if data_bufp is allocated, we need to free it */ 26847 if (data_bufp) { 26848 kmem_free(data_bufp, data_len); 26849 } 26850 return (rval); 26851 } 26852 26853 26854 /* 26855 * Function: sr_change_blkmode() 26856 * 26857 * Description: This routine is the driver entry point for handling CD-ROM 26858 * block mode ioctl requests. Support for returning and changing 26859 * the current block size in use by the device is implemented. The 26860 * LBA size is changed via a MODE SELECT Block Descriptor. 26861 * 26862 * This routine issues a mode sense with an allocation length of 26863 * 12 bytes for the mode page header and a single block descriptor. 26864 * 26865 * Arguments: dev - the device 'dev_t' 26866 * cmd - the request type; one of CDROMGBLKMODE (get) or 26867 * CDROMSBLKMODE (set) 26868 * data - current block size or requested block size 26869 * flag - this argument is a pass through to ddi_copyxxx() directly 26870 * from the mode argument of ioctl(). 26871 * 26872 * Return Code: the code returned by sd_send_scsi_cmd() 26873 * EINVAL if invalid arguments are provided 26874 * EFAULT if ddi_copyxxx() fails 26875 * ENXIO if fail ddi_get_soft_state 26876 * EIO if invalid mode sense block descriptor length 26877 * 26878 */ 26879 26880 static int 26881 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26882 { 26883 struct sd_lun *un = NULL; 26884 struct mode_header *sense_mhp, *select_mhp; 26885 struct block_descriptor *sense_desc, *select_desc; 26886 int current_bsize; 26887 int rval = EINVAL; 26888 uchar_t *sense = NULL; 26889 uchar_t *select = NULL; 26890 sd_ssc_t *ssc; 26891 26892 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26893 26894 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26895 return (ENXIO); 26896 } 26897 26898 /* 26899 * The block length is changed via the Mode Select block descriptor, the 26900 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26901 * required as part of this routine. Therefore the mode sense allocation 26902 * length is specified to be the length of a mode page header and a 26903 * block descriptor. 
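	 *
	 * Concretely, that makes BUFLEN_CHG_BLK_MODE the 4-byte mode header
	 * plus one 8-byte block descriptor (12 bytes total), and the LBA
	 * size travels in the descriptor's blksize_hi/mid/lo bytes; 2048,
	 * for example, is encoded as 0x00/0x08/0x00.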
26904 	 */
26905 	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26906 
26907 	ssc = sd_ssc_init(un);
26908 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26909 	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26910 	sd_ssc_fini(ssc);
26911 	if (rval != 0) {
26912 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26913 		    "sr_change_blkmode: Mode Sense Failed\n");
26914 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26915 		return (rval);
26916 	}
26917 
26918 	/* Check the block descriptor len to handle only 1 block descriptor */
26919 	sense_mhp = (struct mode_header *)sense;
26920 	if ((sense_mhp->bdesc_length == 0) ||
26921 	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26922 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26923 		    "sr_change_blkmode: Mode Sense returned invalid block"
26924 		    " descriptor length\n");
26925 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26926 		return (EIO);
26927 	}
26928 	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26929 	current_bsize = ((sense_desc->blksize_hi << 16) |
26930 	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26931 
26932 	/* Process command */
26933 	switch (cmd) {
26934 	case CDROMGBLKMODE:
26935 		/* Return the block size obtained during the mode sense */
26936 		if (ddi_copyout(&current_bsize, (void *)data,
26937 		    sizeof (int), flag) != 0)
26938 			rval = EFAULT;
26939 		break;
26940 	case CDROMSBLKMODE:
26941 		/* Validate the requested block size */
26942 		switch (data) {
26943 		case CDROM_BLK_512:
26944 		case CDROM_BLK_1024:
26945 		case CDROM_BLK_2048:
26946 		case CDROM_BLK_2056:
26947 		case CDROM_BLK_2336:
26948 		case CDROM_BLK_2340:
26949 		case CDROM_BLK_2352:
26950 		case CDROM_BLK_2368:
26951 		case CDROM_BLK_2448:
26952 		case CDROM_BLK_2646:
26953 		case CDROM_BLK_2647:
26954 			break;
26955 		default:
26956 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26957 			    "sr_change_blkmode: "
26958 			    "Block Size '%ld' Not Supported\n", data);
26959 			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26960 			return (EINVAL);
26961 		}
26962 
26963 		/*
26964 		 * The current block size matches the requested block size so
26965 		 * there is no need to send the mode select to change the size
26966 		 */
26967 		if (current_bsize == data) {
26968 			break;
26969 		}
26970 
26971 		/* Build the select data for the requested block size */
26972 		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26973 		select_mhp = (struct mode_header *)select;
26974 		select_desc =
26975 		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
26976 		/*
26977 		 * The LBA size is changed via the block descriptor, so the
26978 		 * descriptor is built according to the user data
26979 		 */
26980 		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
26981 		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
26982 		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
26983 		select_desc->blksize_lo = (char)((data) & 0x000000ff);
26984 
26985 		/* Send the mode select for the requested block size */
26986 		ssc = sd_ssc_init(un);
26987 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26988 		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26989 		    SD_PATH_STANDARD);
26990 		sd_ssc_fini(ssc);
26991 		if (rval != 0) {
26992 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26993 			    "sr_change_blkmode: Mode Select Failed\n");
26994 			/*
26995 			 * The mode select failed for the requested block size,
26996 			 * so reset the data for the original block size and
26997 			 * send it to the target. The error is indicated by the
26998 			 * return value for the failed mode select.
26999 */ 27000 select_desc->blksize_hi = sense_desc->blksize_hi; 27001 select_desc->blksize_mid = sense_desc->blksize_mid; 27002 select_desc->blksize_lo = sense_desc->blksize_lo; 27003 ssc = sd_ssc_init(un); 27004 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27005 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27006 SD_PATH_STANDARD); 27007 sd_ssc_fini(ssc); 27008 } else { 27009 ASSERT(!mutex_owned(SD_MUTEX(un))); 27010 mutex_enter(SD_MUTEX(un)); 27011 sd_update_block_info(un, (uint32_t)data, 0); 27012 mutex_exit(SD_MUTEX(un)); 27013 } 27014 break; 27015 default: 27016 /* should not reach here, but check anyway */ 27017 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27018 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 27019 rval = EINVAL; 27020 break; 27021 } 27022 27023 if (select) { 27024 kmem_free(select, BUFLEN_CHG_BLK_MODE); 27025 } 27026 if (sense) { 27027 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27028 } 27029 return (rval); 27030 } 27031 27032 27033 /* 27034 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 27035 * implement driver support for getting and setting the CD speed. The command 27036 * set used will be based on the device type. If the device has not been 27037 * identified as MMC the Toshiba vendor specific mode page will be used. If 27038 * the device is MMC but does not support the Real Time Streaming feature 27039 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27040 * be used to read the speed. 27041 */ 27042 27043 /* 27044 * Function: sr_change_speed() 27045 * 27046 * Description: This routine is the driver entry point for handling CD-ROM 27047 * drive speed ioctl requests for devices supporting the Toshiba 27048 * vendor specific drive speed mode page. Support for returning 27049 * and changing the current drive speed in use by the device is 27050 * implemented. 27051 * 27052 * Arguments: dev - the device 'dev_t' 27053 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27054 * CDROMSDRVSPEED (set) 27055 * data - current drive speed or requested drive speed 27056 * flag - this argument is a pass through to ddi_copyxxx() directly 27057 * from the mode argument of ioctl(). 27058 * 27059 * Return Code: the code returned by sd_send_scsi_cmd() 27060 * EINVAL if invalid arguments are provided 27061 * EFAULT if ddi_copyxxx() fails 27062 * ENXIO if fail ddi_get_soft_state 27063 * EIO if invalid mode sense block descriptor length 27064 */ 27065 27066 static int 27067 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27068 { 27069 struct sd_lun *un = NULL; 27070 struct mode_header *sense_mhp, *select_mhp; 27071 struct mode_speed *sense_page, *select_page; 27072 int current_speed; 27073 int rval = EINVAL; 27074 int bd_len; 27075 uchar_t *sense = NULL; 27076 uchar_t *select = NULL; 27077 sd_ssc_t *ssc; 27078 27079 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27080 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27081 return (ENXIO); 27082 } 27083 27084 /* 27085 * Note: The drive speed is being modified here according to a Toshiba 27086 * vendor specific mode page (0x31). 
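	 *
	 * The page payload is small: a standard two-byte mode page header
	 * (page code 0x31, page length 2) followed by the speed byte, which
	 * is why the select data built below only fills mode_page.code,
	 * mode_page.length and speed.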
27087 	 */
27088 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27089 
27090 	ssc = sd_ssc_init(un);
27091 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27092 	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27093 	    SD_PATH_STANDARD);
27094 	sd_ssc_fini(ssc);
27095 	if (rval != 0) {
27096 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27097 		    "sr_change_speed: Mode Sense Failed\n");
27098 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27099 		return (rval);
27100 	}
27101 	sense_mhp = (struct mode_header *)sense;
27102 
27103 	/* Check the block descriptor len to handle only 1 block descriptor */
27104 	bd_len = sense_mhp->bdesc_length;
27105 	if (bd_len > MODE_BLK_DESC_LENGTH) {
27106 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27107 		    "sr_change_speed: Mode Sense returned invalid block "
27108 		    "descriptor length\n");
27109 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27110 		return (EIO);
27111 	}
27112 
27113 	sense_page = (struct mode_speed *)
27114 	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27115 	current_speed = sense_page->speed;
27116 
27117 	/* Process command */
27118 	switch (cmd) {
27119 	case CDROMGDRVSPEED:
27120 		/* Return the drive speed obtained during the mode sense */
27121 		if (current_speed == 0x2) {
27122 			current_speed = CDROM_TWELVE_SPEED;
27123 		}
27124 		if (ddi_copyout(&current_speed, (void *)data,
27125 		    sizeof (int), flag) != 0) {
27126 			rval = EFAULT;
27127 		}
27128 		break;
27129 	case CDROMSDRVSPEED:
27130 		/* Validate the requested drive speed */
27131 		switch ((uchar_t)data) {
27132 		case CDROM_TWELVE_SPEED:
27133 			data = 0x2;
27134 			/*FALLTHROUGH*/
27135 		case CDROM_NORMAL_SPEED:
27136 		case CDROM_DOUBLE_SPEED:
27137 		case CDROM_QUAD_SPEED:
27138 		case CDROM_MAXIMUM_SPEED:
27139 			break;
27140 		default:
27141 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27142 			    "sr_change_speed: "
27143 			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27144 			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27145 			return (EINVAL);
27146 		}
27147 
27148 		/*
27149 		 * The current drive speed matches the requested drive speed so
27150 		 * there is no need to send the mode select to change the speed
27151 		 */
27152 		if (current_speed == data) {
27153 			break;
27154 		}
27155 
27156 		/* Build the select data for the requested drive speed */
27157 		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27158 		select_mhp = (struct mode_header *)select;
27159 		select_mhp->bdesc_length = 0;
27160 		select_page =
27161 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27164 		select_page->mode_page.code = CDROM_MODE_SPEED;
27165 		select_page->mode_page.length = 2;
27166 		select_page->speed = (uchar_t)data;
27167 
27168 		/* Send the mode select for the requested drive speed */
27169 		ssc = sd_ssc_init(un);
27170 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27171 		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27172 		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27173 		sd_ssc_fini(ssc);
27174 		if (rval != 0) {
27175 			/*
27176 			 * The mode select failed for the requested drive
27177 			 * speed, so reset the data for the original drive
27178 			 * speed and send it to the target. The error is
27179 			 * indicated by the return value for the failed mode
27179 			 * select.
27180 			 */
27181 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27182 			    "sr_change_speed: Mode Select Failed\n");
27183 			select_page->speed = sense_page->speed;
27184 			ssc = sd_ssc_init(un);
27185 			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27186 			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27187 			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27188 			sd_ssc_fini(ssc);
27189 		}
27190 		break;
27191 	default:
27192 		/* should not reach here, but check anyway */
27193 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27194 		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
27195 		rval = EINVAL;
27196 		break;
27197 	}
27198 
27199 	if (select) {
27200 		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27201 	}
27202 	if (sense) {
27203 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27204 	}
27205 
27206 	return (rval);
27207 }
27208 
27209 
27210 /*
27211  * Function: sr_atapi_change_speed()
27212  *
27213  * Description: This routine is the driver entry point for handling CD-ROM
27214  *		drive speed ioctl requests for MMC devices that do not support
27215  *		the Real Time Streaming feature (0x107).
27216  *
27217  *		Note: This routine will use the SET SPEED command which may not
27218  *		be supported by all devices.
27219  *
27220  * Arguments: dev - the device 'dev_t'
27221  *		cmd - the request type; one of CDROMGDRVSPEED (get) or
27222  *		    CDROMSDRVSPEED (set)
27223  *		data - current drive speed or requested drive speed
27224  *		flag - this argument is a pass through to ddi_copyxxx() directly
27225  *		    from the mode argument of ioctl().
27226  *
27227  * Return Code: the code returned by sd_send_scsi_cmd()
27228  *		EINVAL if invalid arguments are provided
27229  *		EFAULT if ddi_copyxxx() fails
27230  *		ENXIO if fail ddi_get_soft_state
27231  *		EIO if invalid mode sense block descriptor length
27232  */
27233 
27234 static int
27235 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27236 {
27237 	struct sd_lun			*un;
27238 	struct uscsi_cmd		*com = NULL;
27239 	struct mode_header_grp2		*sense_mhp;
27240 	uchar_t				*sense_page;
27241 	uchar_t				*sense = NULL;
27242 	char				cdb[CDB_GROUP5];
27243 	int				bd_len;
27244 	int				current_speed = 0;
27245 	int				max_speed = 0;
27246 	int				rval;
27247 	sd_ssc_t			*ssc;
27248 
27249 	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27250 
27251 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27252 		return (ENXIO);
27253 	}
27254 
27255 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27256 
27257 	ssc = sd_ssc_init(un);
27258 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27259 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27260 	    SD_PATH_STANDARD);
27261 	sd_ssc_fini(ssc);
27262 	if (rval != 0) {
27263 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27264 		    "sr_atapi_change_speed: Mode Sense Failed\n");
27265 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27266 		return (rval);
27267 	}
27268 
27269 	/* Check the block descriptor len to handle only 1 block descriptor */
27270 	sense_mhp = (struct mode_header_grp2 *)sense;
27271 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27272 	if (bd_len > MODE_BLK_DESC_LENGTH) {
27273 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27274 		    "sr_atapi_change_speed: Mode Sense returned invalid "
27275 		    "block descriptor length\n");
27276 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27277 		return (EIO);
27278 	}
27279 
27280 	/* Calculate the current and maximum drive speeds */
27281 	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27282 	current_speed = (sense_page[14] << 8) | sense_page[15];
27283 	max_speed = (sense_page[8] << 8) | sense_page[9];
27284 
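	/*
	 * For orientation (the values here are illustrative, not from the
	 * original source): within the MMC Capabilities page (0x2A),
	 * bytes 8-9 carry the maximum and bytes 14-15 the current read
	 * speed, both in KB/s, with SD_SPEED_1X as the 1x CD rate. A
	 * drive reporting 0x1090 (4240 KB/s) in bytes 14-15 would thus
	 * read back as roughly a 24x unit from CDROMGDRVSPEED below.
	 */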
27285 	/* Process the command */
27286 	switch (cmd) {
27287 	case CDROMGDRVSPEED:
27288 		current_speed /= SD_SPEED_1X;
27289 		if (ddi_copyout(&current_speed, (void *)data,
27290 		    sizeof (int), flag) != 0)
27291 			rval = EFAULT;
27292 		break;
27293 	case CDROMSDRVSPEED:
27294 		/* Convert the speed code to KB/sec */
27295 		switch ((uchar_t)data) {
27296 		case CDROM_NORMAL_SPEED:
27297 			current_speed = SD_SPEED_1X;
27298 			break;
27299 		case CDROM_DOUBLE_SPEED:
27300 			current_speed = 2 * SD_SPEED_1X;
27301 			break;
27302 		case CDROM_QUAD_SPEED:
27303 			current_speed = 4 * SD_SPEED_1X;
27304 			break;
27305 		case CDROM_TWELVE_SPEED:
27306 			current_speed = 12 * SD_SPEED_1X;
27307 			break;
27308 		case CDROM_MAXIMUM_SPEED:
27309 			current_speed = 0xffff;
27310 			break;
27311 		default:
27312 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27313 			    "sr_atapi_change_speed: invalid drive speed %d\n",
27314 			    (uchar_t)data);
27315 			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27316 			return (EINVAL);
27317 		}
27318 
27319 		/* Check the request against the drive's max speed. */
27320 		if (current_speed != 0xffff) {
27321 			if (current_speed > max_speed) {
27322 				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27323 				return (EINVAL);
27324 			}
27325 		}
27326 
27327 		/*
27328 		 * Build and send the SET SPEED command
27329 		 *
27330 		 * Note: The SET SPEED (0xBB) command used in this routine is
27331 		 * obsolete per the SCSI MMC spec but still supported in the
27332 		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
27333 		 * so the command is still implemented in this routine.
27334 		 */
27335 		bzero(cdb, sizeof (cdb));
27336 		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27337 		cdb[2] = (uchar_t)(current_speed >> 8);
27338 		cdb[3] = (uchar_t)current_speed;
27339 		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27340 		com->uscsi_cdb = (caddr_t)cdb;
27341 		com->uscsi_cdblen = CDB_GROUP5;
27342 		com->uscsi_bufaddr = NULL;
27343 		com->uscsi_buflen = 0;
27344 		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27345 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27346 		break;
27347 	default:
27348 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27349 		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27350 		rval = EINVAL;
27351 	}
27352 
27353 	if (sense) {
27354 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27355 	}
27356 	if (com) {
27357 		kmem_free(com, sizeof (*com));
27358 	}
27359 	return (rval);
27360 }
27361 
27362 
27363 /*
27364  * Function: sr_pause_resume()
27365  *
27366  * Description: This routine is the driver entry point for handling CD-ROM
27367  *		pause/resume ioctl requests. This only affects the audio play
27368  *		operation.
27369  *
27370  * Arguments: dev - the device 'dev_t'
27371  *		cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27372  *		    for setting the resume bit of the cdb.
27373  *
27374  * Return Code: the code returned by sd_send_scsi_cmd()
27375  *		EINVAL if an invalid mode is specified
27376  *
27377  */
27378 
27379 static int
27380 sr_pause_resume(dev_t dev, int cmd)
27381 {
27382 	struct sd_lun	*un;
27383 	struct uscsi_cmd	*com;
27384 	char	cdb[CDB_GROUP1];
27385 	int	rval;
27386 
27387 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27388 		return (ENXIO);
27389 	}
27390 
27391 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27392 	bzero(cdb, CDB_GROUP1);
27393 	cdb[0] = SCMD_PAUSE_RESUME;
27394 	switch (cmd) {
27395 	case CDROMRESUME:
27396 		cdb[8] = 1;
27397 		break;
27398 	case CDROMPAUSE:
27399 		cdb[8] = 0;
27400 		break;
27401 	default:
27402 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27403 		    " Command '%x' Not Supported\n", cmd);
27404 		rval = EINVAL;
27405 		goto done;
27406 	}
27407 
27408 	com->uscsi_cdb = cdb;
27409 	com->uscsi_cdblen = CDB_GROUP1;
27410 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27411 
27412 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27413 	    SD_PATH_STANDARD);
27414 
27415 done:
27416 	kmem_free(com, sizeof (*com));
27417 	return (rval);
27418 }
27419 
27420 
27421 /*
27422  * Function: sr_play_msf()
27423  *
27424  * Description: This routine is the driver entry point for handling CD-ROM
27425  *		ioctl requests to output the audio signals at the specified
27426  *		starting address and continue the audio play until the
27427  *		specified ending address (CDROMPLAYMSF). The address is in
27428  *		Minute Second Frame (MSF) format.
27429  *
27430  * Arguments: dev	- the device 'dev_t'
27431  *		data	- pointer to user provided audio msf structure,
27432  *		    specifying start/end addresses.
27433  *		flag	- this argument is a pass through to ddi_copyxxx()
27434  *		    directly from the mode argument of ioctl().
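 *
 *		For orientation: an MSF address maps to a logical block as
 *		lba = ((minute * 60 + second) * 75 + frame) - 150, i.e. 75
 *		frames per second with a two-second lead-in offset, so
 *		00:02:00 is LBA 0.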
27435 * 27436 * Return Code: the code returned by sd_send_scsi_cmd() 27437 * EFAULT if ddi_copyxxx() fails 27438 * ENXIO if fail ddi_get_soft_state 27439 * EINVAL if data pointer is NULL 27440 */ 27441 27442 static int 27443 sr_play_msf(dev_t dev, caddr_t data, int flag) 27444 { 27445 struct sd_lun *un; 27446 struct uscsi_cmd *com; 27447 struct cdrom_msf msf_struct; 27448 struct cdrom_msf *msf = &msf_struct; 27449 char cdb[CDB_GROUP1]; 27450 int rval; 27451 27452 if (data == NULL) { 27453 return (EINVAL); 27454 } 27455 27456 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27457 return (ENXIO); 27458 } 27459 27460 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27461 return (EFAULT); 27462 } 27463 27464 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27465 bzero(cdb, CDB_GROUP1); 27466 cdb[0] = SCMD_PLAYAUDIO_MSF; 27467 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27468 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27469 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27470 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27471 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27472 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27473 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27474 } else { 27475 cdb[3] = msf->cdmsf_min0; 27476 cdb[4] = msf->cdmsf_sec0; 27477 cdb[5] = msf->cdmsf_frame0; 27478 cdb[6] = msf->cdmsf_min1; 27479 cdb[7] = msf->cdmsf_sec1; 27480 cdb[8] = msf->cdmsf_frame1; 27481 } 27482 com->uscsi_cdb = cdb; 27483 com->uscsi_cdblen = CDB_GROUP1; 27484 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27485 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27486 SD_PATH_STANDARD); 27487 kmem_free(com, sizeof (*com)); 27488 return (rval); 27489 } 27490 27491 27492 /* 27493 * Function: sr_play_trkind() 27494 * 27495 * Description: This routine is the driver entry point for handling CD-ROM 27496 * ioctl requests to output the audio signals at the specified 27497 * starting address and continue the audio play until the specified 27498 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27499 * format. 27500 * 27501 * Arguments: dev - the device 'dev_t' 27502 * data - pointer to user provided audio track/index structure, 27503 * specifying start/end addresses. 27504 * flag - this argument is a pass through to ddi_copyxxx() 27505 * directly from the mode argument of ioctl(). 
27506 * 27507 * Return Code: the code returned by sd_send_scsi_cmd() 27508 * EFAULT if ddi_copyxxx() fails 27509 * ENXIO if fail ddi_get_soft_state 27510 * EINVAL if data pointer is NULL 27511 */ 27512 27513 static int 27514 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27515 { 27516 struct cdrom_ti ti_struct; 27517 struct cdrom_ti *ti = &ti_struct; 27518 struct uscsi_cmd *com = NULL; 27519 char cdb[CDB_GROUP1]; 27520 int rval; 27521 27522 if (data == NULL) { 27523 return (EINVAL); 27524 } 27525 27526 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27527 return (EFAULT); 27528 } 27529 27530 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27531 bzero(cdb, CDB_GROUP1); 27532 cdb[0] = SCMD_PLAYAUDIO_TI; 27533 cdb[4] = ti->cdti_trk0; 27534 cdb[5] = ti->cdti_ind0; 27535 cdb[7] = ti->cdti_trk1; 27536 cdb[8] = ti->cdti_ind1; 27537 com->uscsi_cdb = cdb; 27538 com->uscsi_cdblen = CDB_GROUP1; 27539 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27540 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27541 SD_PATH_STANDARD); 27542 kmem_free(com, sizeof (*com)); 27543 return (rval); 27544 } 27545 27546 27547 /* 27548 * Function: sr_read_all_subcodes() 27549 * 27550 * Description: This routine is the driver entry point for handling CD-ROM 27551 * ioctl requests to return raw subcode data while the target is 27552 * playing audio (CDROMSUBCODE). 27553 * 27554 * Arguments: dev - the device 'dev_t' 27555 * data - pointer to user provided cdrom subcode structure, 27556 * specifying the transfer length and address. 27557 * flag - this argument is a pass through to ddi_copyxxx() 27558 * directly from the mode argument of ioctl(). 27559 * 27560 * Return Code: the code returned by sd_send_scsi_cmd() 27561 * EFAULT if ddi_copyxxx() fails 27562 * ENXIO if fail ddi_get_soft_state 27563 * EINVAL if data pointer is NULL 27564 */ 27565 27566 static int 27567 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27568 { 27569 struct sd_lun *un = NULL; 27570 struct uscsi_cmd *com = NULL; 27571 struct cdrom_subcode *subcode = NULL; 27572 int rval; 27573 size_t buflen; 27574 char cdb[CDB_GROUP5]; 27575 27576 #ifdef _MULTI_DATAMODEL 27577 /* To support ILP32 applications in an LP64 world */ 27578 struct cdrom_subcode32 cdrom_subcode32; 27579 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27580 #endif 27581 if (data == NULL) { 27582 return (EINVAL); 27583 } 27584 27585 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27586 return (ENXIO); 27587 } 27588 27589 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27590 27591 #ifdef _MULTI_DATAMODEL 27592 switch (ddi_model_convert_from(flag & FMODELS)) { 27593 case DDI_MODEL_ILP32: 27594 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27596 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27597 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27598 return (EFAULT); 27599 } 27600 /* Convert the ILP32 uscsi data from the application to LP64 */ 27601 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27602 break; 27603 case DDI_MODEL_NONE: 27604 if (ddi_copyin(data, subcode, 27605 sizeof (struct cdrom_subcode), flag)) { 27606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27607 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27608 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27609 return (EFAULT); 27610 } 27611 break; 27612 } 27613 #else /* ! 
_MULTI_DATAMODEL */ 27614 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27615 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27616 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27617 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27618 return (EFAULT); 27619 } 27620 #endif /* _MULTI_DATAMODEL */ 27621 27622 /* 27623 * Since MMC-2 expects max 3 bytes for length, check if the 27624 * length input is greater than 3 bytes 27625 */ 27626 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27627 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27628 "sr_read_all_subcodes: " 27629 "cdrom transfer length too large: %d (limit %d)\n", 27630 subcode->cdsc_length, 0xFFFFFF); 27631 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27632 return (EINVAL); 27633 } 27634 27635 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27636 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27637 bzero(cdb, CDB_GROUP5); 27638 27639 if (un->un_f_mmc_cap == TRUE) { 27640 cdb[0] = (char)SCMD_READ_CD; 27641 cdb[2] = (char)0xff; 27642 cdb[3] = (char)0xff; 27643 cdb[4] = (char)0xff; 27644 cdb[5] = (char)0xff; 27645 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27646 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27647 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27648 cdb[10] = 1; 27649 } else { 27650 /* 27651 * Note: A vendor specific command (0xDF) is being used here to 27652 * request a read of all subcodes. 27653 */ 27654 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27655 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27656 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27657 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27658 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27659 } 27660 com->uscsi_cdb = cdb; 27661 com->uscsi_cdblen = CDB_GROUP5; 27662 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27663 com->uscsi_buflen = buflen; 27664 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27665 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27666 SD_PATH_STANDARD); 27667 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27668 kmem_free(com, sizeof (*com)); 27669 return (rval); 27670 } 27671 27672 27673 /* 27674 * Function: sr_read_subchannel() 27675 * 27676 * Description: This routine is the driver entry point for handling CD-ROM 27677 * ioctl requests to return the Q sub-channel data of the CD 27678 * current position block. (CDROMSUBCHNL) The data includes the 27679 * track number, index number, absolute CD-ROM address (LBA or MSF 27680 * format per the user), track relative CD-ROM address (LBA or MSF 27681 * format per the user), control data and audio status. 27682 * 27683 * Arguments: dev - the device 'dev_t' 27684 * data - pointer to user provided cdrom sub-channel structure 27685 * flag - this argument is a pass through to ddi_copyxxx() 27686 * directly from the mode argument of ioctl().
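 *
 * Usage sketch (illustrative, user level; fd is hypothetical):
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;	request MSF addresses
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		the audio status is in sc.cdsc_audiostatus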
27687 * 27688 * Return Code: the code returned by sd_send_scsi_cmd() 27689 * EFAULT if ddi_copyxxx() fails 27690 * ENXIO if fail ddi_get_soft_state 27691 * EINVAL if data pointer is NULL 27692 */ 27693 27694 static int 27695 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27696 { 27697 struct sd_lun *un; 27698 struct uscsi_cmd *com; 27699 struct cdrom_subchnl subchanel; 27700 struct cdrom_subchnl *subchnl = &subchanel; 27701 char cdb[CDB_GROUP1]; 27702 caddr_t buffer; 27703 int rval; 27704 27705 if (data == NULL) { 27706 return (EINVAL); 27707 } 27708 27709 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27710 (un->un_state == SD_STATE_OFFLINE)) { 27711 return (ENXIO); 27712 } 27713 27714 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27715 return (EFAULT); 27716 } 27717 27718 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27719 bzero(cdb, CDB_GROUP1); 27720 cdb[0] = SCMD_READ_SUBCHANNEL; 27721 /* Set the MSF bit based on the user requested address format */ 27722 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27723 /* 27724 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27725 * returned 27726 */ 27727 cdb[2] = 0x40; 27728 /* 27729 * Set byte 3 to specify the return data format. A value of 0x01 27730 * indicates that the CD-ROM current position should be returned. 27731 */ 27732 cdb[3] = 0x01; 27733 cdb[8] = 0x10; 27734 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27735 com->uscsi_cdb = cdb; 27736 com->uscsi_cdblen = CDB_GROUP1; 27737 com->uscsi_bufaddr = buffer; 27738 com->uscsi_buflen = 16; 27739 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27740 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27741 SD_PATH_STANDARD); 27742 if (rval != 0) { 27743 kmem_free(buffer, 16); 27744 kmem_free(com, sizeof (*com)); 27745 return (rval); 27746 } 27747 27748 /* Process the returned Q sub-channel data */ 27749 subchnl->cdsc_audiostatus = buffer[1]; 27750 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4; 27751 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27752 subchnl->cdsc_trk = buffer[6]; 27753 subchnl->cdsc_ind = buffer[7]; 27754 if (subchnl->cdsc_format & CDROM_LBA) { 27755 subchnl->cdsc_absaddr.lba = 27756 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27757 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27758 subchnl->cdsc_reladdr.lba = 27759 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27760 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27761 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27762 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27763 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27764 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27765 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27766 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27767 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27768 } else { 27769 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27770 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27771 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27772 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27773 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27774 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27775 } 27776 kmem_free(buffer, 16); 27777 kmem_free(com, sizeof (*com)); 27778 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27779 != 0) { 27780 return (EFAULT); 27781 } 27782 return (rval); 27783 } 27784 27785 27786 /* 27787 * Function: sr_read_tocentry() 27788 
* 27789 * Description: This routine is the driver entry point for handling CD-ROM 27790 * ioctl requests to read from the Table of Contents (TOC) 27791 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27792 * fields, the starting address (LBA or MSF format per the user) 27793 * and the data mode if the user specified track is a data track. 27794 * 27795 * Note: The READ HEADER (0x44) command used in this routine is 27796 * obsolete per the SCSI MMC spec but still supported in the 27797 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 27798 * therefore the command is still implemented in this routine. 27799 * 27800 * Arguments: dev - the device 'dev_t' 27801 * data - pointer to user provided toc entry structure, 27802 * specifying the track # and the address format 27803 * (LBA or MSF). 27804 * flag - this argument is a pass through to ddi_copyxxx() 27805 * directly from the mode argument of ioctl(). 27806 * 27807 * Return Code: the code returned by sd_send_scsi_cmd() 27808 * EFAULT if ddi_copyxxx() fails 27809 * ENXIO if fail ddi_get_soft_state 27810 * EINVAL if data pointer is NULL 27811 */ 27812 27813 static int 27814 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27815 { 27816 struct sd_lun *un = NULL; 27817 struct uscsi_cmd *com; 27818 struct cdrom_tocentry toc_entry; 27819 struct cdrom_tocentry *entry = &toc_entry; 27820 caddr_t buffer; 27821 int rval; 27822 char cdb[CDB_GROUP1]; 27823 27824 if (data == NULL) { 27825 return (EINVAL); 27826 } 27827 27828 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27829 (un->un_state == SD_STATE_OFFLINE)) { 27830 return (ENXIO); 27831 } 27832 27833 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27834 return (EFAULT); 27835 } 27836 27837 /* Validate the requested track and address format */ 27838 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27839 return (EINVAL); 27840 } 27841 27842 if (entry->cdte_track == 0) { 27843 return (EINVAL); 27844 } 27845 27846 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27847 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27848 bzero(cdb, CDB_GROUP1); 27849 27850 cdb[0] = SCMD_READ_TOC; 27851 /* Set the MSF bit based on the user requested address format */ 27852 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27853 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27854 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27855 } else { 27856 cdb[6] = entry->cdte_track; 27857 } 27858 27859 /* 27860 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27861 * (4 byte TOC response header + 8 byte track descriptor) 27862 */ 27863 cdb[8] = 12; 27864 com->uscsi_cdb = cdb; 27865 com->uscsi_cdblen = CDB_GROUP1; 27866 com->uscsi_bufaddr = buffer; 27867 com->uscsi_buflen = 0x0C; 27868 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27869 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27870 SD_PATH_STANDARD); 27871 if (rval != 0) { 27872 kmem_free(buffer, 12); 27873 kmem_free(com, sizeof (*com)); 27874 return (rval); 27875 } 27876 27877 /* Process the toc entry */ 27878 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27879 entry->cdte_ctrl = (buffer[5] & 0x0F); 27880 if (entry->cdte_format & CDROM_LBA) { 27881 entry->cdte_addr.lba = 27882 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27883 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27884 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27885 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27886 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27887 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27888 /* 27889 * Send a READ TOC command using the LBA address format to get 27890 * the LBA for the track requested so it can be used in the 27891 * READ HEADER request 27892 * 27893 * Note: The MSF bit of the READ HEADER command specifies the 27894 * output format. The block address specified in that command 27895 * must be in LBA format. 27896 */ 27897 cdb[1] = 0; 27898 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27899 SD_PATH_STANDARD); 27900 if (rval != 0) { 27901 kmem_free(buffer, 12); 27902 kmem_free(com, sizeof (*com)); 27903 return (rval); 27904 } 27905 } else { 27906 entry->cdte_addr.msf.minute = buffer[9]; 27907 entry->cdte_addr.msf.second = buffer[10]; 27908 entry->cdte_addr.msf.frame = buffer[11]; 27909 /* 27910 * Send a READ TOC command using the LBA address format to get 27911 * the LBA for the track requested so it can be used in the 27912 * READ HEADER request 27913 * 27914 * Note: The MSF bit of the READ HEADER command specifies the 27915 * output format. The block address specified in that command 27916 * must be in LBA format. 27917 */ 27918 cdb[1] = 0; 27919 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27920 SD_PATH_STANDARD); 27921 if (rval != 0) { 27922 kmem_free(buffer, 12); 27923 kmem_free(com, sizeof (*com)); 27924 return (rval); 27925 } 27926 } 27927 27928 /* 27929 * Build and send the READ HEADER command to determine the data mode of 27930 * the user specified track. 27931 */ 27932 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27933 (entry->cdte_track != CDROM_LEADOUT)) { 27934 bzero(cdb, CDB_GROUP1); 27935 cdb[0] = SCMD_READ_HEADER; 27936 cdb[2] = buffer[8]; 27937 cdb[3] = buffer[9]; 27938 cdb[4] = buffer[10]; 27939 cdb[5] = buffer[11]; 27940 cdb[8] = 0x08; 27941 com->uscsi_buflen = 0x08; 27942 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27943 SD_PATH_STANDARD); 27944 if (rval == 0) { 27945 entry->cdte_datamode = buffer[0]; 27946 } else { 27947 /* 27948 * The READ HEADER command failed; since it is 27949 * obsolete in one spec, it's better to return 27950 * -1 for an invalid track so that we can still 27951 * receive the rest of the TOC data.
27952 */ 27953 entry->cdte_datamode = (uchar_t)-1; 27954 } 27955 } else { 27956 entry->cdte_datamode = (uchar_t)-1; 27957 } 27958 27959 kmem_free(buffer, 12); 27960 kmem_free(com, sizeof (*com)); 27961 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27962 return (EFAULT); 27963 27964 return (rval); 27965 } 27966 27967 27968 /* 27969 * Function: sr_read_tochdr() 27970 * 27971 * Description: This routine is the driver entry point for handling CD-ROM 27972 * ioctl requests to read the Table of Contents (TOC) header 27973 * (CDROMREADTOHDR). The TOC header consists of the disk starting 27974 * and ending track numbers 27975 * 27976 * Arguments: dev - the device 'dev_t' 27977 * data - pointer to user provided toc header structure, 27978 * specifying the starting and ending track numbers. 27979 * flag - this argument is a pass through to ddi_copyxxx() 27980 * directly from the mode argument of ioctl(). 27981 * 27982 * Return Code: the code returned by sd_send_scsi_cmd() 27983 * EFAULT if ddi_copyxxx() fails 27984 * ENXIO if fail ddi_get_soft_state 27985 * EINVAL if data pointer is NULL 27986 */ 27987 27988 static int 27989 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27990 { 27991 struct sd_lun *un; 27992 struct uscsi_cmd *com; 27993 struct cdrom_tochdr toc_header; 27994 struct cdrom_tochdr *hdr = &toc_header; 27995 char cdb[CDB_GROUP1]; 27996 int rval; 27997 caddr_t buffer; 27998 27999 if (data == NULL) { 28000 return (EINVAL); 28001 } 28002 28003 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28004 (un->un_state == SD_STATE_OFFLINE)) { 28005 return (ENXIO); 28006 } 28007 28008 buffer = kmem_zalloc(4, KM_SLEEP); 28009 bzero(cdb, CDB_GROUP1); 28010 cdb[0] = SCMD_READ_TOC; 28011 /* 28012 * Specifying a track number of 0x00 in the READ TOC command indicates 28013 * that the TOC header should be returned 28014 */ 28015 cdb[6] = 0x00; 28016 /* 28017 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 28018 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 28019 */ 28020 cdb[8] = 0x04; 28021 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28022 com->uscsi_cdb = cdb; 28023 com->uscsi_cdblen = CDB_GROUP1; 28024 com->uscsi_bufaddr = buffer; 28025 com->uscsi_buflen = 0x04; 28026 com->uscsi_timeout = 300; 28027 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28028 28029 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28030 SD_PATH_STANDARD); 28031 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 28032 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 28033 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 28034 } else { 28035 hdr->cdth_trk0 = buffer[2]; 28036 hdr->cdth_trk1 = buffer[3]; 28037 } 28038 kmem_free(buffer, 4); 28039 kmem_free(com, sizeof (*com)); 28040 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 28041 return (EFAULT); 28042 } 28043 return (rval); 28044 } 28045 28046 28047 /* 28048 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 28049 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 28050 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 28051 * digital audio and extended architecture digital audio. These modes are 28052 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 28053 * MMC specs. 
28054 * 28055 * In addition to support for the various data formats these routines also 28056 * include support for devices that implement only the direct access READ 28057 * commands (0x08, 0x28), devices that implement the READ_CD commands 28058 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 28059 * READ CDXA commands (0xD8, 0xDB) 28060 */ 28061 28062 /* 28063 * Function: sr_read_mode1() 28064 * 28065 * Description: This routine is the driver entry point for handling CD-ROM 28066 * ioctl read mode1 requests (CDROMREADMODE1). 28067 * 28068 * Arguments: dev - the device 'dev_t' 28069 * data - pointer to user provided cd read structure specifying 28070 * the lba buffer address and length. 28071 * flag - this argument is a pass through to ddi_copyxxx() 28072 * directly from the mode argument of ioctl(). 28073 * 28074 * Return Code: the code returned by sd_send_scsi_cmd() 28075 * EFAULT if ddi_copyxxx() fails 28076 * ENXIO if fail ddi_get_soft_state 28077 * EINVAL if data pointer is NULL 28078 */ 28079 28080 static int 28081 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28082 { 28083 struct sd_lun *un; 28084 struct cdrom_read mode1_struct; 28085 struct cdrom_read *mode1 = &mode1_struct; 28086 int rval; 28087 sd_ssc_t *ssc; 28088 28089 #ifdef _MULTI_DATAMODEL 28090 /* To support ILP32 applications in an LP64 world */ 28091 struct cdrom_read32 cdrom_read32; 28092 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28093 #endif /* _MULTI_DATAMODEL */ 28094 28095 if (data == NULL) { 28096 return (EINVAL); 28097 } 28098 28099 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28100 (un->un_state == SD_STATE_OFFLINE)) { 28101 return (ENXIO); 28102 } 28103 28104 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28105 "sd_read_mode1: entry: un:0x%p\n", un); 28106 28107 #ifdef _MULTI_DATAMODEL 28108 switch (ddi_model_convert_from(flag & FMODELS)) { 28109 case DDI_MODEL_ILP32: 28110 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28111 return (EFAULT); 28112 } 28113 /* Convert the ILP32 uscsi data from the application to LP64 */ 28114 cdrom_read32tocdrom_read(cdrd32, mode1); 28115 break; 28116 case DDI_MODEL_NONE: 28117 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28118 return (EFAULT); 28119 } 28120 } 28121 #else /* ! _MULTI_DATAMODEL */ 28122 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28123 return (EFAULT); 28124 } 28125 #endif /* _MULTI_DATAMODEL */ 28126 28127 ssc = sd_ssc_init(un); 28128 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 28129 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28130 sd_ssc_fini(ssc); 28131 28132 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28133 "sd_read_mode1: exit: un:0x%p\n", un); 28134 28135 return (rval); 28136 } 28137 28138 28139 /* 28140 * Function: sr_read_cd_mode2() 28141 * 28142 * Description: This routine is the driver entry point for handling CD-ROM 28143 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28144 * support the READ CD (0xBE) command or the 1st generation 28145 * READ CD (0xD4) command. 28146 * 28147 * Arguments: dev - the device 'dev_t' 28148 * data - pointer to user provided cd read structure specifying 28149 * the lba buffer address and length. 28150 * flag - this argument is a pass through to ddi_copyxxx() 28151 * directly from the mode argument of ioctl(). 
28152 * 28153 * Return Code: the code returned by sd_send_scsi_cmd() 28154 * EFAULT if ddi_copyxxx() fails 28155 * ENXIO if fail ddi_get_soft_state 28156 * EINVAL if data pointer is NULL 28157 */ 28158 28159 static int 28160 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 28161 { 28162 struct sd_lun *un; 28163 struct uscsi_cmd *com; 28164 struct cdrom_read mode2_struct; 28165 struct cdrom_read *mode2 = &mode2_struct; 28166 uchar_t cdb[CDB_GROUP5]; 28167 int nblocks; 28168 int rval; 28169 #ifdef _MULTI_DATAMODEL 28170 /* To support ILP32 applications in an LP64 world */ 28171 struct cdrom_read32 cdrom_read32; 28172 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28173 #endif /* _MULTI_DATAMODEL */ 28174 28175 if (data == NULL) { 28176 return (EINVAL); 28177 } 28178 28179 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28180 (un->un_state == SD_STATE_OFFLINE)) { 28181 return (ENXIO); 28182 } 28183 28184 #ifdef _MULTI_DATAMODEL 28185 switch (ddi_model_convert_from(flag & FMODELS)) { 28186 case DDI_MODEL_ILP32: 28187 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28188 return (EFAULT); 28189 } 28190 /* Convert the ILP32 uscsi data from the application to LP64 */ 28191 cdrom_read32tocdrom_read(cdrd32, mode2); 28192 break; 28193 case DDI_MODEL_NONE: 28194 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28195 return (EFAULT); 28196 } 28197 break; 28198 } 28199 28200 #else /* ! _MULTI_DATAMODEL */ 28201 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28202 return (EFAULT); 28203 } 28204 #endif /* _MULTI_DATAMODEL */ 28205 28206 bzero(cdb, sizeof (cdb)); 28207 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 28208 /* Read command supported by 1st generation atapi drives */ 28209 cdb[0] = SCMD_READ_CDD4; 28210 } else { 28211 /* Universal CD Access Command */ 28212 cdb[0] = SCMD_READ_CD; 28213 } 28214 28215 /* 28216 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 28217 */ 28218 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 28219 28220 /* set the start address */ 28221 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 28222 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 28223 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28224 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 28225 28226 /* set the transfer length */ 28227 nblocks = mode2->cdread_buflen / 2336; 28228 cdb[6] = (uchar_t)(nblocks >> 16); 28229 cdb[7] = (uchar_t)(nblocks >> 8); 28230 cdb[8] = (uchar_t)nblocks; 28231 28232 /* set the filter bits */ 28233 cdb[9] = CDROM_READ_CD_USERDATA; 28234 28235 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28236 com->uscsi_cdb = (caddr_t)cdb; 28237 com->uscsi_cdblen = sizeof (cdb); 28238 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28239 com->uscsi_buflen = mode2->cdread_buflen; 28240 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28241 28242 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28243 SD_PATH_STANDARD); 28244 kmem_free(com, sizeof (*com)); 28245 return (rval); 28246 } 28247 28248 28249 /* 28250 * Function: sr_read_mode2() 28251 * 28252 * Description: This routine is the driver entry point for handling CD-ROM 28253 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28254 * do not support the READ CD (0xBE) command. 28255 * 28256 * Arguments: dev - the device 'dev_t' 28257 * data - pointer to user provided cd read structure specifying 28258 * the lba buffer address and length. 28259 * flag - this argument is a pass through to ddi_copyxxx() 28260 * directly from the mode argument of ioctl(). 
28261 * 28262 * Return Code: the code returned by sd_send_scsi_cmd() 28263 * EFAULT if ddi_copyxxx() fails 28264 * ENXIO if fail ddi_get_soft_state 28265 * EINVAL if data pointer is NULL 28266 * EIO if fail to reset block size 28267 * EAGAIN if commands are in progress in the driver 28268 */ 28269 28270 static int 28271 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28272 { 28273 struct sd_lun *un; 28274 struct cdrom_read mode2_struct; 28275 struct cdrom_read *mode2 = &mode2_struct; 28276 int rval; 28277 uint32_t restore_blksize; 28278 struct uscsi_cmd *com; 28279 uchar_t cdb[CDB_GROUP0]; 28280 int nblocks; 28281 28282 #ifdef _MULTI_DATAMODEL 28283 /* To support ILP32 applications in an LP64 world */ 28284 struct cdrom_read32 cdrom_read32; 28285 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28286 #endif /* _MULTI_DATAMODEL */ 28287 28288 if (data == NULL) { 28289 return (EINVAL); 28290 } 28291 28292 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28293 (un->un_state == SD_STATE_OFFLINE)) { 28294 return (ENXIO); 28295 } 28296 28297 /* 28298 * Because this routine will update the device and driver block size 28299 * being used we want to make sure there are no commands in progress. 28300 * If commands are in progress the user will have to try again. 28301 * 28302 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28303 * in sdioctl to protect commands from sdioctl through to the top of 28304 * sd_uscsi_strategy. See sdioctl for details. 28305 */ 28306 mutex_enter(SD_MUTEX(un)); 28307 if (un->un_ncmds_in_driver != 1) { 28308 mutex_exit(SD_MUTEX(un)); 28309 return (EAGAIN); 28310 } 28311 mutex_exit(SD_MUTEX(un)); 28312 28313 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28314 "sd_read_mode2: entry: un:0x%p\n", un); 28315 28316 #ifdef _MULTI_DATAMODEL 28317 switch (ddi_model_convert_from(flag & FMODELS)) { 28318 case DDI_MODEL_ILP32: 28319 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28320 return (EFAULT); 28321 } 28322 /* Convert the ILP32 uscsi data from the application to LP64 */ 28323 cdrom_read32tocdrom_read(cdrd32, mode2); 28324 break; 28325 case DDI_MODEL_NONE: 28326 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28327 return (EFAULT); 28328 } 28329 break; 28330 } 28331 #else /* ! 
_MULTI_DATAMODEL */ 28332 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 28333 return (EFAULT); 28334 } 28335 #endif /* _MULTI_DATAMODEL */ 28336 28337 /* Store the current target block size for restoration later */ 28338 restore_blksize = un->un_tgt_blocksize; 28339 28340 /* Change the device and soft state target block size to 2336 */ 28341 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28342 rval = EIO; 28343 goto done; 28344 } 28345 28346 28347 bzero(cdb, sizeof (cdb)); 28348 28349 /* set READ operation */ 28350 cdb[0] = SCMD_READ; 28351 28352 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28353 mode2->cdread_lba >>= 2; 28354 28355 /* set the start address */ 28356 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 28357 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28358 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28359 28360 /* set the transfer length */ 28361 nblocks = mode2->cdread_buflen / 2336; 28362 cdb[4] = (uchar_t)nblocks & 0xFF; 28363 28364 /* build command */ 28365 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28366 com->uscsi_cdb = (caddr_t)cdb; 28367 com->uscsi_cdblen = sizeof (cdb); 28368 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28369 com->uscsi_buflen = mode2->cdread_buflen; 28370 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28371 28372 /* 28373 * Issue SCSI command with user space address for read buffer. 28374 * 28375 * This sends the command through the main channel in the driver. 28376 * 28377 * Since this is accessed via an IOCTL call, we go through the 28378 * standard path, so that if the device was powered down, then 28379 * it would be 'awakened' to handle the command. 28380 */ 28381 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28382 SD_PATH_STANDARD); 28383 28384 kmem_free(com, sizeof (*com)); 28385 28386 /* Restore the device and soft state target block size */ 28387 if (sr_sector_mode(dev, restore_blksize) != 0) { 28388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28389 "can't do switch back to mode 1\n"); 28390 /* 28391 * If the read succeeded, we still need to report 28392 * an error because we failed to reset the block size 28393 */ 28394 if (rval == 0) { 28395 rval = EIO; 28396 } 28397 } 28398 28399 done: 28400 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28401 "sd_read_mode2: exit: un:0x%p\n", un); 28402 28403 return (rval); 28404 } 28405 28406 28407 /* 28408 * Function: sr_sector_mode() 28409 * 28410 * Description: This utility function is used by sr_read_mode2 to set the target 28411 * block size based on the user specified size. This is a legacy 28412 * implementation based upon a vendor specific mode page. 28413 * 28414 * Arguments: dev - the device 'dev_t' 28415 * blksize - flag indicating if block size is being set to 2336 or 28416 * 512.
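 *
 * A sketch of the 20 byte parameter list built below, inferred from
 * this code rather than from a published spec: bytes 0-3 are the mode
 * header (byte 3 = 0x08, the block descriptor length), bytes 4-11 the
 * block descriptor (bytes 10-11 carry the block size), and bytes
 * 12-19 vendor page 0x81 (written with the PS bit clear, hence the
 * 0x01 page code).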
28417 * 28418 * Return Code: the code returned by sd_send_scsi_cmd() 28419 * EFAULT if ddi_copyxxx() fails 28420 * ENXIO if fail ddi_get_soft_state 28421 * EINVAL if data pointer is NULL 28422 */ 28423 28424 static int 28425 sr_sector_mode(dev_t dev, uint32_t blksize) 28426 { 28427 struct sd_lun *un; 28428 uchar_t *sense; 28429 uchar_t *select; 28430 int rval; 28431 sd_ssc_t *ssc; 28432 28433 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28434 (un->un_state == SD_STATE_OFFLINE)) { 28435 return (ENXIO); 28436 } 28437 28438 sense = kmem_zalloc(20, KM_SLEEP); 28439 28440 /* Note: This is a vendor specific mode page (0x81) */ 28441 ssc = sd_ssc_init(un); 28442 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 28443 SD_PATH_STANDARD); 28444 sd_ssc_fini(ssc); 28445 if (rval != 0) { 28446 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28447 "sr_sector_mode: Mode Sense failed\n"); 28448 kmem_free(sense, 20); 28449 return (rval); 28450 } 28451 select = kmem_zalloc(20, KM_SLEEP); 28452 select[3] = 0x08; 28453 select[10] = ((blksize >> 8) & 0xff); 28454 select[11] = (blksize & 0xff); 28455 select[12] = 0x01; 28456 select[13] = 0x06; 28457 select[14] = sense[14]; 28458 select[15] = sense[15]; 28459 if (blksize == SD_MODE2_BLKSIZE) { 28460 select[14] |= 0x01; 28461 } 28462 28463 ssc = sd_ssc_init(un); 28464 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 28465 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28466 sd_ssc_fini(ssc); 28467 if (rval != 0) { 28468 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28469 "sr_sector_mode: Mode Select failed\n"); 28470 } else { 28471 /* 28472 * Only update the softstate block size if we successfully 28473 * changed the device block mode. 28474 */ 28475 mutex_enter(SD_MUTEX(un)); 28476 sd_update_block_info(un, blksize, 0); 28477 mutex_exit(SD_MUTEX(un)); 28478 } 28479 kmem_free(sense, 20); 28480 kmem_free(select, 20); 28481 return (rval); 28482 } 28483 28484 28485 /* 28486 * Function: sr_read_cdda() 28487 * 28488 * Description: This routine is the driver entry point for handling CD-ROM 28489 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 28490 * the target supports CDDA these requests are handled via a vendor 28491 * specific command (0xD8) If the target does not support CDDA 28492 * these requests are handled via the READ CD command (0xBE). 28493 * 28494 * Arguments: dev - the device 'dev_t' 28495 * data - pointer to user provided CD-DA structure specifying 28496 * the track starting address, transfer length, and 28497 * subcode options. 28498 * flag - this argument is a pass through to ddi_copyxxx() 28499 * directly from the mode argument of ioctl(). 
28500 * 28501 * Return Code: the code returned by sd_send_scsi_cmd() 28502 * EFAULT if ddi_copyxxx() fails 28503 * ENXIO if fail ddi_get_soft_state 28504 * EINVAL if invalid arguments are provided 28505 * ENOTTY 28506 */ 28507 28508 static int 28509 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28510 { 28511 struct sd_lun *un; 28512 struct uscsi_cmd *com; 28513 struct cdrom_cdda *cdda; 28514 int rval; 28515 size_t buflen; 28516 char cdb[CDB_GROUP5]; 28517 28518 #ifdef _MULTI_DATAMODEL 28519 /* To support ILP32 applications in an LP64 world */ 28520 struct cdrom_cdda32 cdrom_cdda32; 28521 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28522 #endif /* _MULTI_DATAMODEL */ 28523 28524 if (data == NULL) { 28525 return (EINVAL); 28526 } 28527 28528 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28529 return (ENXIO); 28530 } 28531 28532 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28533 28534 #ifdef _MULTI_DATAMODEL 28535 switch (ddi_model_convert_from(flag & FMODELS)) { 28536 case DDI_MODEL_ILP32: 28537 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28538 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28539 "sr_read_cdda: ddi_copyin Failed\n"); 28540 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28541 return (EFAULT); 28542 } 28543 /* Convert the ILP32 uscsi data from the application to LP64 */ 28544 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28545 break; 28546 case DDI_MODEL_NONE: 28547 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28548 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28549 "sr_read_cdda: ddi_copyin Failed\n"); 28550 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28551 return (EFAULT); 28552 } 28553 break; 28554 } 28555 #else /* ! _MULTI_DATAMODEL */ 28556 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28557 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28558 "sr_read_cdda: ddi_copyin Failed\n"); 28559 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28560 return (EFAULT); 28561 } 28562 #endif /* _MULTI_DATAMODEL */ 28563 28564 /* 28565 * Since MMC-2 expects max 3 bytes for length, check if the 28566 * length input is greater than 3 bytes 28567 */ 28568 if ((cdda->cdda_length & 0xFF000000) != 0) { 28569 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28570 "cdrom transfer length too large: %d (limit %d)\n", 28571 cdda->cdda_length, 0xFFFFFF); 28572 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28573 return (EINVAL); 28574 } 28575 28576 switch (cdda->cdda_subcode) { 28577 case CDROM_DA_NO_SUBCODE: 28578 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28579 break; 28580 case CDROM_DA_SUBQ: 28581 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28582 break; 28583 case CDROM_DA_ALL_SUBCODE: 28584 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28585 break; 28586 case CDROM_DA_SUBCODE_ONLY: 28587 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28588 break; 28589 default: 28590 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28591 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28592 cdda->cdda_subcode); 28593 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28594 return (EINVAL); 28595 } 28596 28597 /* Build and send the command */ 28598 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28599 bzero(cdb, CDB_GROUP5); 28600 28601 if (un->un_f_cfg_cdda == TRUE) { 28602 cdb[0] = (char)SCMD_READ_CD; 28603 cdb[1] = 0x04; 28604 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28605 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28606 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28607 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28608 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28609 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28610 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28611 cdb[9] = 0x10; 28612 switch (cdda->cdda_subcode) { 28613 case CDROM_DA_NO_SUBCODE : 28614 cdb[10] = 0x0; 28615 break; 28616 case CDROM_DA_SUBQ : 28617 cdb[10] = 0x2; 28618 break; 28619 case CDROM_DA_ALL_SUBCODE : 28620 cdb[10] = 0x1; 28621 break; 28622 case CDROM_DA_SUBCODE_ONLY : 28623 /* FALLTHROUGH */ 28624 default : 28625 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28626 kmem_free(com, sizeof (*com)); 28627 return (ENOTTY); 28628 } 28629 } else { 28630 cdb[0] = (char)SCMD_READ_CDDA; 28631 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28632 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28633 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28634 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28635 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28636 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28637 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28638 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28639 cdb[10] = cdda->cdda_subcode; 28640 } 28641 28642 com->uscsi_cdb = cdb; 28643 com->uscsi_cdblen = CDB_GROUP5; 28644 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28645 com->uscsi_buflen = buflen; 28646 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28647 28648 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28649 SD_PATH_STANDARD); 28650 28651 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28652 kmem_free(com, sizeof (*com)); 28653 return (rval); 28654 } 28655 28656 28657 /* 28658 * Function: sr_read_cdxa() 28659 * 28660 * Description: This routine is the driver entry point for handling CD-ROM 28661 * ioctl requests to return CD-XA (Extended Architecture) data. 28662 * (CDROMCDXA). 28663 * 28664 * Arguments: dev - the device 'dev_t' 28665 * data - pointer to user provided CD-XA structure specifying 28666 * the data starting address, transfer length, and format 28667 * flag - this argument is a pass through to ddi_copyxxx() 28668 * directly from the mode argument of ioctl(). 28669 * 28670 * Return Code: the code returned by sd_send_scsi_cmd() 28671 * EFAULT if ddi_copyxxx() fails 28672 * ENXIO if fail ddi_get_soft_state 28673 * EINVAL if data pointer is NULL 28674 */ 28675 28676 static int 28677 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28678 { 28679 struct sd_lun *un; 28680 struct uscsi_cmd *com; 28681 struct cdrom_cdxa *cdxa; 28682 int rval; 28683 size_t buflen; 28684 char cdb[CDB_GROUP5]; 28685 uchar_t read_flags; 28686 28687 #ifdef _MULTI_DATAMODEL 28688 /* To support ILP32 applications in an LP64 world */ 28689 struct cdrom_cdxa32 cdrom_cdxa32; 28690 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28691 #endif /* _MULTI_DATAMODEL */ 28692 28693 if (data == NULL) { 28694 return (EINVAL); 28695 } 28696 28697 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28698 return (ENXIO); 28699 } 28700 28701 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28702 28703 #ifdef _MULTI_DATAMODEL 28704 switch (ddi_model_convert_from(flag & FMODELS)) { 28705 case DDI_MODEL_ILP32: 28706 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28707 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28708 return (EFAULT); 28709 } 28710 /* 28711 * Convert the ILP32 uscsi data from the 28712 * application to LP64 for internal use. 
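 * (In particular, the 32-bit cdxa_data buffer address is widened
 * to a 64-bit pointer.)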
28713 */ 28714 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28715 break; 28716 case DDI_MODEL_NONE: 28717 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28718 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28719 return (EFAULT); 28720 } 28721 break; 28722 } 28723 #else /* ! _MULTI_DATAMODEL */ 28724 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28725 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28726 return (EFAULT); 28727 } 28728 #endif /* _MULTI_DATAMODEL */ 28729 28730 /* 28731 * Since MMC-2 expects max 3 bytes for length, check if the 28732 * length input is greater than 3 bytes 28733 */ 28734 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28735 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28736 "cdrom transfer length too large: %d (limit %d)\n", 28737 cdxa->cdxa_length, 0xFFFFFF); 28738 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28739 return (EINVAL); 28740 } 28741 28742 switch (cdxa->cdxa_format) { 28743 case CDROM_XA_DATA: 28744 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28745 read_flags = 0x10; 28746 break; 28747 case CDROM_XA_SECTOR_DATA: 28748 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28749 read_flags = 0xf8; 28750 break; 28751 case CDROM_XA_DATA_W_ERROR: 28752 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28753 read_flags = 0xfc; 28754 break; 28755 default: 28756 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28757 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28758 cdxa->cdxa_format); 28759 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28760 return (EINVAL); 28761 } 28762 28763 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28764 bzero(cdb, CDB_GROUP5); 28765 if (un->un_f_mmc_cap == TRUE) { 28766 cdb[0] = (char)SCMD_READ_CD; 28767 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28768 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28769 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28770 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28771 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28772 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28773 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28774 cdb[9] = (char)read_flags; 28775 } else { 28776 /* 28777 * Note: A vendor specific command (0xDB) is being used her to 28778 * request a read of all subcodes. 
28779 */ 28780 cdb[0] = (char)SCMD_READ_CDXA; 28781 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28782 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28783 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28784 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28785 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28786 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28787 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28788 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28789 cdb[10] = cdxa->cdxa_format; 28790 } 28791 com->uscsi_cdb = cdb; 28792 com->uscsi_cdblen = CDB_GROUP5; 28793 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28794 com->uscsi_buflen = buflen; 28795 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28796 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28797 SD_PATH_STANDARD); 28798 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28799 kmem_free(com, sizeof (*com)); 28800 return (rval); 28801 } 28802 28803 28804 /* 28805 * Function: sr_eject() 28806 * 28807 * Description: This routine is the driver entry point for handling CD-ROM 28808 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28809 * 28810 * Arguments: dev - the device 'dev_t' 28811 * 28812 * Return Code: the code returned by sd_send_scsi_cmd() 28813 */ 28814 28815 static int 28816 sr_eject(dev_t dev) 28817 { 28818 struct sd_lun *un; 28819 int rval; 28820 sd_ssc_t *ssc; 28821 28822 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28823 (un->un_state == SD_STATE_OFFLINE)) { 28824 return (ENXIO); 28825 } 28826 28827 /* 28828 * To prevent race conditions with the eject 28829 * command, keep track of an eject command as 28830 * it progresses. If we are already handling 28831 * an eject command in the driver for the given 28832 * unit and another request to eject is received 28833 * immediately return EAGAIN so we don't lose 28834 * the command if the current eject command fails. 28835 */ 28836 mutex_enter(SD_MUTEX(un)); 28837 if (un->un_f_ejecting == TRUE) { 28838 mutex_exit(SD_MUTEX(un)); 28839 return (EAGAIN); 28840 } 28841 un->un_f_ejecting = TRUE; 28842 mutex_exit(SD_MUTEX(un)); 28843 28844 ssc = sd_ssc_init(un); 28845 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 28846 SD_PATH_STANDARD); 28847 sd_ssc_fini(ssc); 28848 28849 if (rval != 0) { 28850 mutex_enter(SD_MUTEX(un)); 28851 un->un_f_ejecting = FALSE; 28852 mutex_exit(SD_MUTEX(un)); 28853 return (rval); 28854 } 28855 28856 ssc = sd_ssc_init(un); 28857 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 28858 SD_TARGET_EJECT, SD_PATH_STANDARD); 28859 sd_ssc_fini(ssc); 28860 28861 if (rval == 0) { 28862 mutex_enter(SD_MUTEX(un)); 28863 sr_ejected(un); 28864 un->un_mediastate = DKIO_EJECTED; 28865 un->un_f_ejecting = FALSE; 28866 cv_broadcast(&un->un_state_cv); 28867 mutex_exit(SD_MUTEX(un)); 28868 } else { 28869 mutex_enter(SD_MUTEX(un)); 28870 un->un_f_ejecting = FALSE; 28871 mutex_exit(SD_MUTEX(un)); 28872 } 28873 return (rval); 28874 } 28875 28876 28877 /* 28878 * Function: sr_ejected() 28879 * 28880 * Description: This routine updates the soft state structure to invalidate the 28881 * geometry information after the media has been ejected or a 28882 * media eject has been detected. 
28883 * 28884 * Arguments: un - driver soft state (unit) structure 28885 */ 28886 28887 static void 28888 sr_ejected(struct sd_lun *un) 28889 { 28890 struct sd_errstats *stp; 28891 28892 ASSERT(un != NULL); 28893 ASSERT(mutex_owned(SD_MUTEX(un))); 28894 28895 un->un_f_blockcount_is_valid = FALSE; 28896 un->un_f_tgt_blocksize_is_valid = FALSE; 28897 mutex_exit(SD_MUTEX(un)); 28898 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 28899 mutex_enter(SD_MUTEX(un)); 28900 28901 if (un->un_errstats != NULL) { 28902 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28903 stp->sd_capacity.value.ui64 = 0; 28904 } 28905 } 28906 28907 28908 /* 28909 * Function: sr_check_wp() 28910 * 28911 * Description: This routine checks the write protection of a removable 28912 * media disk and hotpluggable devices via the write protect bit of 28913 * the Mode Page Header device specific field. Some devices choke 28914 * on unsupported mode pages. In order to work around this issue, 28915 * this routine has been implemented to use the 0x3f mode page (request 28916 * for all pages) for all device types. 28917 * 28918 * Arguments: dev - the device 'dev_t' 28919 * 28920 * Return Code: int indicating if the device is write protected (1) or not (0) 28921 * 28922 * Context: Kernel thread. 28923 * 28924 */ 28925 28926 static int 28927 sr_check_wp(dev_t dev) 28928 { 28929 struct sd_lun *un; 28930 uchar_t device_specific; 28931 uchar_t *sense; 28932 int hdrlen; 28933 int rval = FALSE; 28934 int status; 28935 sd_ssc_t *ssc; 28936 28937 /* 28938 * Note: The return codes for this routine should be reworked to 28939 * properly handle the case of a NULL softstate. 28940 */ 28941 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28942 return (FALSE); 28943 } 28944 28945 if (un->un_f_cfg_is_atapi == TRUE) { 28946 /* 28947 * The mode page contents are not required; set the allocation 28948 * length for the mode page header only 28949 */ 28950 hdrlen = MODE_HEADER_LENGTH_GRP2; 28951 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28952 ssc = sd_ssc_init(un); 28953 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 28954 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 28955 sd_ssc_fini(ssc); 28956 if (status != 0) 28957 goto err_exit; 28958 device_specific = 28959 ((struct mode_header_grp2 *)sense)->device_specific; 28960 } else { 28961 hdrlen = MODE_HEADER_LENGTH; 28962 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28963 ssc = sd_ssc_init(un); 28964 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 28965 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 28966 sd_ssc_fini(ssc); 28967 if (status != 0) 28968 goto err_exit; 28969 device_specific = 28970 ((struct mode_header *)sense)->device_specific; 28971 } 28972 28973 28974 /* 28975 * The mode sense succeeded, so check the write protect bit here. 28976 * (Should the mode sense fail, not all disks understand this query, 28977 * and FALSE is returned assuming such devices are not writable.) 28978 */ 28979 if (device_specific & WRITE_PROTECT) { 28980 rval = TRUE; 28981 } 28982 28983 err_exit: 28984 kmem_free(sense, hdrlen); 28985 return (rval); 28986 } 28987 28988 /* 28989 * Function: sr_volume_ctrl() 28990 * 28991 * Description: This routine is the driver entry point for handling CD-ROM 28992 * audio output volume ioctl requests. (CDROMVOLCTRL) 28993 * 28994 * Arguments: dev - the device 'dev_t' 28995 * data - pointer to user audio volume control structure 28996 * flag - this argument is a pass through to ddi_copyxxx() 28997 * directly from the mode argument of ioctl().
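 *
 * Usage sketch (illustrative, user level; fd is hypothetical):
 *
 *	struct cdrom_volctrl v;
 *	v.channel0 = 255;	v.channel1 = 255;	full volume
 *	(void) ioctl(fd, CDROMVOLCTRL, &v);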
28998 * 28999 * Return Code: the code returned by sd_send_scsi_cmd() 29000 * EFAULT if ddi_copyxxx() fails 29001 * ENXIO if fail ddi_get_soft_state 29002 * EINVAL if data pointer is NULL 29003 * 29004 */ 29005 29006 static int 29007 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 29008 { 29009 struct sd_lun *un; 29010 struct cdrom_volctrl volume; 29011 struct cdrom_volctrl *vol = &volume; 29012 uchar_t *sense_page; 29013 uchar_t *select_page; 29014 uchar_t *sense; 29015 uchar_t *select; 29016 int sense_buflen; 29017 int select_buflen; 29018 int rval; 29019 sd_ssc_t *ssc; 29020 29021 if (data == NULL) { 29022 return (EINVAL); 29023 } 29024 29025 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29026 (un->un_state == SD_STATE_OFFLINE)) { 29027 return (ENXIO); 29028 } 29029 29030 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 29031 return (EFAULT); 29032 } 29033 29034 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29035 struct mode_header_grp2 *sense_mhp; 29036 struct mode_header_grp2 *select_mhp; 29037 int bd_len; 29038 29039 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 29040 select_buflen = MODE_HEADER_LENGTH_GRP2 + 29041 MODEPAGE_AUDIO_CTRL_LEN; 29042 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29043 select = kmem_zalloc(select_buflen, KM_SLEEP); 29044 ssc = sd_ssc_init(un); 29045 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 29046 sense_buflen, MODEPAGE_AUDIO_CTRL, 29047 SD_PATH_STANDARD); 29048 sd_ssc_fini(ssc); 29049 29050 if (rval != 0) { 29051 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 29052 "sr_volume_ctrl: Mode Sense Failed\n"); 29053 kmem_free(sense, sense_buflen); 29054 kmem_free(select, select_buflen); 29055 return (rval); 29056 } 29057 sense_mhp = (struct mode_header_grp2 *)sense; 29058 select_mhp = (struct mode_header_grp2 *)select; 29059 bd_len = (sense_mhp->bdesc_length_hi << 8) | 29060 sense_mhp->bdesc_length_lo; 29061 if (bd_len > MODE_BLK_DESC_LENGTH) { 29062 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29063 "sr_volume_ctrl: Mode Sense returned invalid " 29064 "block descriptor length\n"); 29065 kmem_free(sense, sense_buflen); 29066 kmem_free(select, select_buflen); 29067 return (EIO); 29068 } 29069 sense_page = (uchar_t *) 29070 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 29071 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 29072 select_mhp->length_msb = 0; 29073 select_mhp->length_lsb = 0; 29074 select_mhp->bdesc_length_hi = 0; 29075 select_mhp->bdesc_length_lo = 0; 29076 } else { 29077 struct mode_header *sense_mhp, *select_mhp; 29078 29079 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29080 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29081 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29082 select = kmem_zalloc(select_buflen, KM_SLEEP); 29083 ssc = sd_ssc_init(un); 29084 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 29085 sense_buflen, MODEPAGE_AUDIO_CTRL, 29086 SD_PATH_STANDARD); 29087 sd_ssc_fini(ssc); 29088 29089 if (rval != 0) { 29090 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29091 "sr_volume_ctrl: Mode Sense Failed\n"); 29092 kmem_free(sense, sense_buflen); 29093 kmem_free(select, select_buflen); 29094 return (rval); 29095 } 29096 sense_mhp = (struct mode_header *)sense; 29097 select_mhp = (struct mode_header *)select; 29098 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29100 "sr_volume_ctrl: Mode Sense returned invalid " 29101 "block descriptor length\n"); 29102 
kmem_free(sense, sense_buflen); 29103 kmem_free(select, select_buflen); 29104 return (EIO); 29105 } 29106 sense_page = (uchar_t *) 29107 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 29108 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 29109 select_mhp->length = 0; 29110 select_mhp->bdesc_length = 0; 29111 } 29112 /* 29113 * Note: An audio control data structure could be created and overlaid 29114 * on the following in place of the array indexing method implemented. 29115 */ 29116 29117 /* Build the select data for the user volume data */ 29118 select_page[0] = MODEPAGE_AUDIO_CTRL; 29119 select_page[1] = 0xE; 29120 /* Set the immediate bit */ 29121 select_page[2] = 0x04; 29122 /* Zero out reserved fields */ 29123 select_page[3] = 0x00; 29124 select_page[4] = 0x00; 29125 /* Return sense data for fields not to be modified */ 29126 select_page[5] = sense_page[5]; 29127 select_page[6] = sense_page[6]; 29128 select_page[7] = sense_page[7]; 29129 /* Set the user specified volume levels for channel 0 and 1 */ 29130 select_page[8] = 0x01; 29131 select_page[9] = vol->channel0; 29132 select_page[10] = 0x02; 29133 select_page[11] = vol->channel1; 29134 /* Channel 2 and 3 are currently unsupported so return the sense data */ 29135 select_page[12] = sense_page[12]; 29136 select_page[13] = sense_page[13]; 29137 select_page[14] = sense_page[14]; 29138 select_page[15] = sense_page[15]; 29139 29140 ssc = sd_ssc_init(un); 29141 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29142 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 29143 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29144 } else { 29145 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 29146 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29147 } 29148 sd_ssc_fini(ssc); 29149 29150 kmem_free(sense, sense_buflen); 29151 kmem_free(select, select_buflen); 29152 return (rval); 29153 } 29154 29155 29156 /* 29157 * Function: sr_read_sony_session_offset() 29158 * 29159 * Description: This routine is the driver entry point for handling CD-ROM 29160 * ioctl requests for session offset information. (CDROMREADOFFSET) 29161 * The address of the first track in the last session of a 29162 * multi-session CD-ROM is returned. 29163 * 29164 * Note: This routine uses a vendor specific key value in the 29165 * command control field without implementing any vendor check here 29166 * or in the ioctl routine. 29167 * 29168 * Arguments: dev - the device 'dev_t' 29169 * data - pointer to an int to hold the requested address 29170 * flag - this argument is a pass through to ddi_copyxxx() 29171 * directly from the mode argument of ioctl(). 29172 * 29173 * Return Code: the code returned by sd_send_scsi_cmd() 29174 * EFAULT if ddi_copyxxx() fails 29175 * ENXIO if fail ddi_get_soft_state 29176 * EINVAL if data pointer is NULL 29177 */ 29178 29179 static int 29180 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 29181 { 29182 struct sd_lun *un; 29183 struct uscsi_cmd *com; 29184 caddr_t buffer; 29185 char cdb[CDB_GROUP1]; 29186 int session_offset = 0; 29187 int rval; 29188 29189 if (data == NULL) { 29190 return (EINVAL); 29191 } 29192 29193 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29194 (un->un_state == SD_STATE_OFFLINE)) { 29195 return (ENXIO); 29196 } 29197 29198 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 29199 bzero(cdb, CDB_GROUP1); 29200 cdb[0] = SCMD_READ_TOC; 29201 /* 29202 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29203 * (4 byte TOC response header + 8 byte response data) 29204 */ 29205 cdb[8] = SONY_SESSION_OFFSET_LEN; 29206 /* Byte 9 is the control byte. A vendor specific value is used */ 29207 cdb[9] = SONY_SESSION_OFFSET_KEY; 29208 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 29209 com->uscsi_cdb = cdb; 29210 com->uscsi_cdblen = CDB_GROUP1; 29211 com->uscsi_bufaddr = buffer; 29212 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 29213 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 29214 29215 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 29216 SD_PATH_STANDARD); 29217 if (rval != 0) { 29218 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 29219 kmem_free(com, sizeof (*com)); 29220 return (rval); 29221 } 29222 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 29223 session_offset = 29224 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 29225 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 29226 /* 29227 * The offset is returned in current lbasize blocks. Convert it 29228 * to 2k blocks to return to the user. 29229 */ 29230 if (un->un_tgt_blocksize == CDROM_BLK_512) { 29231 session_offset >>= 2; 29232 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 29233 session_offset >>= 1; 29234 } 29235 } 29236 29237 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 29238 rval = EFAULT; 29239 } 29240 29241 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 29242 kmem_free(com, sizeof (*com)); 29243 return (rval); 29244 } 29245 29246 29247 /* 29248 * Function: sd_wm_cache_constructor() 29249 * 29250 * Description: Cache Constructor for the wmap cache for the read/modify/write 29251 * devices. 29252 * 29253 * Arguments: wm - A pointer to the sd_w_map to be initialized. 29254 * un - sd_lun structure for the device. 29255 * flag - the km flags passed to constructor 29256 * 29257 * Return Code: 0 on success. 29258 * -1 on failure. 29259 */ 29260 29261 /*ARGSUSED*/ 29262 static int 29263 sd_wm_cache_constructor(void *wm, void *un, int flags) 29264 { 29265 bzero(wm, sizeof (struct sd_w_map)); 29266 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 29267 return (0); 29268 } 29269 29270 29271 /* 29272 * Function: sd_wm_cache_destructor() 29273 * 29274 * Description: Cache destructor for the wmap cache for the read/modify/write 29275 * devices. 29276 * 29277 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 29278 * un - sd_lun structure for the device. 29279 */ 29280 /*ARGSUSED*/ 29281 static void 29282 sd_wm_cache_destructor(void *wm, void *un) 29283 { 29284 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 29285 } 29286 29287 29288 /* 29289 * Function: sd_range_lock() 29290 * 29291 * Description: Lock the specified range of blocks to ensure 29292 * that a read-modify-write is atomic and no other I/O writes 29293 * to the same location. The range is specified in terms 29294 * of start and end blocks. Block numbers are the actual 29295 * media block numbers, not system block numbers. 29296 * 29297 * Arguments: un - sd_lun structure for the device. 29298 * startb - The starting block number 29299 * endb - The end block number 29300 * typ - type of i/o - simple/read_modify_write 29301 * 29302 * Return Code: wm - pointer to the wmap structure. 29303 * 29304 * Context: This routine can sleep.
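 *
 * Usage sketch (illustrative): a read-modify-write path brackets its
 * target-block I/O with
 *
 *	wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *	... read the enclosing target blocks, merge, write back ...
 *	sd_range_unlock(un, wm);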
29305 */ 29306 29307 static struct sd_w_map * 29308 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 29309 { 29310 struct sd_w_map *wmp = NULL; 29311 struct sd_w_map *sl_wmp = NULL; 29312 struct sd_w_map *tmp_wmp; 29313 wm_state state = SD_WM_CHK_LIST; 29314 29315 29316 ASSERT(un != NULL); 29317 ASSERT(!mutex_owned(SD_MUTEX(un))); 29318 29319 mutex_enter(SD_MUTEX(un)); 29320 29321 while (state != SD_WM_DONE) { 29322 29323 switch (state) { 29324 case SD_WM_CHK_LIST: 29325 /* 29326 * This is the starting state. Check the wmap list 29327 * to see if the range is currently available. 29328 */ 29329 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 29330 /* 29331 * If this is a simple write and no rmw 29332 * i/o is pending then try to lock the 29333 * range as the range should be available. 29334 */ 29335 state = SD_WM_LOCK_RANGE; 29336 } else { 29337 tmp_wmp = sd_get_range(un, startb, endb); 29338 if (tmp_wmp != NULL) { 29339 if ((wmp != NULL) && ONLIST(un, wmp)) { 29340 /* 29341 * Should not keep onlist wmps 29342 * while waiting this macro 29343 * will also do wmp = NULL; 29344 */ 29345 FREE_ONLIST_WMAP(un, wmp); 29346 } 29347 /* 29348 * sl_wmp is the wmap on which wait 29349 * is done, since the tmp_wmp points 29350 * to the inuse wmap, set sl_wmp to 29351 * tmp_wmp and change the state to sleep 29352 */ 29353 sl_wmp = tmp_wmp; 29354 state = SD_WM_WAIT_MAP; 29355 } else { 29356 state = SD_WM_LOCK_RANGE; 29357 } 29358 29359 } 29360 break; 29361 29362 case SD_WM_LOCK_RANGE: 29363 ASSERT(un->un_wm_cache); 29364 /* 29365 * The range need to be locked, try to get a wmap. 29366 * First attempt it with NO_SLEEP, want to avoid a sleep 29367 * if possible as we will have to release the sd mutex 29368 * if we have to sleep. 29369 */ 29370 if (wmp == NULL) 29371 wmp = kmem_cache_alloc(un->un_wm_cache, 29372 KM_NOSLEEP); 29373 if (wmp == NULL) { 29374 mutex_exit(SD_MUTEX(un)); 29375 _NOTE(DATA_READABLE_WITHOUT_LOCK 29376 (sd_lun::un_wm_cache)) 29377 wmp = kmem_cache_alloc(un->un_wm_cache, 29378 KM_SLEEP); 29379 mutex_enter(SD_MUTEX(un)); 29380 /* 29381 * we released the mutex so recheck and go to 29382 * check list state. 29383 */ 29384 state = SD_WM_CHK_LIST; 29385 } else { 29386 /* 29387 * We exit out of state machine since we 29388 * have the wmap. Do the housekeeping first. 29389 * place the wmap on the wmap list if it is not 29390 * on it already and then set the state to done. 29391 */ 29392 wmp->wm_start = startb; 29393 wmp->wm_end = endb; 29394 wmp->wm_flags = typ | SD_WM_BUSY; 29395 if (typ & SD_WTYPE_RMW) { 29396 un->un_rmw_count++; 29397 } 29398 /* 29399 * If not already on the list then link 29400 */ 29401 if (!ONLIST(un, wmp)) { 29402 wmp->wm_next = un->un_wm; 29403 wmp->wm_prev = NULL; 29404 if (wmp->wm_next) 29405 wmp->wm_next->wm_prev = wmp; 29406 un->un_wm = wmp; 29407 } 29408 state = SD_WM_DONE; 29409 } 29410 break; 29411 29412 case SD_WM_WAIT_MAP: 29413 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 29414 /* 29415 * Wait is done on sl_wmp, which is set in the 29416 * check_list state. 29417 */ 29418 sl_wmp->wm_wanted_count++; 29419 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 29420 sl_wmp->wm_wanted_count--; 29421 /* 29422 * We can reuse the memory from the completed sl_wmp 29423 * lock range for our new lock, but only if noone is 29424 * waiting for it. 
 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, we need to recheck the
			 * availability of the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find out whether there is any I/O overlapping this one.
 *		Returns the write map of the first such I/O, NULL otherwise.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		wm	- sd_w_map whose range needs to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
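 *
 *		As an illustrative sketch (assuming the driver taskq sd_tq
 *		created in _init(); error handling elided), the rmw
 *		read-completion path is expected to hand the buf to this
 *		routine along the lines of:
 *
 *		(void) taskq_dispatch(sd_tq, sd_read_modify_write_task,
 *		    bp, TQ_NOSLEEP);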
 *
 * Arguments:	arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments:	un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
We should only check 29678 * errors if this condition is not true. Even then we should 29679 * send our own request sense packet only if we have a check 29680 * condition and auto request sense has not been performed by 29681 * the hba. 29682 */ 29683 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29684 29685 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29686 err = 0; 29687 break; 29688 } 29689 29690 /* 29691 * Check CMD_DEV_GONE 1st, give up if device is gone, 29692 * no need to read RQS data. 29693 */ 29694 if (pkt->pkt_reason == CMD_DEV_GONE) { 29695 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29696 "Error while dumping state with rmw..." 29697 "Device is gone\n"); 29698 break; 29699 } 29700 29701 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29702 SD_INFO(SD_LOG_DUMP, un, 29703 "sddump: read failed with CHECK, try # %d\n", i); 29704 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29705 (void) sd_send_polled_RQS(un); 29706 } 29707 29708 continue; 29709 } 29710 29711 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29712 int reset_retval = 0; 29713 29714 SD_INFO(SD_LOG_DUMP, un, 29715 "sddump: read failed with BUSY, try # %d\n", i); 29716 29717 if (un->un_f_lun_reset_enabled == TRUE) { 29718 reset_retval = scsi_reset(SD_ADDRESS(un), 29719 RESET_LUN); 29720 } 29721 if (reset_retval == 0) { 29722 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29723 } 29724 (void) sd_send_polled_RQS(un); 29725 29726 } else { 29727 SD_INFO(SD_LOG_DUMP, un, 29728 "sddump: read failed with 0x%x, try # %d\n", 29729 SD_GET_PKT_STATUS(pkt), i); 29730 mutex_enter(SD_MUTEX(un)); 29731 sd_reset_target(un, pkt); 29732 mutex_exit(SD_MUTEX(un)); 29733 } 29734 29735 /* 29736 * If we are not getting anywhere with lun/target resets, 29737 * let's reset the bus. 29738 */ 29739 if (i > SD_NDUMP_RETRIES/2) { 29740 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29741 (void) sd_send_polled_RQS(un); 29742 } 29743 29744 } 29745 scsi_destroy_pkt(pkt); 29746 29747 if (err != 0) { 29748 scsi_free_consistent_buf(bp); 29749 *bpp = NULL; 29750 } else { 29751 *bpp = bp; 29752 } 29753 29754 done: 29755 mutex_enter(SD_MUTEX(un)); 29756 return (err); 29757 } 29758 29759 29760 /* 29761 * Function: sd_failfast_flushq 29762 * 29763 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29764 * in b_flags and move them onto the failfast queue, then kick 29765 * off a thread to return all bp's on the failfast queue to 29766 * their owners with an error set. 29767 * 29768 * Arguments: un - pointer to the soft state struct for the instance. 29769 * 29770 * Context: may execute in interrupt context. 29771 */ 29772 29773 static void 29774 sd_failfast_flushq(struct sd_lun *un) 29775 { 29776 struct buf *bp; 29777 struct buf *next_waitq_bp; 29778 struct buf *prev_waitq_bp = NULL; 29779 29780 ASSERT(un != NULL); 29781 ASSERT(mutex_owned(SD_MUTEX(un))); 29782 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29783 ASSERT(un->un_failfast_bp == NULL); 29784 29785 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29786 "sd_failfast_flushq: entry: un:0x%p\n", un); 29787 29788 /* 29789 * Check if we should flush all bufs when entering failfast state, or 29790 * just those with B_FAILFAST set. 29791 */ 29792 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29793 /* 29794 * Move *all* bp's on the wait queue to the failfast flush 29795 * queue, including those that do NOT have B_FAILFAST set. 
29796 */ 29797 if (un->un_failfast_headp == NULL) { 29798 ASSERT(un->un_failfast_tailp == NULL); 29799 un->un_failfast_headp = un->un_waitq_headp; 29800 } else { 29801 ASSERT(un->un_failfast_tailp != NULL); 29802 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29803 } 29804 29805 un->un_failfast_tailp = un->un_waitq_tailp; 29806 29807 /* update kstat for each bp moved out of the waitq */ 29808 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29809 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29810 } 29811 29812 /* empty the waitq */ 29813 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29814 29815 } else { 29816 /* 29817 * Go thru the wait queue, pick off all entries with 29818 * B_FAILFAST set, and move these onto the failfast queue. 29819 */ 29820 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29821 /* 29822 * Save the pointer to the next bp on the wait queue, 29823 * so we get to it on the next iteration of this loop. 29824 */ 29825 next_waitq_bp = bp->av_forw; 29826 29827 /* 29828 * If this bp from the wait queue does NOT have 29829 * B_FAILFAST set, just move on to the next element 29830 * in the wait queue. Note, this is the only place 29831 * where it is correct to set prev_waitq_bp. 29832 */ 29833 if ((bp->b_flags & B_FAILFAST) == 0) { 29834 prev_waitq_bp = bp; 29835 continue; 29836 } 29837 29838 /* 29839 * Remove the bp from the wait queue. 29840 */ 29841 if (bp == un->un_waitq_headp) { 29842 /* The bp is the first element of the waitq. */ 29843 un->un_waitq_headp = next_waitq_bp; 29844 if (un->un_waitq_headp == NULL) { 29845 /* The wait queue is now empty */ 29846 un->un_waitq_tailp = NULL; 29847 } 29848 } else { 29849 /* 29850 * The bp is either somewhere in the middle 29851 * or at the end of the wait queue. 29852 */ 29853 ASSERT(un->un_waitq_headp != NULL); 29854 ASSERT(prev_waitq_bp != NULL); 29855 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29856 == 0); 29857 if (bp == un->un_waitq_tailp) { 29858 /* bp is the last entry on the waitq. */ 29859 ASSERT(next_waitq_bp == NULL); 29860 un->un_waitq_tailp = prev_waitq_bp; 29861 } 29862 prev_waitq_bp->av_forw = next_waitq_bp; 29863 } 29864 bp->av_forw = NULL; 29865 29866 /* 29867 * update kstat since the bp is moved out of 29868 * the waitq 29869 */ 29870 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29871 29872 /* 29873 * Now put the bp onto the failfast queue. 29874 */ 29875 if (un->un_failfast_headp == NULL) { 29876 /* failfast queue is currently empty */ 29877 ASSERT(un->un_failfast_tailp == NULL); 29878 un->un_failfast_headp = 29879 un->un_failfast_tailp = bp; 29880 } else { 29881 /* Add the bp to the end of the failfast q */ 29882 ASSERT(un->un_failfast_tailp != NULL); 29883 ASSERT(un->un_failfast_tailp->b_flags & 29884 B_FAILFAST); 29885 un->un_failfast_tailp->av_forw = bp; 29886 un->un_failfast_tailp = bp; 29887 } 29888 } 29889 } 29890 29891 /* 29892 * Now return all bp's on the failfast queue to their owners. 29893 */ 29894 while ((bp = un->un_failfast_headp) != NULL) { 29895 29896 un->un_failfast_headp = bp->av_forw; 29897 if (un->un_failfast_headp == NULL) { 29898 un->un_failfast_tailp = NULL; 29899 } 29900 29901 /* 29902 * We want to return the bp with a failure error code, but 29903 * we do not want a call to sd_start_cmds() to occur here, 29904 * so use sd_return_failed_command_no_restart() instead of 29905 * sd_return_failed_command(). 29906 */ 29907 sd_return_failed_command_no_restart(un, bp, EIO); 29908 } 29909 29910 /* Flush the xbuf queues if required. 
 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments:	bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred
	 * to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments:	un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
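 *
 * For example, the resulting panic string takes the following form
 * (the device path shown is illustrative only):
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,8000@14/sd@1,0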
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * causing faults in multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls used to inject errors into the
 *		layer model.
 *
 * Arguments:	cmd	- the ioctl cmd received
 *		arg	- the arguments from the user; also used to
 *			return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push the stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery.
 *
 * Arguments:	buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
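 *
 *		For reference, the fifo entries consumed here are staged
 *		from userland through sd_faultinjection_ioctl(). A test
 *		harness is expected to drive a session roughly as follows
 *		(an illustrative sketch; the sd_fi_* payload structs come
 *		from sddef.h):
 *
 *		ioctl(fd, SDIOCSTART);			start a session
 *		ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	stage a pkt error
 *		ioctl(fd, SDIOCPUSH, &nerrs);		push staged errors
 *		ioctl(fd, SDIOCRUN);			arm the injection
 *		ioctl(fd, SDIOCRETRIEVE, logbuf);	collect the log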
30381 * 30382 * Arguments: pktp - packet to be changed 30383 */ 30384 30385 static void 30386 sd_faultinjection(struct scsi_pkt *pktp) 30387 { 30388 uint_t i; 30389 struct sd_fi_pkt *fi_pkt; 30390 struct sd_fi_xb *fi_xb; 30391 struct sd_fi_un *fi_un; 30392 struct sd_fi_arq *fi_arq; 30393 struct buf *bp; 30394 struct sd_xbuf *xb; 30395 struct sd_lun *un; 30396 30397 ASSERT(pktp != NULL); 30398 30399 /* pull bp xb and un from pktp */ 30400 bp = (struct buf *)pktp->pkt_private; 30401 xb = SD_GET_XBUF(bp); 30402 un = SD_GET_UN(bp); 30403 30404 ASSERT(un != NULL); 30405 30406 mutex_enter(SD_MUTEX(un)); 30407 30408 SD_TRACE(SD_LOG_SDTEST, un, 30409 "sd_faultinjection: entry Injection from sdintr\n"); 30410 30411 /* if injection is off return */ 30412 if (sd_fault_injection_on == 0 || 30413 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30414 mutex_exit(SD_MUTEX(un)); 30415 return; 30416 } 30417 30418 SD_INFO(SD_LOG_SDTEST, un, 30419 "sd_faultinjection: is working for copying\n"); 30420 30421 /* take next set off fifo */ 30422 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30423 30424 fi_pkt = un->sd_fi_fifo_pkt[i]; 30425 fi_xb = un->sd_fi_fifo_xb[i]; 30426 fi_un = un->sd_fi_fifo_un[i]; 30427 fi_arq = un->sd_fi_fifo_arq[i]; 30428 30429 30430 /* set variables accordingly */ 30431 /* set pkt if it was on fifo */ 30432 if (fi_pkt != NULL) { 30433 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30434 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30435 if (fi_pkt->pkt_cdbp != 0xff) 30436 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30437 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30438 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30439 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30440 30441 } 30442 /* set xb if it was on fifo */ 30443 if (fi_xb != NULL) { 30444 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30445 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30446 if (fi_xb->xb_retry_count != 0) 30447 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30448 SD_CONDSET(xb, xb, xb_victim_retry_count, 30449 "xb_victim_retry_count"); 30450 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30451 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30452 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30453 30454 /* copy in block data from sense */ 30455 /* 30456 * if (fi_xb->xb_sense_data[0] != -1) { 30457 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30458 * SENSE_LENGTH); 30459 * } 30460 */ 30461 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30462 30463 /* copy in extended sense codes */ 30464 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30465 xb, es_code, "es_code"); 30466 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30467 xb, es_key, "es_key"); 30468 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30469 xb, es_add_code, "es_add_code"); 30470 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30471 xb, es_qual_code, "es_qual_code"); 30472 struct scsi_extended_sense *esp; 30473 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30474 esp->es_class = CLASS_EXTENDED_SENSE; 30475 } 30476 30477 /* set un if it was on fifo */ 30478 if (fi_un != NULL) { 30479 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30480 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30481 SD_CONDSET(un, un, un_reset_retry_count, 30482 "un_reset_retry_count"); 30483 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30484 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30485 SD_CONDSET(un, un, 
		    un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether we set
	 * values or not, let's confirm they are always NULL after
	 * the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed, and the
 * "hotpluggable" property should have been processed as well.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. The differences are defined below:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide a 0x80 or
 *     0x83 VPD page (refer to the INQUIRY command of the SCSI SPC
 *     specification), a unique device ID is created to identify this
 *     device. For other non-removable media devices, a default device ID
 *     is created only if this device has at least 2 alternate cylinders.
 *     Otherwise, this device has no devid.
 *
 *     -------------------------------------------------------
 *     removable media   hotpluggable  | Can Have Device ID
 *     -------------------------------------------------------
 *         false             false     |     Yes
 *         false             true      |     Yes
 *         true                x       |     No
 *     -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *     In the SCSI specs, only some commands in the group 4 command set can
 *     use 8-byte addresses that can be used to access >2TB storage spaces.
 *     Other commands have no such capability. Without supporting group 4,
 *     it is impossible to make full use of the storage space of a disk
 *     with a capacity larger than 2TB.
 *
 *     -----------------------------------------------
 *     removable media   hotpluggable   LP64  |  Group
 *     -----------------------------------------------
 *           false          false       false |   1
 *           false          false       true  |   4
 *           false          true        false |   1
 *           false          true        true  |   4
 *           true             x           x   |   5
 *     -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check if it has a
 *     valid VTOC label. Now, sd also does that check for removable media
 *     and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access  removable media  hotpluggable |  Check Label
 *     --------------------------------------------------------------
 *         false         false            false     |   No
 *         false         false            true      |   No
 *         false         true             false     |   Yes
 *         false         true             true      |   Yes
 *         true            x                x       |   Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *     As section 3 says, sd checks whether some kinds of devices have a
 *     VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *     attempt to create a default VTOC for them. Currently sd creates a
 *     default VTOC label for all devices on the x86 platform (VTOC_16),
 *     but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *       removable media  hotpluggable  platform  |  Default Label
 *     -----------------------------------------------------------
 *             false         false       sparc    |     No
 *             false         true        x86      |     Yes
 *             false         true        sparc    |     Yes
 *             true            x           x      |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices
 *     only. For other devices, only a 512-byte blocksize is supported.
 *     This may be changed in the near future because some RAID devices
 *     require non-512-byte blocksizes.
 *
 *     -----------------------------------------------------------
 *       removable media  hotpluggable  | non-512-byte blocksize
 *     -----------------------------------------------------------
 *             false         false      |   No
 *             false         true       |   No
 *             true            x        |   Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is
 *     used to query whether a device is a removable media device. It
 *     returns 1 for removable media devices, and 0 for others.
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on the x86
 *     platform. On the SPARC platform, sd doesn't support fdisk partitions
 *     at all. Note: pcfs(7fs) can recognize fdisk partitions on both the
 *     x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *       platform  removable media  USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *        x86          X                X     |       true
 *     -----------------------------------------------------------
 *        sparc        X                X     |       false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *     support reading/writing the mboot for removable media devices on the
 *     SPARC platform.
 *
 *     -----------------------------------------------------------
 *       platform  removable media  USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *        x86          X                X     |       true
 *     -----------------------------------------------------------
 *        sparc      false            false   |       false
 *        sparc      false            true    |       true
 *        sparc      true             false   |       true
 *        sparc      true             true    |       true
 *     -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *     If a disk device fails to open, an errno is returned. For some kinds
 *     of errors, a different errno is returned depending on whether this
 *     device is a removable media device. This brings USB/1394 hard disks
 *     in line with expected hard disk behavior. It is not expected that
 *     this breaks any application.
 *
 *     ------------------------------------------------------
 *       removable media  hotpluggable  |    errno
 *     ------------------------------------------------------
 *             false         false      |   EIO
 *             false         true       |   EIO
 *             true            x        |   ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These IOCTLs are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *       removable media  hotpluggable  | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *             false         false      |     No
 *             false         true       |     No
 *             true            x        |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB and
 *     Firewire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *       removable media  hotpluggable  |   kstat
 *     ------------------------------------------------------
 *             false         false      |    Yes
 *             false         true       |    Yes
 *             true            x        |    No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. Parent nexus drivers create a "hotpluggable" property
 *     if they support hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media  hotpluggable |  "removable-media"   "hotpluggable"
 *     ---------------------------------------------------------------------
 *       false            false      |    No                    No
 *       false            true       |    No                    Yes
 *       true             false      |    Yes                   No
 *       true             true       |    Yes                   Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices or devices that
 *     support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *       removable media  hotpluggable  pm-capable  |  power manage
 *     ------------------------------------------------------------
 *             false         false       false      |     No
 *             false         false       true       |     Yes
 *             false         true        false      |     No
 *             false         true        true       |     Yes
 *             true            x           x        |     Yes
 *     ------------------------------------------------------------
 *
 *     USB and firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system environment.
 *     However, sd doesn't apply that restriction to removable media
 *     devices. Instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Therefore, using those devices on a
 *     32-bit system is partially supported, which can cause some
 *     unexpected results.
 *
 *     ---------------------------------------------------------------------
 *       removable media  USB/1394 | Capacity > 1TB |  Used in 32-bit env
 *     ---------------------------------------------------------------------
 *             false         false |      true      |     no
 *             false         true  |      true      |     no
 *             true          false |      true      |     Yes
 *             true          true  |      true      |     Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd will check whether the device is write-protected.
 *     An attempt to open a write-protected device without the NDELAY flag
 *     will fail.
 *
 *     ------------------------------------------------------------
 *       removable media  USB/1394  |   WP Check
 *     ------------------------------------------------------------
 *             false         false  |     No
 *             false         true   |     No
 *             true          false  |     Yes
 *             true          true   |     Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd only prints a
 *     syslog message for fixed SCSI disks.
 *     ------------------------------------------------------------
 *       removable media  USB/1394  |   print syslog
 *     ------------------------------------------------------------
 *             false         false  |     Yes
 *             false         true   |     No
 *             true          false  |     No
 *             true          true   |     No
 *     ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_cap;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This ensures that no SYNC CACHE command is
	 * sent when there are no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
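		 *
		 * As an aside, userland can discover this support through
		 * the DKIOCREMOVABLE ioctl described in section 6 above,
		 * for example (illustrative only):
		 *
		 *	int rm;
		 *	if (ioctl(fd, DKIOCREMOVABLE, &rm) == 0 && rm != 0)
		 *		(the device has removable media)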
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes of removable media devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. But without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * We need to start a SCSI watch thread to monitor the media
		 * state: when media is being inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check whether a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use this command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * "removable-media" for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if we are unable to create the property
		 * we do not want the attach to fail altogether. This is
		 * consistent with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is set to false (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist, then pm_cap will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded.
		 */
		pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
			un->un_f_log_sense_supported = TRUE;
			if (!un->un_f_power_condition_disabled &&
			    SD_INQUIRY(un)->inq_ansi == 6) {
				un->un_f_power_condition_supported = TRUE;
			}
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_cap to
			 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
			 * later. "TRUE" values are any values defined in
			 * inquiry.h.
			 */
			if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				/* SD_PM_CAPABLE_IS_TRUE case */
				un->un_f_pm_supported = TRUE;
				if (!un->un_f_power_condition_disabled &&
				    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
					un->un_f_power_condition_supported =
					    TRUE;
				}
				if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
					un->un_f_log_sense_supported = TRUE;
					un->un_f_pm_log_sense_smart =
					    SD_PM_CAP_SMART_LOG(pm_cap);
				}
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * We have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in system block size, reqlength is in bytes.
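 *
 *	As a worked example (illustrative only), with 512-byte system
 *	blocks and a 2048-byte target blocksize, a TG_READ of
 *	start_block 3 with reqlength 512 maps to:
 *
 *		first_byte = 3 * 512 = 1536
 *		real_addr  = 1536 / 2048 = 0	(target block 0)
 *
 *	so one full 2048-byte target block is read into a local bounce
 *	buffer and the 512 bytes at byte offset 1536 within it are
 *	copied out to the caller.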
30992 * 30993 */ 30994 static int 30995 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 30996 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 30997 { 30998 struct sd_lun *un; 30999 int path_flag = (int)(uintptr_t)tg_cookie; 31000 char *dkl = NULL; 31001 diskaddr_t real_addr = start_block; 31002 diskaddr_t first_byte, end_block; 31003 31004 size_t buffer_size = reqlength; 31005 int rval = 0; 31006 diskaddr_t cap; 31007 uint32_t lbasize; 31008 sd_ssc_t *ssc; 31009 31010 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31011 if (un == NULL) 31012 return (ENXIO); 31013 31014 if (cmd != TG_READ && cmd != TG_WRITE) 31015 return (EINVAL); 31016 31017 ssc = sd_ssc_init(un); 31018 mutex_enter(SD_MUTEX(un)); 31019 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 31020 mutex_exit(SD_MUTEX(un)); 31021 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31022 &lbasize, path_flag); 31023 if (rval != 0) 31024 goto done1; 31025 mutex_enter(SD_MUTEX(un)); 31026 sd_update_block_info(un, lbasize, cap); 31027 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 31028 mutex_exit(SD_MUTEX(un)); 31029 rval = EIO; 31030 goto done; 31031 } 31032 } 31033 31034 if (NOT_DEVBSIZE(un)) { 31035 /* 31036 * sys_blocksize != tgt_blocksize, need to re-adjust 31037 * blkno and save the index to beginning of dk_label 31038 */ 31039 first_byte = SD_SYSBLOCKS2BYTES(start_block); 31040 real_addr = first_byte / un->un_tgt_blocksize; 31041 31042 end_block = (first_byte + reqlength + 31043 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 31044 31045 /* round up buffer size to multiple of target block size */ 31046 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 31047 31048 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 31049 "label_addr: 0x%x allocation size: 0x%x\n", 31050 real_addr, buffer_size); 31051 31052 if (((first_byte % un->un_tgt_blocksize) != 0) || 31053 (reqlength % un->un_tgt_blocksize) != 0) 31054 /* the request is not aligned */ 31055 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 31056 } 31057 31058 /* 31059 * The MMC standard allows READ CAPACITY to be 31060 * inaccurate by a bounded amount (in the interest of 31061 * response latency). As a result, failed READs are 31062 * commonplace (due to the reading of metadata and not 31063 * data). Depending on the per-Vendor/drive Sense data, 31064 * the failed READ can cause many (unnecessary) retries. 31065 */ 31066 31067 if (ISCD(un) && (cmd == TG_READ) && 31068 (un->un_f_blockcount_is_valid == TRUE) && 31069 ((start_block == (un->un_blockcount - 1))|| 31070 (start_block == (un->un_blockcount - 2)))) { 31071 path_flag = SD_PATH_DIRECT_PRIORITY; 31072 } 31073 31074 mutex_exit(SD_MUTEX(un)); 31075 if (cmd == TG_READ) { 31076 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 31077 buffer_size, real_addr, path_flag); 31078 if (dkl != NULL) 31079 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 31080 real_addr), bufaddr, reqlength); 31081 } else { 31082 if (dkl) { 31083 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 31084 real_addr, path_flag); 31085 if (rval) { 31086 goto done1; 31087 } 31088 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 31089 real_addr), reqlength); 31090 } 31091 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? 
dkl: bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		((tg_attribute_t *)arg)->media_is_solid_state =
		    un->un_f_is_solid_state;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Will be called when the sd driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */

#define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ?
(d) : "unknown" 31207 31208 static void 31209 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 31210 { 31211 int uscsi_path_instance = 0; 31212 uchar_t uscsi_pkt_reason; 31213 uint32_t uscsi_pkt_state; 31214 uint32_t uscsi_pkt_statistics; 31215 uint64_t uscsi_ena; 31216 uchar_t op_code; 31217 uint8_t *sensep; 31218 union scsi_cdb *cdbp; 31219 uint_t cdblen = 0; 31220 uint_t senlen = 0; 31221 struct sd_lun *un; 31222 dev_info_t *dip; 31223 char *devid; 31224 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 31225 SSC_FLAGS_INVALID_STATUS | 31226 SSC_FLAGS_INVALID_SENSE | 31227 SSC_FLAGS_INVALID_DATA; 31228 char assessment[16]; 31229 31230 ASSERT(ssc != NULL); 31231 ASSERT(ssc->ssc_uscsi_cmd != NULL); 31232 ASSERT(ssc->ssc_uscsi_info != NULL); 31233 31234 un = ssc->ssc_un; 31235 ASSERT(un != NULL); 31236 31237 dip = un->un_sd->sd_dev; 31238 31239 /* 31240 * Get the devid: 31241 * devid will only be passed to non-transport error reports. 31242 */ 31243 devid = DEVI(dip)->devi_devid_str; 31244 31245 /* 31246 * If we are syncing or dumping, the command will not be executed 31247 * so we bypass this situation. 31248 */ 31249 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 31250 (un->un_state == SD_STATE_DUMPING)) 31251 return; 31252 31253 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 31254 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 31255 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 31256 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 31257 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 31258 31259 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 31260 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 31261 31262 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 31263 if (cdbp == NULL) { 31264 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 31265 "sd_ssc_ereport_post meet empty cdb\n"); 31266 return; 31267 } 31268 31269 op_code = cdbp->scc_cmd; 31270 31271 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 31272 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 31273 ssc->ssc_uscsi_cmd->uscsi_rqresid); 31274 31275 if (senlen > 0) 31276 ASSERT(sensep != NULL); 31277 31278 /* 31279 * Initialize drv_assess to corresponding values. 31280 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 31281 * on the sense-key returned back. 31282 */ 31283 switch (drv_assess) { 31284 case SD_FM_DRV_RECOVERY: 31285 (void) sprintf(assessment, "%s", "recovered"); 31286 break; 31287 case SD_FM_DRV_RETRY: 31288 (void) sprintf(assessment, "%s", "retry"); 31289 break; 31290 case SD_FM_DRV_NOTICE: 31291 (void) sprintf(assessment, "%s", "info"); 31292 break; 31293 case SD_FM_DRV_FATAL: 31294 default: 31295 (void) sprintf(assessment, "%s", "unknown"); 31296 } 31297 /* 31298 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 31299 * command, we will post ereport.io.scsi.cmd.disk.recovered. 31300 * driver-assessment will always be "recovered" here. 
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.recovered", uscsi_ena, devid, NULL,
		    DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected or undecodable data, post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on parameter drv_assess.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is left empty, because the
			 * undecodable content can be seen in the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}
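	/*
	 * Summary sketch of the classification performed below
	 * (illustrative only, compiled out; "class" is a hypothetical
	 * local).  See also the uderr case handled above.
	 */
#if 0
	const char *class;

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT))
		class = "cmd.disk.tran";	/* transport error */
	else if (senlen > 0)
		class = (scsi_sense_key(sensep) == 0x3) ?
		    "cmd.disk.dev.rqs.merr" :	/* medium error */
		    "cmd.disk.dev.rqs.derr";	/* other device error */
	else
		class = "cmd.disk.dev.serr";	/* bad status, no sense */
#endif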
31367 "fail" : assessment, 31368 "op-code", DATA_TYPE_UINT8, op_code, 31369 "cdb", DATA_TYPE_UINT8_ARRAY, 31370 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31371 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31372 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31373 "pkt-stats", DATA_TYPE_UINT32, 31374 uscsi_pkt_statistics, 31375 "stat-code", DATA_TYPE_UINT8, 31376 ssc->ssc_uscsi_cmd->uscsi_status, 31377 "un-decode-info", DATA_TYPE_STRING, 31378 ssc->ssc_info, 31379 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31380 0, NULL, 31381 NULL); 31382 } 31383 ssc->ssc_flags &= ~ssc_invalid_flags; 31384 return; 31385 } 31386 31387 if (uscsi_pkt_reason != CMD_CMPLT || 31388 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 31389 /* 31390 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 31391 * set inside sd_start_cmds due to errors(bad packet or 31392 * fatal transport error), we should take it as a 31393 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 31394 * driver-assessment will be set based on drv_assess. 31395 * We will set devid to NULL because it is a transport 31396 * error. 31397 */ 31398 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 31399 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 31400 31401 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, 31402 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL, 31403 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31404 DEVID_IF_KNOWN(devid), 31405 "driver-assessment", DATA_TYPE_STRING, 31406 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31407 "op-code", DATA_TYPE_UINT8, op_code, 31408 "cdb", DATA_TYPE_UINT8_ARRAY, 31409 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31410 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31411 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 31412 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 31413 NULL); 31414 } else { 31415 /* 31416 * If we got here, we have a completed command, and we need 31417 * to further investigate the sense data to see what kind 31418 * of ereport we should post. 31419 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 31420 * if sense-key == 0x3. 31421 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 31422 * driver-assessment will be set based on the parameter 31423 * drv_assess. 31424 */ 31425 if (senlen > 0) { 31426 /* 31427 * Here we have sense data available. 31428 */ 31429 uint8_t sense_key; 31430 sense_key = scsi_sense_key(sensep); 31431 if (sense_key == 0x3) { 31432 /* 31433 * sense-key == 0x3(medium error), 31434 * driver-assessment should be "fatal" if 31435 * drv_assess is SD_FM_DRV_FATAL. 31436 */ 31437 scsi_fm_ereport_post(un->un_sd, 31438 uscsi_path_instance, NULL, 31439 "cmd.disk.dev.rqs.merr", 31440 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, 31441 FM_VERSION, DATA_TYPE_UINT8, 31442 FM_EREPORT_VERS0, 31443 DEVID_IF_KNOWN(devid), 31444 "driver-assessment", 31445 DATA_TYPE_STRING, 31446 drv_assess == SD_FM_DRV_FATAL ? 
31447 "fatal" : assessment, 31448 "op-code", 31449 DATA_TYPE_UINT8, op_code, 31450 "cdb", 31451 DATA_TYPE_UINT8_ARRAY, cdblen, 31452 ssc->ssc_uscsi_cmd->uscsi_cdb, 31453 "pkt-reason", 31454 DATA_TYPE_UINT8, uscsi_pkt_reason, 31455 "pkt-state", 31456 DATA_TYPE_UINT8, uscsi_pkt_state, 31457 "pkt-stats", 31458 DATA_TYPE_UINT32, 31459 uscsi_pkt_statistics, 31460 "stat-code", 31461 DATA_TYPE_UINT8, 31462 ssc->ssc_uscsi_cmd->uscsi_status, 31463 "key", 31464 DATA_TYPE_UINT8, 31465 scsi_sense_key(sensep), 31466 "asc", 31467 DATA_TYPE_UINT8, 31468 scsi_sense_asc(sensep), 31469 "ascq", 31470 DATA_TYPE_UINT8, 31471 scsi_sense_ascq(sensep), 31472 "sense-data", 31473 DATA_TYPE_UINT8_ARRAY, 31474 senlen, sensep, 31475 "lba", 31476 DATA_TYPE_UINT64, 31477 ssc->ssc_uscsi_info->ui_lba, 31478 NULL); 31479 } else { 31480 /* 31481 * if sense-key == 0x4(hardware 31482 * error), driver-assessment should 31483 * be "fatal" if drv_assess is 31484 * SD_FM_DRV_FATAL. 31485 */ 31486 scsi_fm_ereport_post(un->un_sd, 31487 uscsi_path_instance, NULL, 31488 "cmd.disk.dev.rqs.derr", 31489 uscsi_ena, devid, 31490 NULL, DDI_NOSLEEP, NULL, 31491 FM_VERSION, 31492 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31493 DEVID_IF_KNOWN(devid), 31494 "driver-assessment", 31495 DATA_TYPE_STRING, 31496 drv_assess == SD_FM_DRV_FATAL ? 31497 (sense_key == 0x4 ? 31498 "fatal" : "fail") : assessment, 31499 "op-code", 31500 DATA_TYPE_UINT8, op_code, 31501 "cdb", 31502 DATA_TYPE_UINT8_ARRAY, cdblen, 31503 ssc->ssc_uscsi_cmd->uscsi_cdb, 31504 "pkt-reason", 31505 DATA_TYPE_UINT8, uscsi_pkt_reason, 31506 "pkt-state", 31507 DATA_TYPE_UINT8, uscsi_pkt_state, 31508 "pkt-stats", 31509 DATA_TYPE_UINT32, 31510 uscsi_pkt_statistics, 31511 "stat-code", 31512 DATA_TYPE_UINT8, 31513 ssc->ssc_uscsi_cmd->uscsi_status, 31514 "key", 31515 DATA_TYPE_UINT8, 31516 scsi_sense_key(sensep), 31517 "asc", 31518 DATA_TYPE_UINT8, 31519 scsi_sense_asc(sensep), 31520 "ascq", 31521 DATA_TYPE_UINT8, 31522 scsi_sense_ascq(sensep), 31523 "sense-data", 31524 DATA_TYPE_UINT8_ARRAY, 31525 senlen, sensep, 31526 NULL); 31527 } 31528 } else { 31529 /* 31530 * For stat_code == STATUS_GOOD, this is not a 31531 * hardware error. 31532 */ 31533 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31534 return; 31535 31536 /* 31537 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31538 * stat-code but with sense data unavailable. 31539 * driver-assessment will be set based on parameter 31540 * drv_assess. 31541 */ 31542 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31543 NULL, 31544 "cmd.disk.dev.serr", uscsi_ena, 31545 devid, NULL, DDI_NOSLEEP, NULL, 31546 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31547 DEVID_IF_KNOWN(devid), 31548 "driver-assessment", DATA_TYPE_STRING, 31549 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31550 "op-code", DATA_TYPE_UINT8, op_code, 31551 "cdb", 31552 DATA_TYPE_UINT8_ARRAY, 31553 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31554 "pkt-reason", 31555 DATA_TYPE_UINT8, uscsi_pkt_reason, 31556 "pkt-state", 31557 DATA_TYPE_UINT8, uscsi_pkt_state, 31558 "pkt-stats", 31559 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31560 "stat-code", 31561 DATA_TYPE_UINT8, 31562 ssc->ssc_uscsi_cmd->uscsi_status, 31563 NULL); 31564 } 31565 } 31566 } 31567 31568 /* 31569 * Function: sd_ssc_extract_info 31570 * 31571 * Description: Extract information available to help generate ereport. 31572 * 31573 * Context: Kernel thread or interrupt context. 
/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information from a completed command to help
 *		generate an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is available;
	 * calculate the sense data length first.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * For the arq case, we enter here.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * For the non-arq case, we enter this branch.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when scsi_pkt was properly allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * Do not generate an ENA for a command that completed successfully
	 * (no transport error, good status, and no sense data); otherwise a
	 * successful partial read/write could be recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ENA for that command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
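/*
 * Decode sketch for the Block Device Characteristics VPD page (0xB1)
 * consumed by sd_check_solid_state() below (illustrative, compiled out).
 * Per SBC, bytes 4-5 of the page form the big-endian MEDIUM ROTATION
 * RATE; the value 0x0001 identifies non-rotating (solid state) media.
 */
#if 0
	uint16_t rotation_rate = (inqb1[4] << 8) | inqb1[5];
	boolean_t is_ssd = (rotation_rate == 1);
#endif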
/*
 * Function: sd_check_solid_state
 *
 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
 *		supports VPD page 0xb1, sd examines the MEDIUM ROTATION
 *		RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
 *		device is a solid state drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_solid_state(sd_ssc_t *ssc)
{
	int		rval = 0;
	uchar_t		*inqb1 = NULL;
	size_t		inqb1_len = MAX_INQUIRY_SIZE;
	size_t		inqb1_resid = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	un->un_f_is_solid_state = FALSE;

	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page b1 data */
		inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
		    0x01, 0xB1, &inqb1_resid);

		if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_solid_state: successfully got VPD "
			    "page: %x PAGE LENGTH: %x BYTE 4: %x BYTE 5: %x",
			    inqb1[1], inqb1[3], inqb1[4], inqb1[5]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * Check the MEDIUM ROTATION RATE. If it is set
			 * to 1, the device is a solid state drive.
			 */
			if (inqb1[4] == 0 && inqb1[5] == 1) {
				un->un_f_is_solid_state = TRUE;
				/* solid state drives don't need disksort */
				un->un_f_disksort_disabled = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inqb1, inqb1_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
}

/*
 * Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is in emulation mode by issuing
 *		READ_CAPACITY_16 to obtain the physical block size of
 *		the drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
	int		rval = 0;
	uint64_t	capacity;
	uint_t		lbasize;
	uint_t		pbsize;
	int		i;
	int		devid_len;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (un->un_f_descr_format_supported) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));

		if (rval != 0) {
			un->un_phy_blocksize = DEV_BSIZE;
		} else {
			if (pbsize == 0 || !ISP2(pbsize % DEV_BSIZE)) {
				un->un_phy_blocksize = DEV_BSIZE;
			} else if (pbsize > un->un_phy_blocksize) {
				/*
				 * Don't reset the physical blocksize
				 * unless we've detected a larger value.
				 */
				un->un_phy_blocksize = pbsize;
			}
		}
	}

	for (i = 0; i < sd_flash_dev_table_size; i++) {
		devid_len = (int)strlen(sd_flash_dev_table[i]);
		if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
		    == SD_SUCCESS) {
			un->un_phy_blocksize = SSD_SECSIZE;
			if (un->un_f_is_solid_state &&
			    un->un_phy_blocksize != un->un_tgt_blocksize)
				un->un_f_enable_rmw = TRUE;
		}
	}

	mutex_exit(SD_MUTEX(un));
}
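/*
 * Worked example for the emulation-mode check above (illustrative,
 * compiled out; the numbers are hypothetical).  A flash device that
 * emulates 512-byte logical blocks over larger physical blocks ("512e"
 * style emulation) ends up with un_tgt_blocksize = 512 while
 * un_phy_blocksize is raised to the reported physical size (or to
 * SSD_SECSIZE after a sd_flash_dev_table match).  Since the two sizes no
 * longer match, un_f_enable_rmw is set and sd services unaligned I/O
 * with read-modify-write cycles on physical-block boundaries.
 */
#if 0
	uint_t lbasize = 512;		/* logical block size (hypothetical) */
	uint_t pbsize = 4096;		/* physical block size (hypothetical) */
	boolean_t needs_rmw = (pbsize != lbasize);	/* B_TRUE */
#endif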