/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property.  The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
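/*
 * Illustrative sketch, not part of the driver: one way the
 * "interconnect-type" check described above could look.  The helper name
 * and the guard macro are hypothetical; scsi_ifgetcap() and the
 * INTERCONNECT_* values come from the SCSA headers, and the
 * SD_INTERCONNECT_* codes referenced here are defined further down in
 * this file.
 */
#ifdef	SD_INTERCONNECT_SKETCH
static int
sd_example_interconnect_type(struct scsi_address *ap)
{
	switch (scsi_ifgetcap(ap, "interconnect-type", 1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		/* HBA explicitly reports an FC interconnect: ssd behavior */
		return (SD_INTERCONNECT_FIBRE);
	default:
		/*
		 * Property missing, or an unsupported type (e.g. 1394, USB):
		 * fall back to the compile-time default, i.e. parallel SCSI
		 * behavior for the sd flavor of the driver.
		 */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}
#endif	/* SD_INTERCONNECT_SKETCH */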

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;
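/*
 * For example (hypothetical mask values; the real bit definitions live in
 * sddef.h), debug output for all components at all levels could be enabled
 * persistently from /etc/system:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 *
 * (use "ssd:" for the fibre channel flavor of the driver)
 */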

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
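/*
 * Illustrative sketch, not the driver's actual routine: with the cache in
 * place, deciding whether to probe a target reduces to an array lookup,
 * and the (potentially slow) scsi_probe() selection timeout is only paid
 * when the cache does not already record a no-response target.  The helper
 * name and guard macro are hypothetical.
 */
#ifdef	SD_PROBE_CACHE_SKETCH
static int
sd_example_probe_with_cache(struct sd_scsi_probe_cache *cp, int tgt,
    struct scsi_device *devp, int (*waitfn)())
{
	if (cp->cache[tgt] == SCSIPROBE_NORESP) {
		/*
		 * A previous probe of this target saw no response;
		 * returning the cached result avoids another long
		 * selection timeout.
		 */
		return (SCSIPROBE_NORESP);
	}
	/* Device responded before (or was never probed): probe for real */
	cp->cache[tgt] = scsi_probe(devp, waitfn);
	return (cp->cache[tgt]);
}
#endif	/* SD_PROBE_CACHE_SKETCH */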


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif
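/*
 * Note (inferred from the initializers above and the SD_CONF_BSET_* usage
 * below; the authoritative layout is the sd_tunables definition in
 * sddef.h): the positional members of the initializers appear to be, in
 * order: throttle, ctype, not-ready retry count, busy retry count, reset
 * retry count, reserve release time, min throttle, disksort disabled
 * flag, and lun reset enabled flag.  lsi_oem_properties carries one
 * additional trailing member, used by table entries that also set
 * SD_CONF_BSET_CACHE_IS_NV.
 */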

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".  (A sketch of this blank-collapsing comparison
 * follows the table below.)
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM DDYFT1835",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM DDYFT3695",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2D2",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2PR",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM 1724-100",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-2xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-22x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-4xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-42x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-3xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3526",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3542",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3552",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1722",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1742",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1815",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",      SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",      SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",  SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",            SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER CP30540",   SD_CONF_BSET_NOCACHE,   NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
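/*
 * Illustrative sketch, not the driver's sd_blank_cmp(): one way to realize
 * the blank-collapsing match described above the table.  Each id string is
 * first canonicalized (leading and trailing blanks dropped, interior runs
 * of blanks squeezed to one), after which a plain strcmp() of the canonical
 * forms decides the match; " NEC CDROM " and "NEC CDROM" then compare
 * equal.  The helper names and the guard macro are hypothetical.
 */
#ifdef	SD_BLANK_CMP_SKETCH
static void
sd_example_squeeze_blanks(char *dst, size_t dstlen, const char *src)
{
	size_t	n = 0;

	while (*src == ' ')
		src++;				/* skip leading blanks */
	while (*src != '\0' && n + 1 < dstlen) {
		if (*src == ' ') {
			while (*src == ' ')
				src++;		/* squeeze a run of blanks */
			if (*src == '\0')
				break;		/* ...and drop trailing ones */
			dst[n++] = ' ';
		} else {
			dst[n++] = *src++;
		}
	}
	dst[n] = '\0';
}

static int
sd_example_blank_cmp(const char *table_id, const char *inq_id)
{
	char	t[64], d[64];	/* ample for concatenated vid+pid+rev */

	sd_example_squeeze_blanks(t, sizeof (t), table_id);
	sd_example_squeeze_blanks(d, sizeof (d), inq_id);
	return (strcmp(t, d) == 0);
}
#endif	/* SD_BLANK_CMP_SKETCH */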



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int  sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int   sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int   sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void  sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void  sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
1261 */ 1262 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp); 1263 static void sdintr(struct scsi_pkt *pktp); 1264 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp); 1265 1266 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1267 enum uio_seg dataspace, int path_flag); 1268 1269 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen, 1270 daddr_t blkno, int (*func)(struct buf *)); 1271 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen, 1272 uint_t bflags, daddr_t blkno, int (*func)(struct buf *)); 1273 static void sd_bioclone_free(struct buf *bp); 1274 static void sd_shadow_buf_free(struct buf *bp); 1275 1276 static void sd_print_transport_rejected_message(struct sd_lun *un, 1277 struct sd_xbuf *xp, int code); 1278 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, 1279 void *arg, int code); 1280 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, 1281 void *arg, int code); 1282 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, 1283 void *arg, int code); 1284 1285 static void sd_retry_command(struct sd_lun *un, struct buf *bp, 1286 int retry_check_flag, 1287 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, 1288 int c), 1289 void *user_arg, int failure_code, clock_t retry_delay, 1290 void (*statp)(kstat_io_t *)); 1291 1292 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp, 1293 clock_t retry_delay, void (*statp)(kstat_io_t *)); 1294 1295 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 1296 struct scsi_pkt *pktp); 1297 static void sd_start_retry_command(void *arg); 1298 static void sd_start_direct_priority_command(void *arg); 1299 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp, 1300 int errcode); 1301 static void sd_return_failed_command_no_restart(struct sd_lun *un, 1302 struct buf *bp, int errcode); 1303 static void sd_return_command(struct sd_lun *un, struct buf *bp); 1304 static void sd_sync_with_callback(struct sd_lun *un); 1305 static int sdrunout(caddr_t arg); 1306 1307 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp); 1308 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp); 1309 1310 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type); 1311 static void sd_restore_throttle(void *arg); 1312 1313 static void sd_init_cdb_limits(struct sd_lun *un); 1314 1315 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 1316 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1317 1318 /* 1319 * Error handling functions 1320 */ 1321 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 1322 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1323 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, 1324 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1325 static void sd_pkt_status_reservation_conflict(struct sd_lun *un, 1326 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1327 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 1328 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1329 1330 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp, 1331 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1332 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 1333 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1334 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp, 1335 struct sd_xbuf *xp); 1336 static void 
sd_decode_sense(struct sd_lun *un, struct buf *bp, 1337 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1338 1339 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp, 1340 void *arg, int code); 1341 1342 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 1343 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1344 static void sd_sense_key_recoverable_error(struct sd_lun *un, 1345 uint8_t *sense_datap, 1346 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1347 static void sd_sense_key_not_ready(struct sd_lun *un, 1348 uint8_t *sense_datap, 1349 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1350 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 1351 uint8_t *sense_datap, 1352 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1353 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 1354 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1355 static void sd_sense_key_unit_attention(struct sd_lun *un, 1356 uint8_t *sense_datap, 1357 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1358 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 1359 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1360 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 1361 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1362 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 1363 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1364 static void sd_sense_key_default(struct sd_lun *un, 1365 uint8_t *sense_datap, 1366 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1367 1368 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp, 1369 void *arg, int flag); 1370 1371 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 1372 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1373 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 1374 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1375 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 1376 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1377 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 1378 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1379 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 1380 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1381 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 1382 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1383 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 1384 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1385 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 1386 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1387 1388 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp); 1389 1390 static void sd_start_stop_unit_callback(void *arg); 1391 static void sd_start_stop_unit_task(void *arg); 1392 1393 static void sd_taskq_create(void); 1394 static void sd_taskq_delete(void); 1395 static void sd_media_change_task(void *arg); 1396 1397 static int sd_handle_mchange(struct sd_lun *un); 1398 static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag); 1399 static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, 1400 uint32_t *lbap, int path_flag); 1401 static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 1402 uint32_t *lbap, int path_flag); 1403 static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, 1404 int 
path_flag); 1405 static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, 1406 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1407 static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag); 1408 static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, 1409 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1410 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, 1411 uchar_t usr_cmd, uchar_t *usr_bufp); 1412 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1413 struct dk_callback *dkc); 1414 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1415 static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, 1416 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1417 uchar_t *bufaddr, uint_t buflen, int path_flag); 1418 static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 1419 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1420 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1421 static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, 1422 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1423 static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, 1424 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1425 static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 1426 size_t buflen, daddr_t start_block, int path_flag); 1427 #define sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag) \ 1428 sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \ 1429 path_flag) 1430 #define sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag) \ 1431 sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\ 1432 path_flag) 1433 1434 static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, 1435 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1436 uint16_t param_ptr, int path_flag); 1437 1438 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1439 static void sd_free_rqs(struct sd_lun *un); 1440 1441 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1442 uchar_t *data, int len, int fmt); 1443 static void sd_panic_for_res_conflict(struct sd_lun *un); 1444 1445 /* 1446 * Disk Ioctl Function Prototypes 1447 */ 1448 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1449 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1450 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1451 1452 /* 1453 * Multi-host Ioctl Prototypes 1454 */ 1455 static int sd_check_mhd(dev_t dev, int interval); 1456 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1457 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1458 static char *sd_sname(uchar_t status); 1459 static void sd_mhd_resvd_recover(void *arg); 1460 static void sd_resv_reclaim_thread(); 1461 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1462 static int sd_reserve_release(dev_t dev, int cmd); 1463 static void sd_rmv_resv_reclaim_req(dev_t dev); 1464 static void sd_mhd_reset_notify_cb(caddr_t arg); 1465 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1466 mhioc_inkeys_t *usrp, int flag); 1467 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1468 mhioc_inresvs_t *usrp, int flag); 1469 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1470 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, 
int flag); 1471 static int sd_mhdioc_release(dev_t dev); 1472 static int sd_mhdioc_register_devid(dev_t dev); 1473 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1474 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1475 1476 /* 1477 * SCSI removable prototypes 1478 */ 1479 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1480 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1481 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1482 static int sr_pause_resume(dev_t dev, int mode); 1483 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1484 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1489 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1492 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1494 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1495 static int sr_eject(dev_t dev); 1496 static void sr_ejected(register struct sd_lun *un); 1497 static int sr_check_wp(dev_t dev); 1498 static int sd_check_media(dev_t dev, enum dkio_state state); 1499 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1500 static void sd_delayed_cv_broadcast(void *arg); 1501 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1503 1504 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1505 1506 /* 1507 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1508 */ 1509 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1510 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1511 static void sd_wm_cache_destructor(void *wm, void *un); 1512 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1513 daddr_t endb, ushort_t typ); 1514 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1515 daddr_t endb); 1516 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1517 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1518 static void sd_read_modify_write_task(void * arg); 1519 static int 1520 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1521 struct buf **bpp); 1522 1523 1524 /* 1525 * Function prototypes for failfast support. 
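* (Bufs marked with B_FAILFAST are the candidates for quick failure; see the SD_FAILFAST_* constants and sd_failfast_flushctl below for how the driver's queues are flushed once an instance enters the failfast state.)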
1526 */ 1527 static void sd_failfast_flushq(struct sd_lun *un); 1528 static int sd_failfast_flushq_callback(struct buf *bp); 1529 1530 /* 1531 * Function prototypes to check for lsi devices 1532 */ 1533 static void sd_is_lsi(struct sd_lun *un); 1534 1535 /* 1536 * Function prototypes for x86 support 1537 */ 1538 #if defined(__i386) || defined(__amd64) 1539 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1540 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1541 #endif 1542 1543 1544 /* Function prototypes for cmlb */ 1545 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1546 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1547 1548 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1549 1550 /* 1551 * Constants for failfast support: 1552 * 1553 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1554 * failfast processing being performed. 1555 * 1556 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1557 * failfast processing on all bufs with B_FAILFAST set. 1558 */ 1559 1560 #define SD_FAILFAST_INACTIVE 0 1561 #define SD_FAILFAST_ACTIVE 1 1562 1563 /* 1564 * Bitmask to control behavior of buf(9S) flushes when a transition to 1565 * the failfast state occurs. Optional bits include: 1566 * 1567 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1568 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1569 * be flushed. 1570 * 1571 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1572 * driver, in addition to the regular wait queue. This includes the xbuf 1573 * queues. When clear, only the driver's wait queue will be flushed. 1574 */ 1575 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1576 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1577 1578 /* 1579 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1580 * to flush all queues within the driver. 1581 */ 1582 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1583 1584 1585 /* 1586 * SD Testing Fault Injection 1587 */ 1588 #ifdef SD_FAULT_INJECTION 1589 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1590 static void sd_faultinjection(struct scsi_pkt *pktp); 1591 static void sd_injection_log(char *buf, struct sd_lun *un); 1592 #endif 1593 1594 /* 1595 * Device driver ops vector 1596 */ 1597 static struct cb_ops sd_cb_ops = { 1598 sdopen, /* open */ 1599 sdclose, /* close */ 1600 sdstrategy, /* strategy */ 1601 nodev, /* print */ 1602 sddump, /* dump */ 1603 sdread, /* read */ 1604 sdwrite, /* write */ 1605 sdioctl, /* ioctl */ 1606 nodev, /* devmap */ 1607 nodev, /* mmap */ 1608 nodev, /* segmap */ 1609 nochpoll, /* poll */ 1610 sd_prop_op, /* cb_prop_op */ 1611 0, /* streamtab */ 1612 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1613 CB_REV, /* cb_rev */ 1614 sdaread, /* async I/O read entry point */ 1615 sdawrite /* async I/O write entry point */ 1616 }; 1617 1618 static struct dev_ops sd_ops = { 1619 DEVO_REV, /* devo_rev, */ 1620 0, /* refcnt */ 1621 sdinfo, /* info */ 1622 nulldev, /* identify */ 1623 sdprobe, /* probe */ 1624 sdattach, /* attach */ 1625 sddetach, /* detach */ 1626 nodev, /* reset */ 1627 &sd_cb_ops, /* driver operations */ 1628 NULL, /* bus operations */ 1629 sdpower /* power */ 1630 }; 1631 1632 1633 /* 1634 * This is the loadable module wrapper. 
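* The _init(9E), _fini(9E) and _info(9E) entry points below register and unregister the driver via mod_install(9F), mod_remove(9F) and mod_info(9F), using the modldrv/modlinkage structures that follow.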
1635 */ 1636 #include <sys/modctl.h> 1637 1638 static struct modldrv modldrv = { 1639 &mod_driverops, /* Type of module. This one is a driver */ 1640 SD_MODULE_NAME, /* Module name. */ 1641 &sd_ops /* driver ops */ 1642 }; 1643 1644 1645 static struct modlinkage modlinkage = { 1646 MODREV_1, 1647 &modldrv, 1648 NULL 1649 }; 1650 1651 static cmlb_tg_ops_t sd_tgops = { 1652 TG_DK_OPS_VERSION_1, 1653 sd_tg_rdwr, 1654 sd_tg_getinfo 1655 }; 1656 1657 static struct scsi_asq_key_strings sd_additional_codes[] = { 1658 0x81, 0, "Logical Unit is Reserved", 1659 0x85, 0, "Audio Address Not Valid", 1660 0xb6, 0, "Media Load Mechanism Failed", 1661 0xB9, 0, "Audio Play Operation Aborted", 1662 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1663 0x53, 2, "Medium removal prevented", 1664 0x6f, 0, "Authentication failed during key exchange", 1665 0x6f, 1, "Key not present", 1666 0x6f, 2, "Key not established", 1667 0x6f, 3, "Read without proper authentication", 1668 0x6f, 4, "Mismatched region to this logical unit", 1669 0x6f, 5, "Region reset count error", 1670 0xffff, 0x0, NULL 1671 }; 1672 1673 1674 /* 1675 * Struct for passing printing information for sense data messages 1676 */ 1677 struct sd_sense_info { 1678 int ssi_severity; 1679 int ssi_pfa_flag; 1680 }; 1681 1682 /* 1683 * Table of function pointers for iostart-side routines. Separate "chains" 1684 * of layered function calls are formed by placing the function pointers 1685 * sequentially in the desired order. Functions are called according to an 1686 * incrementing table index ordering. The last function in each chain must 1687 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1688 * in the sd_iodone_chain[] array. 1689 * 1690 * Note: It may seem more natural to organize both the iostart and iodone 1691 * functions together, into an array of structures (or some similar 1692 * organization) with a common index, rather than two separate arrays which 1693 * must be maintained in synchronization. The purpose of this division is 1694 * to achieve improved performance: individual arrays allow for more 1695 * effective cache line utilization on certain platforms.
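* * For example (illustrative), a buf entering the disk chain (PM enabled) starts at index 0 and is handed down the chain one index at a time: * SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp); * -> sd_mapblockaddr_iostart(0, un, bp) * -> sd_pm_iostart(1, un, bp) * -> sd_core_iostart(2, un, bp)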
1696 */ 1697 1698 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1699 1700 1701 static sd_chain_t sd_iostart_chain[] = { 1702 1703 /* Chain for buf IO for disk drive targets (PM enabled) */ 1704 sd_mapblockaddr_iostart, /* Index: 0 */ 1705 sd_pm_iostart, /* Index: 1 */ 1706 sd_core_iostart, /* Index: 2 */ 1707 1708 /* Chain for buf IO for disk drive targets (PM disabled) */ 1709 sd_mapblockaddr_iostart, /* Index: 3 */ 1710 sd_core_iostart, /* Index: 4 */ 1711 1712 /* Chain for buf IO for removable-media targets (PM enabled) */ 1713 sd_mapblockaddr_iostart, /* Index: 5 */ 1714 sd_mapblocksize_iostart, /* Index: 6 */ 1715 sd_pm_iostart, /* Index: 7 */ 1716 sd_core_iostart, /* Index: 8 */ 1717 1718 /* Chain for buf IO for removable-media targets (PM disabled) */ 1719 sd_mapblockaddr_iostart, /* Index: 9 */ 1720 sd_mapblocksize_iostart, /* Index: 10 */ 1721 sd_core_iostart, /* Index: 11 */ 1722 1723 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1724 sd_mapblockaddr_iostart, /* Index: 12 */ 1725 sd_checksum_iostart, /* Index: 13 */ 1726 sd_pm_iostart, /* Index: 14 */ 1727 sd_core_iostart, /* Index: 15 */ 1728 1729 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1730 sd_mapblockaddr_iostart, /* Index: 16 */ 1731 sd_checksum_iostart, /* Index: 17 */ 1732 sd_core_iostart, /* Index: 18 */ 1733 1734 /* Chain for USCSI commands (all targets) */ 1735 sd_pm_iostart, /* Index: 19 */ 1736 sd_core_iostart, /* Index: 20 */ 1737 1738 /* Chain for checksumming USCSI commands (all targets) */ 1739 sd_checksum_uscsi_iostart, /* Index: 21 */ 1740 sd_pm_iostart, /* Index: 22 */ 1741 sd_core_iostart, /* Index: 23 */ 1742 1743 /* Chain for "direct" USCSI commands (all targets) */ 1744 sd_core_iostart, /* Index: 24 */ 1745 1746 /* Chain for "direct priority" USCSI commands (all targets) */ 1747 sd_core_iostart, /* Index: 25 */ 1748 }; 1749 1750 /* 1751 * Macros to locate the first function of each iostart chain in the 1752 * sd_iostart_chain[] array. These are located by the index in the array. 1753 */ 1754 #define SD_CHAIN_DISK_IOSTART 0 1755 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1756 #define SD_CHAIN_RMMEDIA_IOSTART 5 1757 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1758 #define SD_CHAIN_CHKSUM_IOSTART 12 1759 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1760 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1761 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1762 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1763 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1764 1765 1766 /* 1767 * Table of function pointers for the iodone-side routines for the driver- 1768 * internal layering mechanism. The calling sequence for iodone routines 1769 * uses a decrementing table index, so the last routine called in a chain 1770 * must be at the lowest array index location for that chain. The last 1771 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1772 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1773 * of the functions in an iodone side chain must correspond to the ordering 1774 * of the iostart routines for that chain. Note that there is no iodone 1775 * side routine that corresponds to sd_core_iostart(), so there is no 1776 * entry in the table for this. 
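* * For example (illustrative), iodone processing for the disk chain (PM enabled) begins at that chain's highest index and walks down: * SD_BEGIN_IODONE(SD_CHAIN_DISK_IODONE, un, bp); * -> sd_pm_iodone(2, un, bp) * -> sd_mapblockaddr_iodone(1, un, bp) * -> sd_buf_iodone(0, un, bp)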
1777 */ 1778 1779 static sd_chain_t sd_iodone_chain[] = { 1780 1781 /* Chain for buf IO for disk drive targets (PM enabled) */ 1782 sd_buf_iodone, /* Index: 0 */ 1783 sd_mapblockaddr_iodone, /* Index: 1 */ 1784 sd_pm_iodone, /* Index: 2 */ 1785 1786 /* Chain for buf IO for disk drive targets (PM disabled) */ 1787 sd_buf_iodone, /* Index: 3 */ 1788 sd_mapblockaddr_iodone, /* Index: 4 */ 1789 1790 /* Chain for buf IO for removable-media targets (PM enabled) */ 1791 sd_buf_iodone, /* Index: 5 */ 1792 sd_mapblockaddr_iodone, /* Index: 6 */ 1793 sd_mapblocksize_iodone, /* Index: 7 */ 1794 sd_pm_iodone, /* Index: 8 */ 1795 1796 /* Chain for buf IO for removable-media targets (PM disabled) */ 1797 sd_buf_iodone, /* Index: 9 */ 1798 sd_mapblockaddr_iodone, /* Index: 10 */ 1799 sd_mapblocksize_iodone, /* Index: 11 */ 1800 1801 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1802 sd_buf_iodone, /* Index: 12 */ 1803 sd_mapblockaddr_iodone, /* Index: 13 */ 1804 sd_checksum_iodone, /* Index: 14 */ 1805 sd_pm_iodone, /* Index: 15 */ 1806 1807 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1808 sd_buf_iodone, /* Index: 16 */ 1809 sd_mapblockaddr_iodone, /* Index: 17 */ 1810 sd_checksum_iodone, /* Index: 18 */ 1811 1812 /* Chain for USCSI commands (non-checksum targets) */ 1813 sd_uscsi_iodone, /* Index: 19 */ 1814 sd_pm_iodone, /* Index: 20 */ 1815 1816 /* Chain for USCSI commands (checksum targets) */ 1817 sd_uscsi_iodone, /* Index: 21 */ 1818 sd_checksum_uscsi_iodone, /* Index: 22 */ 1819 sd_pm_iodone, /* Index: 23 */ 1820 1821 /* Chain for "direct" USCSI commands (all targets) */ 1822 sd_uscsi_iodone, /* Index: 24 */ 1823 1824 /* Chain for "direct priority" USCSI commands (all targets) */ 1825 sd_uscsi_iodone, /* Index: 25 */ 1826 }; 1827 1828 1829 /* 1830 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1831 * each iodone-side chain. These are located by the array index, but as the 1832 * iodone side functions are called in a decrementing-index order, the 1833 * highest index number in each chain must be specified (as these correspond 1834 * to the first function in the iodone chain that will be called by the core 1835 * at IO completion time). 1836 */ 1837 1838 #define SD_CHAIN_DISK_IODONE 2 1839 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1840 #define SD_CHAIN_RMMEDIA_IODONE 8 1841 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1842 #define SD_CHAIN_CHKSUM_IODONE 15 1843 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1844 #define SD_CHAIN_USCSI_CMD_IODONE 20 1845 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1846 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1847 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1848 1849 1850 1851 1852 /* 1853 * Array to map a layering chain index to the appropriate initpkt routine. 1854 * The redundant entries are present so that the index used for accessing 1855 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1856 * with this table as well.
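* For example, sd_initpkt_map[xp->xb_chain_iostart] is the initpkt routine matching whichever chain a given xbuf was started on.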
1857 */ 1858 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1859 1860 static sd_initpkt_t sd_initpkt_map[] = { 1861 1862 /* Chain for buf IO for disk drive targets (PM enabled) */ 1863 sd_initpkt_for_buf, /* Index: 0 */ 1864 sd_initpkt_for_buf, /* Index: 1 */ 1865 sd_initpkt_for_buf, /* Index: 2 */ 1866 1867 /* Chain for buf IO for disk drive targets (PM disabled) */ 1868 sd_initpkt_for_buf, /* Index: 3 */ 1869 sd_initpkt_for_buf, /* Index: 4 */ 1870 1871 /* Chain for buf IO for removable-media targets (PM enabled) */ 1872 sd_initpkt_for_buf, /* Index: 5 */ 1873 sd_initpkt_for_buf, /* Index: 6 */ 1874 sd_initpkt_for_buf, /* Index: 7 */ 1875 sd_initpkt_for_buf, /* Index: 8 */ 1876 1877 /* Chain for buf IO for removable-media targets (PM disabled) */ 1878 sd_initpkt_for_buf, /* Index: 9 */ 1879 sd_initpkt_for_buf, /* Index: 10 */ 1880 sd_initpkt_for_buf, /* Index: 11 */ 1881 1882 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1883 sd_initpkt_for_buf, /* Index: 12 */ 1884 sd_initpkt_for_buf, /* Index: 13 */ 1885 sd_initpkt_for_buf, /* Index: 14 */ 1886 sd_initpkt_for_buf, /* Index: 15 */ 1887 1888 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1889 sd_initpkt_for_buf, /* Index: 16 */ 1890 sd_initpkt_for_buf, /* Index: 17 */ 1891 sd_initpkt_for_buf, /* Index: 18 */ 1892 1893 /* Chain for USCSI commands (non-checksum targets) */ 1894 sd_initpkt_for_uscsi, /* Index: 19 */ 1895 sd_initpkt_for_uscsi, /* Index: 20 */ 1896 1897 /* Chain for USCSI commands (checksum targets) */ 1898 sd_initpkt_for_uscsi, /* Index: 21 */ 1899 sd_initpkt_for_uscsi, /* Index: 22 */ 1900 sd_initpkt_for_uscsi, /* Index: 23 */ 1901 1902 /* Chain for "direct" USCSI commands (all targets) */ 1903 sd_initpkt_for_uscsi, /* Index: 24 */ 1904 1905 /* Chain for "direct priority" USCSI commands (all targets) */ 1906 sd_initpkt_for_uscsi, /* Index: 25 */ 1907 1908 }; 1909 1910 1911 /* 1912 * Array to map a layering chain index to the appropriate destroypkt routine. 1913 * The redundant entries are present so that the index used for accessing 1914 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1915 * with this table as well.
1916 */ 1917 typedef void (*sd_destroypkt_t)(struct buf *); 1918 1919 static sd_destroypkt_t sd_destroypkt_map[] = { 1920 1921 /* Chain for buf IO for disk drive targets (PM enabled) */ 1922 sd_destroypkt_for_buf, /* Index: 0 */ 1923 sd_destroypkt_for_buf, /* Index: 1 */ 1924 sd_destroypkt_for_buf, /* Index: 2 */ 1925 1926 /* Chain for buf IO for disk drive targets (PM disabled) */ 1927 sd_destroypkt_for_buf, /* Index: 3 */ 1928 sd_destroypkt_for_buf, /* Index: 4 */ 1929 1930 /* Chain for buf IO for removable-media targets (PM enabled) */ 1931 sd_destroypkt_for_buf, /* Index: 5 */ 1932 sd_destroypkt_for_buf, /* Index: 6 */ 1933 sd_destroypkt_for_buf, /* Index: 7 */ 1934 sd_destroypkt_for_buf, /* Index: 8 */ 1935 1936 /* Chain for buf IO for removable-media targets (PM disabled) */ 1937 sd_destroypkt_for_buf, /* Index: 9 */ 1938 sd_destroypkt_for_buf, /* Index: 10 */ 1939 sd_destroypkt_for_buf, /* Index: 11 */ 1940 1941 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1942 sd_destroypkt_for_buf, /* Index: 12 */ 1943 sd_destroypkt_for_buf, /* Index: 13 */ 1944 sd_destroypkt_for_buf, /* Index: 14 */ 1945 sd_destroypkt_for_buf, /* Index: 15 */ 1946 1947 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1948 sd_destroypkt_for_buf, /* Index: 16 */ 1949 sd_destroypkt_for_buf, /* Index: 17 */ 1950 sd_destroypkt_for_buf, /* Index: 18 */ 1951 1952 /* Chain for USCSI commands (non-checksum targets) */ 1953 sd_destroypkt_for_uscsi, /* Index: 19 */ 1954 sd_destroypkt_for_uscsi, /* Index: 20 */ 1955 1956 /* Chain for USCSI commands (checksum targets) */ 1957 sd_destroypkt_for_uscsi, /* Index: 21 */ 1958 sd_destroypkt_for_uscsi, /* Index: 22 */ 1959 sd_destroypkt_for_uscsi, /* Index: 23 */ 1960 1961 /* Chain for "direct" USCSI commands (all targets) */ 1962 sd_destroypkt_for_uscsi, /* Index: 24 */ 1963 1964 /* Chain for "direct priority" USCSI commands (all targets) */ 1965 sd_destroypkt_for_uscsi, /* Index: 25 */ 1966 1967 }; 1968 1969 1970 1971 /* 1972 * Array to map a layering chain index to the appropriate chain "type". 1973 * The chain type indicates a specific property/usage of the chain. 1974 * The redundant entries are present so that the index used for accessing 1975 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1976 * with this table as well.
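* The type of a given IO can then be tested directly from its xbuf, as the SD_IS_BUFIO() and SD_IS_DIRECT_PRIORITY() macros below do.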
1977 */ 1978 1979 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1980 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1981 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1982 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1983 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1984 /* (for error recovery) */ 1985 1986 static int sd_chain_type_map[] = { 1987 1988 /* Chain for buf IO for disk drive targets (PM enabled) */ 1989 SD_CHAIN_BUFIO, /* Index: 0 */ 1990 SD_CHAIN_BUFIO, /* Index: 1 */ 1991 SD_CHAIN_BUFIO, /* Index: 2 */ 1992 1993 /* Chain for buf IO for disk drive targets (PM disabled) */ 1994 SD_CHAIN_BUFIO, /* Index: 3 */ 1995 SD_CHAIN_BUFIO, /* Index: 4 */ 1996 1997 /* Chain for buf IO for removable-media targets (PM enabled) */ 1998 SD_CHAIN_BUFIO, /* Index: 5 */ 1999 SD_CHAIN_BUFIO, /* Index: 6 */ 2000 SD_CHAIN_BUFIO, /* Index: 7 */ 2001 SD_CHAIN_BUFIO, /* Index: 8 */ 2002 2003 /* Chain for buf IO for removable-media targets (PM disabled) */ 2004 SD_CHAIN_BUFIO, /* Index: 9 */ 2005 SD_CHAIN_BUFIO, /* Index: 10 */ 2006 SD_CHAIN_BUFIO, /* Index: 11 */ 2007 2008 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2009 SD_CHAIN_BUFIO, /* Index: 12 */ 2010 SD_CHAIN_BUFIO, /* Index: 13 */ 2011 SD_CHAIN_BUFIO, /* Index: 14 */ 2012 SD_CHAIN_BUFIO, /* Index: 15 */ 2013 2014 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2015 SD_CHAIN_BUFIO, /* Index: 16 */ 2016 SD_CHAIN_BUFIO, /* Index: 17 */ 2017 SD_CHAIN_BUFIO, /* Index: 18 */ 2018 2019 /* Chain for USCSI commands (non-checksum targets) */ 2020 SD_CHAIN_USCSI, /* Index: 19 */ 2021 SD_CHAIN_USCSI, /* Index: 20 */ 2022 2023 /* Chain for USCSI commands (checksum targets) */ 2024 SD_CHAIN_USCSI, /* Index: 21 */ 2025 SD_CHAIN_USCSI, /* Index: 22 */ 2026 SD_CHAIN_USCSI, /* Index: 23 */ 2027 2028 /* Chain for "direct" USCSI commands (all targets) */ 2029 SD_CHAIN_DIRECT, /* Index: 24 */ 2030 2031 /* Chain for "direct priority" USCSI commands (all targets) */ 2032 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2033 }; 2034 2035 2036 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2037 #define SD_IS_BUFIO(xp) \ 2038 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2039 2040 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2041 #define SD_IS_DIRECT_PRIORITY(xp) \ 2042 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2043 2044 2045 2046 /* 2047 * Struct, array, and macros to map a specific chain to the appropriate 2048 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2049 * 2050 * The sd_chain_index_map[] array is used at attach time to set the various 2051 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2052 * chain to be used with the instance. This allows different instances to use 2053 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2054 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2055 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2056 * dynamically and without the use of locking; and (2) a layer to update the 2057 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2058 * to allow for deferred processing of an IO within the same chain from a 2059 * different execution context.
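* * For example (illustrative, not a fixed policy), a disk instance with PM enabled could be set up at attach time with un->un_buf_chain_type = SD_CHAIN_INFO_DISK; an xbuf initialized for that instance then receives xp->xb_chain_iostart = sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;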
2060 */ 2061 2062 struct sd_chain_index { 2063 int sci_iostart_index; 2064 int sci_iodone_index; 2065 }; 2066 2067 static struct sd_chain_index sd_chain_index_map[] = { 2068 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2069 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2070 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2071 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2072 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2073 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2074 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2075 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2076 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2077 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2078 }; 2079 2080 2081 /* 2082 * The following are indexes into the sd_chain_index_map[] array. 2083 */ 2084 2085 /* un->un_buf_chain_type must be set to one of these */ 2086 #define SD_CHAIN_INFO_DISK 0 2087 #define SD_CHAIN_INFO_DISK_NO_PM 1 2088 #define SD_CHAIN_INFO_RMMEDIA 2 2089 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2090 #define SD_CHAIN_INFO_CHKSUM 4 2091 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2092 2093 /* un->un_uscsi_chain_type must be set to one of these */ 2094 #define SD_CHAIN_INFO_USCSI_CMD 6 2095 /* USCSI with PM disabled is the same as DIRECT */ 2096 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2097 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2098 2099 /* un->un_direct_chain_type must be set to one of these */ 2100 #define SD_CHAIN_INFO_DIRECT_CMD 8 2101 2102 /* un->un_priority_chain_type must be set to one of these */ 2103 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2104 2105 /* size for devid inquiries */ 2106 #define MAX_INQUIRY_SIZE 0xF0 2107 2108 /* 2109 * Macros used by functions to pass a given buf(9S) struct along to the 2110 * next function in the layering chain for further processing. 2111 * 2112 * In the following macros, passing more than three arguments to the called 2113 * routines causes the optimizer for the SPARC compiler to stop doing tail 2114 * call elimination, which results in significant performance degradation. 2115 */ 2116 #define SD_BEGIN_IOSTART(index, un, bp) \ 2117 ((*(sd_iostart_chain[index]))(index, un, bp)) 2118 2119 #define SD_BEGIN_IODONE(index, un, bp) \ 2120 ((*(sd_iodone_chain[index]))(index, un, bp)) 2121 2122 #define SD_NEXT_IOSTART(index, un, bp) \ 2123 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2124 2125 #define SD_NEXT_IODONE(index, un, bp) \ 2126 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2127 2128 /* 2129 * Function: _init 2130 * 2131 * Description: This is the driver _init(9E) entry point. 2132 * 2133 * Return Code: Returns the value from mod_install(9F) or 2134 * ddi_soft_state_init(9F) as appropriate. 2135 * 2136 * Context: Called when the driver module is loaded.
2137 */ 2138 2139 int 2140 _init(void) 2141 { 2142 int err; 2143 2144 /* establish driver name from module name */ 2145 sd_label = mod_modname(&modlinkage); 2146 2147 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2148 SD_MAXUNIT); 2149 2150 if (err != 0) { 2151 return (err); 2152 } 2153 2154 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2155 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2156 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2157 2158 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2159 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2160 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2161 2162 /* 2163 * it's ok to init here even for fibre devices 2164 */ 2165 sd_scsi_probe_cache_init(); 2166 2167 sd_scsi_target_lun_init(); 2168 2169 /* 2170 * Creating taskq before mod_install ensures that all callers (threads) 2171 * that enter the module after a successful mod_install encounter 2172 * a valid taskq. 2173 */ 2174 sd_taskq_create(); 2175 2176 err = mod_install(&modlinkage); 2177 if (err != 0) { 2178 /* delete taskq if install fails */ 2179 sd_taskq_delete(); 2180 2181 mutex_destroy(&sd_detach_mutex); 2182 mutex_destroy(&sd_log_mutex); 2183 mutex_destroy(&sd_label_mutex); 2184 2185 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2186 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2187 cv_destroy(&sd_tr.srq_inprocess_cv); 2188 2189 sd_scsi_probe_cache_fini(); 2190 2191 sd_scsi_target_lun_fini(); 2192 2193 ddi_soft_state_fini(&sd_state); 2194 return (err); 2195 } 2196 2197 return (err); 2198 } 2199 2200 2201 /* 2202 * Function: _fini 2203 * 2204 * Description: This is the driver _fini(9E) entry point. 2205 * 2206 * Return Code: Returns the value from mod_remove(9F) 2207 * 2208 * Context: Called when driver module is unloaded. 2209 */ 2210 2211 int 2212 _fini(void) 2213 { 2214 int err; 2215 2216 if ((err = mod_remove(&modlinkage)) != 0) { 2217 return (err); 2218 } 2219 2220 sd_taskq_delete(); 2221 2222 mutex_destroy(&sd_detach_mutex); 2223 mutex_destroy(&sd_log_mutex); 2224 mutex_destroy(&sd_label_mutex); 2225 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2226 2227 sd_scsi_probe_cache_fini(); 2228 2229 sd_scsi_target_lun_fini(); 2230 2231 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2232 cv_destroy(&sd_tr.srq_inprocess_cv); 2233 2234 ddi_soft_state_fini(&sd_state); 2235 2236 return (err); 2237 } 2238 2239 2240 /* 2241 * Function: _info 2242 * 2243 * Description: This is the driver _info(9E) entry point. 2244 * 2245 * Arguments: modinfop - pointer to the driver modinfo structure 2246 * 2247 * Return Code: Returns the value from mod_info(9F). 2248 * 2249 * Context: Kernel thread context 2250 */ 2251 2252 int 2253 _info(struct modinfo *modinfop) 2254 { 2255 return (mod_info(&modlinkage, modinfop)); 2256 } 2257 2258 2259 /* 2260 * The following routines implement the driver message logging facility. 2261 * They provide component- and level-based debug output filtering. 2262 * Output may also be restricted to messages for a single instance by 2263 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2264 * to NULL, then messages for all instances are printed. 2265 * 2266 * These routines have been cloned from each other due to the language 2267 * constraints of macros and variable argument list processing. 2268 */ 2269 2270 2271 /* 2272 * Function: sd_log_err 2273 * 2274 * Description: This routine is called by the SD_ERROR macro for debug 2275 * logging of error conditions.
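* For example (illustrative; sd_foo is a placeholder): SD_ERROR(SD_LOG_COMMON, un, "sd_foo: cmd failed %d\n", rval);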
2276 * 2277 * Arguments: comp - driver component being logged 2278 * un - pointer to driver soft state (unit) structure 2279 * fmt - error string and format to be logged 2280 */ 2281 2282 static void 2283 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2284 { 2285 va_list ap; 2286 dev_info_t *dev; 2287 2288 ASSERT(un != NULL); 2289 dev = SD_DEVINFO(un); 2290 ASSERT(dev != NULL); 2291 2292 /* 2293 * Filter messages based on the global component and level masks. 2294 * Also print if un matches the value of sd_debug_un, or if 2295 * sd_debug_un is set to NULL. 2296 */ 2297 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2298 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2299 mutex_enter(&sd_log_mutex); 2300 va_start(ap, fmt); 2301 (void) vsprintf(sd_log_buf, fmt, ap); 2302 va_end(ap); 2303 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2304 mutex_exit(&sd_log_mutex); 2305 } 2306 #ifdef SD_FAULT_INJECTION 2307 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2308 if (un->sd_injection_mask & comp) { 2309 mutex_enter(&sd_log_mutex); 2310 va_start(ap, fmt); 2311 (void) vsprintf(sd_log_buf, fmt, ap); 2312 va_end(ap); 2313 sd_injection_log(sd_log_buf, un); 2314 mutex_exit(&sd_log_mutex); 2315 } 2316 #endif 2317 } 2318 2319 2320 /* 2321 * Function: sd_log_info 2322 * 2323 * Description: This routine is called by the SD_INFO macro for debug 2324 * logging of general purpose informational conditions. 2325 * 2326 * Arguments: component - driver component being logged 2327 * un - pointer to driver soft state (unit) structure 2328 * fmt - info string and format to be logged 2329 */ 2330 2331 static void 2332 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2333 { 2334 va_list ap; 2335 dev_info_t *dev; 2336 2337 ASSERT(un != NULL); 2338 dev = SD_DEVINFO(un); 2339 ASSERT(dev != NULL); 2340 2341 /* 2342 * Filter messages based on the global component and level masks. 2343 * Also print if un matches the value of sd_debug_un, or if 2344 * sd_debug_un is set to NULL. 2345 */ 2346 if ((sd_component_mask & component) && 2347 (sd_level_mask & SD_LOGMASK_INFO) && 2348 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2349 mutex_enter(&sd_log_mutex); 2350 va_start(ap, fmt); 2351 (void) vsprintf(sd_log_buf, fmt, ap); 2352 va_end(ap); 2353 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2354 mutex_exit(&sd_log_mutex); 2355 } 2356 #ifdef SD_FAULT_INJECTION 2357 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2358 if (un->sd_injection_mask & component) { 2359 mutex_enter(&sd_log_mutex); 2360 va_start(ap, fmt); 2361 (void) vsprintf(sd_log_buf, fmt, ap); 2362 va_end(ap); 2363 sd_injection_log(sd_log_buf, un); 2364 mutex_exit(&sd_log_mutex); 2365 } 2366 #endif 2367 } 2368 2369 2370 /* 2371 * Function: sd_log_trace 2372 * 2373 * Description: This routine is called by the SD_TRACE macro for debug 2374 * logging of trace conditions (i.e. function entry/exit). 2375 * 2376 * Arguments: component - driver component being logged 2377 * un - pointer to driver soft state (unit) structure 2378 * fmt - trace string and format to be logged 2379 */ 2380 2381 static void 2382 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2383 { 2384 va_list ap; 2385 dev_info_t *dev; 2386 2387 ASSERT(un != NULL); 2388 dev = SD_DEVINFO(un); 2389 ASSERT(dev != NULL); 2390 2391 /* 2392 * Filter messages based on the global component and level masks. 2393 * Also print if un matches the value of sd_debug_un, or if 2394 * sd_debug_un is set to NULL.
2395 */ 2396 if ((sd_component_mask & component) && 2397 (sd_level_mask & SD_LOGMASK_TRACE) && 2398 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2399 mutex_enter(&sd_log_mutex); 2400 va_start(ap, fmt); 2401 (void) vsprintf(sd_log_buf, fmt, ap); 2402 va_end(ap); 2403 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2404 mutex_exit(&sd_log_mutex); 2405 } 2406 #ifdef SD_FAULT_INJECTION 2407 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2408 if (un->sd_injection_mask & component) { 2409 mutex_enter(&sd_log_mutex); 2410 va_start(ap, fmt); 2411 (void) vsprintf(sd_log_buf, fmt, ap); 2412 va_end(ap); 2413 sd_injection_log(sd_log_buf, un); 2414 mutex_exit(&sd_log_mutex); 2415 } 2416 #endif 2417 } 2418 2419 2420 /* 2421 * Function: sdprobe 2422 * 2423 * Description: This is the driver probe(9e) entry point function. 2424 * 2425 * Arguments: devi - opaque device info handle 2426 * 2427 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2428 * DDI_PROBE_FAILURE: If the probe failed. 2429 * DDI_PROBE_PARTIAL: If the instance is not present now, 2430 * but may be present in the future. 2431 */ 2432 2433 static int 2434 sdprobe(dev_info_t *devi) 2435 { 2436 struct scsi_device *devp; 2437 int rval; 2438 int instance; 2439 2440 /* 2441 * if it wasn't for pln, sdprobe could actually be nulldev 2442 * in the "__fibre" case. 2443 */ 2444 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2445 return (DDI_PROBE_DONTCARE); 2446 } 2447 2448 devp = ddi_get_driver_private(devi); 2449 2450 if (devp == NULL) { 2451 /* Ooops... nexus driver is mis-configured... */ 2452 return (DDI_PROBE_FAILURE); 2453 } 2454 2455 instance = ddi_get_instance(devi); 2456 2457 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2458 return (DDI_PROBE_PARTIAL); 2459 } 2460 2461 /* 2462 * Call the SCSA utility probe routine to see if we actually 2463 * have a target at this SCSI nexus. 2464 */ 2465 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2466 case SCSIPROBE_EXISTS: 2467 switch (devp->sd_inq->inq_dtype) { 2468 case DTYPE_DIRECT: 2469 rval = DDI_PROBE_SUCCESS; 2470 break; 2471 case DTYPE_RODIRECT: 2472 /* CDs etc. Can be removable media */ 2473 rval = DDI_PROBE_SUCCESS; 2474 break; 2475 case DTYPE_OPTICAL: 2476 /* 2477 * Rewritable optical drive HP115AA 2478 * Can also be removable media 2479 */ 2480 2481 /* 2482 * Do not attempt to bind to DTYPE_OPTICAL if 2483 * pre solaris 9 sparc sd behavior is required 2484 * 2485 * If first time through and sd_dtype_optical_bind 2486 * has not been set in /etc/system, check properties 2487 */ 2488 2489 if (sd_dtype_optical_bind < 0) { 2490 sd_dtype_optical_bind = ddi_prop_get_int 2491 (DDI_DEV_T_ANY, devi, 0, 2492 "optical-device-bind", 1); 2493 } 2494 2495 if (sd_dtype_optical_bind == 0) { 2496 rval = DDI_PROBE_FAILURE; 2497 } else { 2498 rval = DDI_PROBE_SUCCESS; 2499 } 2500 break; 2501 2502 case DTYPE_NOTPRESENT: 2503 default: 2504 rval = DDI_PROBE_FAILURE; 2505 break; 2506 } 2507 break; 2508 default: 2509 rval = DDI_PROBE_PARTIAL; 2510 break; 2511 } 2512 2513 /* 2514 * This routine checks for resource allocation prior to freeing, 2515 * so it will take care of the "smart probing" case where a 2516 * scsi_probe() may or may not have been issued and will *not* 2517 * free previously-freed resources. 2518 */ 2519 scsi_unprobe(devp); 2520 return (rval); 2521 } 2522 2523 2524 /* 2525 * Function: sdinfo 2526 * 2527 * Description: This is the driver getinfo(9e) entry point function.
2528 * Given the device number, return the devinfo pointer from 2529 * the scsi_device structure or the instance number 2530 * associated with the dev_t. 2531 * 2532 * Arguments: dip - pointer to device info structure 2533 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2534 * DDI_INFO_DEVT2INSTANCE) 2535 * arg - driver dev_t 2536 * resultp - user buffer for request response 2537 * 2538 * Return Code: DDI_SUCCESS 2539 * DDI_FAILURE 2540 */ 2541 /* ARGSUSED */ 2542 static int 2543 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2544 { 2545 struct sd_lun *un; 2546 dev_t dev; 2547 int instance; 2548 int error; 2549 2550 switch (infocmd) { 2551 case DDI_INFO_DEVT2DEVINFO: 2552 dev = (dev_t)arg; 2553 instance = SDUNIT(dev); 2554 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2555 return (DDI_FAILURE); 2556 } 2557 *result = (void *) SD_DEVINFO(un); 2558 error = DDI_SUCCESS; 2559 break; 2560 case DDI_INFO_DEVT2INSTANCE: 2561 dev = (dev_t)arg; 2562 instance = SDUNIT(dev); 2563 *result = (void *)(uintptr_t)instance; 2564 error = DDI_SUCCESS; 2565 break; 2566 default: 2567 error = DDI_FAILURE; 2568 } 2569 return (error); 2570 } 2571 2572 /* 2573 * Function: sd_prop_op 2574 * 2575 * Description: This is the driver prop_op(9e) entry point function. 2576 * Return the number of blocks for the partition in question 2577 * or forward the request to the property facilities. 2578 * 2579 * Arguments: dev - device number 2580 * dip - pointer to device info structure 2581 * prop_op - property operator 2582 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2583 * name - pointer to property name 2584 * valuep - pointer or address of the user buffer 2585 * lengthp - property length 2586 * 2587 * Return Code: DDI_PROP_SUCCESS 2588 * DDI_PROP_NOT_FOUND 2589 * DDI_PROP_UNDEFINED 2590 * DDI_PROP_NO_MEMORY 2591 * DDI_PROP_BUF_TOO_SMALL 2592 */ 2593 2594 static int 2595 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2596 char *name, caddr_t valuep, int *lengthp) 2597 { 2598 int instance = ddi_get_instance(dip); 2599 struct sd_lun *un; 2600 uint64_t nblocks64; 2601 uint_t dblk; 2602 2603 /* 2604 * Our dynamic properties are all device specific and size oriented. 2605 * Requests issued under conditions where size is valid are passed 2606 * to ddi_prop_op_nblocks with the size information, otherwise the 2607 * request is passed to ddi_prop_op. Size depends on valid geometry. 
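* (The nblocks variant used below, ddi_prop_op_nblocks_blksize(9F), also passes the device's block size along with the block count.)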
2608 */ 2609 un = ddi_get_soft_state(sd_state, instance); 2610 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2611 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2612 name, valuep, lengthp)); 2613 } else if (!SD_IS_VALID_LABEL(un)) { 2614 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2615 valuep, lengthp)); 2616 } 2617 2618 /* get nblocks value */ 2619 ASSERT(!mutex_owned(SD_MUTEX(un))); 2620 2621 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2622 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2623 2624 /* report size in target size blocks */ 2625 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2626 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2627 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2628 } 2629 2630 /* 2631 * The following functions are for smart probing: 2632 * sd_scsi_probe_cache_init() 2633 * sd_scsi_probe_cache_fini() 2634 * sd_scsi_clear_probe_cache() 2635 * sd_scsi_probe_with_cache() 2636 */ 2637 2638 /* 2639 * Function: sd_scsi_probe_cache_init 2640 * 2641 * Description: Initializes the probe response cache mutex and head pointer. 2642 * 2643 * Context: Kernel thread context 2644 */ 2645 2646 static void 2647 sd_scsi_probe_cache_init(void) 2648 { 2649 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2650 sd_scsi_probe_cache_head = NULL; 2651 } 2652 2653 2654 /* 2655 * Function: sd_scsi_probe_cache_fini 2656 * 2657 * Description: Frees all resources associated with the probe response cache. 2658 * 2659 * Context: Kernel thread context 2660 */ 2661 2662 static void 2663 sd_scsi_probe_cache_fini(void) 2664 { 2665 struct sd_scsi_probe_cache *cp; 2666 struct sd_scsi_probe_cache *ncp; 2667 2668 /* Clean up our smart probing linked list */ 2669 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2670 ncp = cp->next; 2671 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2672 } 2673 sd_scsi_probe_cache_head = NULL; 2674 mutex_destroy(&sd_scsi_probe_cache_mutex); 2675 } 2676 2677 2678 /* 2679 * Function: sd_scsi_clear_probe_cache 2680 * 2681 * Description: This routine clears the probe response cache. This is 2682 * done when open() returns ENXIO so that when deferred 2683 * attach is attempted (possibly after a device has been 2684 * turned on) we will retry the probe. Since we don't know 2685 * which target we failed to open, we just clear the 2686 * entire cache. 2687 * 2688 * Context: Kernel thread context 2689 */ 2690 2691 static void 2692 sd_scsi_clear_probe_cache(void) 2693 { 2694 struct sd_scsi_probe_cache *cp; 2695 int i; 2696 2697 mutex_enter(&sd_scsi_probe_cache_mutex); 2698 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2699 /* 2700 * Reset all entries to SCSIPROBE_EXISTS. This will 2701 * force probing to be performed the next time 2702 * sd_scsi_probe_with_cache is called. 2703 */ 2704 for (i = 0; i < NTARGETS_WIDE; i++) { 2705 cp->cache[i] = SCSIPROBE_EXISTS; 2706 } 2707 } 2708 mutex_exit(&sd_scsi_probe_cache_mutex); 2709 } 2710 2711 2712 /* 2713 * Function: sd_scsi_probe_with_cache 2714 * 2715 * Description: This routine implements support for a scsi device probe 2716 * with cache. The driver maintains a cache of the target 2717 * responses to scsi probes. If we get no response from a 2718 * target during a probe inquiry, we remember that, and we 2719 * avoid additional calls to scsi_probe on non-zero LUNs 2720 * on the same target until the cache is cleared. 
By doing 2721 * so we avoid the 1/4 sec selection timeout for nonzero 2722 * LUNs. lun0 of a target is always probed. 2723 * 2724 * Arguments: devp - Pointer to a scsi_device(9S) structure 2725 * waitfn - indicates what the allocator routines should 2726 * do when resources are not available. This value 2727 * is passed on to scsi_probe() when that routine 2728 * is called. 2729 * 2730 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2731 * otherwise the value returned by scsi_probe(9F). 2732 * 2733 * Context: Kernel thread context 2734 */ 2735 2736 static int 2737 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2738 { 2739 struct sd_scsi_probe_cache *cp; 2740 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2741 int lun, tgt; 2742 2743 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2744 SCSI_ADDR_PROP_LUN, 0); 2745 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2746 SCSI_ADDR_PROP_TARGET, -1); 2747 2748 /* Make sure caching enabled and target in range */ 2749 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2750 /* do it the old way (no cache) */ 2751 return (scsi_probe(devp, waitfn)); 2752 } 2753 2754 mutex_enter(&sd_scsi_probe_cache_mutex); 2755 2756 /* Find the cache for this scsi bus instance */ 2757 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2758 if (cp->pdip == pdip) { 2759 break; 2760 } 2761 } 2762 2763 /* If we can't find a cache for this pdip, create one */ 2764 if (cp == NULL) { 2765 int i; 2766 2767 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2768 KM_SLEEP); 2769 cp->pdip = pdip; 2770 cp->next = sd_scsi_probe_cache_head; 2771 sd_scsi_probe_cache_head = cp; 2772 for (i = 0; i < NTARGETS_WIDE; i++) { 2773 cp->cache[i] = SCSIPROBE_EXISTS; 2774 } 2775 } 2776 2777 mutex_exit(&sd_scsi_probe_cache_mutex); 2778 2779 /* Recompute the cache for this target if LUN zero */ 2780 if (lun == 0) { 2781 cp->cache[tgt] = SCSIPROBE_EXISTS; 2782 } 2783 2784 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2785 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2786 return (SCSIPROBE_NORESP); 2787 } 2788 2789 /* Do the actual probe; save & return the result */ 2790 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2791 } 2792 2793 2794 /* 2795 * Function: sd_scsi_target_lun_init 2796 * 2797 * Description: Initializes the attached lun chain mutex and head pointer. 2798 * 2799 * Context: Kernel thread context 2800 */ 2801 2802 static void 2803 sd_scsi_target_lun_init(void) 2804 { 2805 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2806 sd_scsi_target_lun_head = NULL; 2807 } 2808 2809 2810 /* 2811 * Function: sd_scsi_target_lun_fini 2812 * 2813 * Description: Frees all resources associated with the attached lun 2814 * chain. 2815 * 2816 * Context: Kernel thread context 2817 */ 2818 2819 static void 2820 sd_scsi_target_lun_fini(void) 2821 { 2822 struct sd_scsi_hba_tgt_lun *cp; 2823 struct sd_scsi_hba_tgt_lun *ncp; 2824 2825 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2826 ncp = cp->next; 2827 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2828 } 2829 sd_scsi_target_lun_head = NULL; 2830 mutex_destroy(&sd_scsi_target_lun_mutex); 2831 } 2832 2833 2834 /* 2835 * Function: sd_scsi_get_target_lun_count 2836 * 2837 * Description: This routine will check in the attached lun chain to see 2838 * how many luns are attached on the required SCSI controller 2839 * and target.
Currently, some capabilities, such as tagged queueing, 2840 * are supported per target by the HBA, so all luns in a 2841 * target have the same capabilities. Based on this assumption, 2842 * sd should only set these capabilities once per target. This 2843 * function is called when sd needs to decide how many luns are 2844 * already attached on a target. 2845 * 2846 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2847 * controller device. 2848 * target - The target ID on the controller's SCSI bus. 2849 * 2850 * Return Code: The number of luns attached on the required target and 2851 * controller. 2852 * -1 if target ID is not in parallel SCSI scope or the given 2853 * dip is not in the chain. 2854 * 2855 * Context: Kernel thread context 2856 */ 2857 2858 static int 2859 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2860 { 2861 struct sd_scsi_hba_tgt_lun *cp; 2862 2863 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2864 return (-1); 2865 } 2866 2867 mutex_enter(&sd_scsi_target_lun_mutex); 2868 2869 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2870 if (cp->pdip == dip) { 2871 break; 2872 } 2873 } 2874 2875 mutex_exit(&sd_scsi_target_lun_mutex); 2876 2877 if (cp == NULL) { 2878 return (-1); 2879 } 2880 2881 return (cp->nlun[target]); 2882 } 2883 2884 2885 /* 2886 * Function: sd_scsi_update_lun_on_target 2887 * 2888 * Description: This routine is used to update the attached lun chain when a 2889 * lun is attached or detached on a target. 2890 * 2891 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2892 * controller device. 2893 * target - The target ID on the controller's SCSI bus. 2894 * flag - Indicates whether the lun is attached or detached. 2895 * 2896 * Context: Kernel thread context 2897 */ 2898 2899 static void 2900 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2901 { 2902 struct sd_scsi_hba_tgt_lun *cp; 2903 2904 mutex_enter(&sd_scsi_target_lun_mutex); 2905 2906 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2907 if (cp->pdip == dip) { 2908 break; 2909 } 2910 } 2911 2912 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2913 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2914 KM_SLEEP); 2915 cp->pdip = dip; 2916 cp->next = sd_scsi_target_lun_head; 2917 sd_scsi_target_lun_head = cp; 2918 } 2919 2920 mutex_exit(&sd_scsi_target_lun_mutex); 2921 2922 if (cp != NULL) { 2923 if (flag == SD_SCSI_LUN_ATTACH) { 2924 cp->nlun[target]++; 2925 } else { 2926 cp->nlun[target]--; 2927 } 2928 } 2929 } 2930 2931 2932 /* 2933 * Function: sd_spin_up_unit 2934 * 2935 * Description: Issues the following commands to spin up the device: 2936 * START STOP UNIT, and INQUIRY. 2937 * 2938 * Arguments: un - driver soft state (unit) structure 2939 * 2940 * Return Code: 0 - success 2941 * EIO - failure 2942 * EACCES - reservation conflict 2943 * 2944 * Context: Kernel thread context 2945 */ 2946 2947 static int 2948 sd_spin_up_unit(struct sd_lun *un) 2949 { 2950 size_t resid = 0; 2951 int has_conflict = FALSE; 2952 uchar_t *bufaddr; 2953 2954 ASSERT(un != NULL); 2955 2956 /* 2957 * Send a throwaway START UNIT command. 2958 * 2959 * If we fail on this, we don't care presently what precisely 2960 * is wrong. EMC's arrays will also fail this with a check 2961 * condition (0x2/0x4/0x3) if the device is "inactive," but 2962 * we don't want to fail the attach because it may become 2963 * "active" later.
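* Only a reservation conflict (EACCES) from this command is remembered, and it is reported back to the caller after the INQUIRY below completes.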
2964 */ 2965 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2966 == EACCES) 2967 has_conflict = TRUE; 2968 2969 /* 2970 * Send another INQUIRY command to the target. This is necessary for 2971 * non-removable media direct access devices because their INQUIRY data 2972 * may not be fully qualified until they are spun up (perhaps via the 2973 * START command above). Note: This seems to be needed for some 2974 * legacy devices only. The INQUIRY command should succeed even if a 2975 * Reservation Conflict is present. 2976 */ 2977 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2978 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2979 kmem_free(bufaddr, SUN_INQSIZE); 2980 return (EIO); 2981 } 2982 2983 /* 2984 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2985 * Note that this routine does not return a failure here even if the 2986 * INQUIRY command did not return any data. This is a legacy behavior. 2987 */ 2988 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2989 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2990 } 2991 2992 kmem_free(bufaddr, SUN_INQSIZE); 2993 2994 /* If we hit a reservation conflict above, tell the caller. */ 2995 if (has_conflict == TRUE) { 2996 return (EACCES); 2997 } 2998 2999 return (0); 3000 } 3001 3002 #ifdef _LP64 3003 /* 3004 * Function: sd_enable_descr_sense 3005 * 3006 * Description: This routine attempts to select descriptor sense format 3007 * using the Control mode page. Devices that support 64 bit 3008 * LBAs (for >2TB luns) should also implement descriptor 3009 * sense data, so we will call this function whenever we see 3010 * a lun larger than 2TB. If for some reason the device 3011 * supports 64 bit LBAs but doesn't support descriptor sense, 3012 * presumably the mode select will fail. Everything will 3013 * continue to work normally except that we will not get 3014 * complete sense data for commands that fail with an LBA 3015 * larger than 32 bits. 3016 * 3017 * Arguments: un - driver soft state (unit) structure 3018 * 3019 * Context: Kernel thread context only 3020 */ 3021 3022 static void 3023 sd_enable_descr_sense(struct sd_lun *un) 3024 { 3025 uchar_t *header; 3026 struct mode_control_scsi3 *ctrl_bufp; 3027 size_t buflen; 3028 size_t bd_len; 3029 3030 /* 3031 * Read MODE SENSE page 0xA, Control Mode Page 3032 */ 3033 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3034 sizeof (struct mode_control_scsi3); 3035 header = kmem_zalloc(buflen, KM_SLEEP); 3036 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3037 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3038 SD_ERROR(SD_LOG_COMMON, un, 3039 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3040 goto eds_exit; 3041 } 3042 3043 /* 3044 * Determine size of Block Descriptors in order to locate 3045 * the mode page data. ATAPI devices return 0, SCSI devices 3046 * should return MODE_BLK_DESC_LENGTH. 3047 */ 3048 bd_len = ((struct mode_header *)header)->bdesc_length; 3049 3050 /* Clear the mode data length field for MODE SELECT */ 3051 ((struct mode_header *)header)->length = 0; 3052 3053 ctrl_bufp = (struct mode_control_scsi3 *) 3054 (header + MODE_HEADER_LENGTH + bd_len); 3055 3056 /* 3057 * If the page length is smaller than the expected value, 3058 * the target device doesn't support D_SENSE. Bail out here.
3059 */ 3060 if (ctrl_bufp->mode_page.length < 3061 sizeof (struct mode_control_scsi3) - 2) { 3062 SD_ERROR(SD_LOG_COMMON, un, 3063 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3064 goto eds_exit; 3065 } 3066 3067 /* 3068 * Clear PS bit for MODE SELECT 3069 */ 3070 ctrl_bufp->mode_page.ps = 0; 3071 3072 /* 3073 * Set D_SENSE to enable descriptor sense format. 3074 */ 3075 ctrl_bufp->d_sense = 1; 3076 3077 /* 3078 * Use MODE SELECT to commit the change to the D_SENSE bit 3079 */ 3080 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3081 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3082 SD_INFO(SD_LOG_COMMON, un, 3083 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3084 goto eds_exit; 3085 } 3086 3087 eds_exit: 3088 kmem_free(header, buflen); 3089 } 3090 3091 /* 3092 * Function: sd_reenable_dsense_task 3093 * 3094 * Description: Re-enable descriptor sense after device or bus reset 3095 * 3096 * Context: Executes in a taskq() thread context 3097 */ 3098 static void 3099 sd_reenable_dsense_task(void *arg) 3100 { 3101 struct sd_lun *un = arg; 3102 3103 ASSERT(un != NULL); 3104 sd_enable_descr_sense(un); 3105 } 3106 #endif /* _LP64 */ 3107 3108 /* 3109 * Function: sd_set_mmc_caps 3110 * 3111 * Description: This routine determines if the device is MMC compliant and if 3112 * the device supports CDDA via a mode sense of the CDVD 3113 * capabilities mode page. Also checks if the device is a 3114 * dvdram writable device. 3115 * 3116 * Arguments: un - driver soft state (unit) structure 3117 * 3118 * Context: Kernel thread context only 3119 */ 3120 3121 static void 3122 sd_set_mmc_caps(struct sd_lun *un) 3123 { 3124 struct mode_header_grp2 *sense_mhp; 3125 uchar_t *sense_page; 3126 caddr_t buf; 3127 int bd_len; 3128 int status; 3129 struct uscsi_cmd com; 3130 int rtn; 3131 uchar_t *out_data_rw, *out_data_hd; 3132 uchar_t *rqbuf_rw, *rqbuf_hd; 3133 3134 ASSERT(un != NULL); 3135 3136 /* 3137 * The flags that will be set in this function are: mmc compliant, 3138 * dvdram writable device, and cdda support. Initialize them to FALSE; 3139 * if a capability is detected, it will be set to TRUE. 3140 */ 3141 un->un_f_mmc_cap = FALSE; 3142 un->un_f_dvdram_writable_device = FALSE; 3143 un->un_f_cfg_cdda = FALSE; 3144 3145 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3146 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3147 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3148 3149 if (status != 0) { 3150 /* command failed; just return */ 3151 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3152 return; 3153 } 3154 /* 3155 * If the mode sense request for the CDROM CAPABILITIES 3156 * page (0x2A) succeeds, the device is assumed to be MMC. 3157 */ 3158 un->un_f_mmc_cap = TRUE; 3159 3160 /* Get to the page data */ 3161 sense_mhp = (struct mode_header_grp2 *)buf; 3162 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3163 sense_mhp->bdesc_length_lo; 3164 if (bd_len > MODE_BLK_DESC_LENGTH) { 3165 /* 3166 * We did not get back the expected block descriptor 3167 * length so we cannot determine if the device supports 3168 * CDDA. However, we still indicate the device is MMC 3169 * according to the successful response to the page 3170 * 0x2A mode sense request.
3171 */ 3172 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3173 "sd_set_mmc_caps: Mode Sense returned " 3174 "invalid block descriptor length\n"); 3175 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3176 return; 3177 } 3178 3179 /* See if read CDDA is supported */ 3180 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3181 bd_len); 3182 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3183 3184 /* See if writing DVD RAM is supported. */ 3185 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3186 if (un->un_f_dvdram_writable_device == TRUE) { 3187 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3188 return; 3189 } 3190 3191 /* 3192 * If the device presents DVD or CD capabilities in the mode 3193 * page, we can return here since an RRD will not have 3194 * these capabilities. 3195 */ 3196 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3197 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3198 return; 3199 } 3200 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3201 3202 /* 3203 * If un->un_f_dvdram_writable_device is still FALSE, 3204 * check for a Removable Rigid Disk (RRD). An RRD 3205 * device is identified by the features RANDOM_WRITABLE and 3206 * HARDWARE_DEFECT_MANAGEMENT. 3207 */ 3208 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3209 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3210 3211 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3212 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3213 RANDOM_WRITABLE, SD_PATH_STANDARD); 3214 if (rtn != 0) { 3215 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3216 kmem_free(rqbuf_rw, SENSE_LENGTH); 3217 return; 3218 } 3219 3220 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3221 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3222 3223 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3224 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3225 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3226 if (rtn == 0) { 3227 /* 3228 * We have good information, check for random writable 3229 * and hardware defect features. 3230 */ 3231 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3232 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3233 un->un_f_dvdram_writable_device = TRUE; 3234 } 3235 } 3236 3237 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3238 kmem_free(rqbuf_rw, SENSE_LENGTH); 3239 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3240 kmem_free(rqbuf_hd, SENSE_LENGTH); 3241 } 3242 3243 /* 3244 * Function: sd_check_for_writable_cd 3245 * 3246 * Description: This routine determines if the media in the device is 3247 * writable or not. It uses the get configuration command (0x46) 3248 * to determine if the media is writable. 3249 * 3250 * Arguments: un - driver soft state (unit) structure 3251 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3252 * chain and the normal command waitq, or 3253 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3254 * "direct" chain and bypass the normal command 3255 * waitq. 3256 * 3257 * Context: Never called at interrupt context.
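* * Note: the caller must hold SD_MUTEX(un); the routine drops and re-acquires the mutex around the USCSI commands it issues.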
3258 */ 3259 3260 static void 3261 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3262 { 3263 struct uscsi_cmd com; 3264 uchar_t *out_data; 3265 uchar_t *rqbuf; 3266 int rtn; 3267 uchar_t *out_data_rw, *out_data_hd; 3268 uchar_t *rqbuf_rw, *rqbuf_hd; 3269 struct mode_header_grp2 *sense_mhp; 3270 uchar_t *sense_page; 3271 caddr_t buf; 3272 int bd_len; 3273 int status; 3274 3275 ASSERT(un != NULL); 3276 ASSERT(mutex_owned(SD_MUTEX(un))); 3277 3278 /* 3279 * Initialize the writable media to FALSE. It is set to TRUE only if 3280 * the configuration info tells us the media is writable. 3281 */ 3282 un->un_f_mmc_writable_media = FALSE; 3283 mutex_exit(SD_MUTEX(un)); 3284 3285 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3286 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3287 3288 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3289 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3290 3291 mutex_enter(SD_MUTEX(un)); 3292 if (rtn == 0) { 3293 /* 3294 * We have good information, check for writable DVD. 3295 */ 3296 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3297 un->un_f_mmc_writable_media = TRUE; 3298 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3299 kmem_free(rqbuf, SENSE_LENGTH); 3300 return; 3301 } 3302 } 3303 3304 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3305 kmem_free(rqbuf, SENSE_LENGTH); 3306 3307 /* 3308 * Determine if this is an RRD type device. 3309 */ 3310 mutex_exit(SD_MUTEX(un)); 3311 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3312 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3313 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3314 mutex_enter(SD_MUTEX(un)); 3315 if (status != 0) { 3316 /* command failed; just return */ 3317 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3318 return; 3319 } 3320 3321 /* Get to the page data */ 3322 sense_mhp = (struct mode_header_grp2 *)buf; 3323 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3324 if (bd_len > MODE_BLK_DESC_LENGTH) { 3325 /* 3326 * We did not get back the expected block descriptor length so 3327 * we cannot check the mode page. 3328 */ 3329 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3330 "sd_check_for_writable_cd: Mode Sense returned " 3331 "invalid block descriptor length\n"); 3332 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3333 return; 3334 } 3335 3336 /* 3337 * If the device presents DVD or CD capabilities in the mode 3338 * page, we can return here since an RRD device will not have 3339 * these capabilities. 3340 */ 3341 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3342 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3343 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3344 return; 3345 } 3346 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3347 3348 /* 3349 * If un->un_f_mmc_writable_media is still FALSE, 3350 * check for RRD type media. An RRD device is identified 3351 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3352 */ 3353 mutex_exit(SD_MUTEX(un)); 3354 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3355 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3356 3357 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3358 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3359 RANDOM_WRITABLE, path_flag); 3360 if (rtn != 0) { 3361 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3362 kmem_free(rqbuf_rw, SENSE_LENGTH); 3363 mutex_enter(SD_MUTEX(un)); 3364 return; 3365 } 3366 3367 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3368 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3369 3370 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3371 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3372 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3373 mutex_enter(SD_MUTEX(un)); 3374 if (rtn == 0) { 3375 /* 3376 * We have good information, check for random writable 3377 * and hardware defect features as current. 3378 */ 3379 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3380 (out_data_rw[10] & 0x1) && 3381 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3382 (out_data_hd[10] & 0x1)) { 3383 un->un_f_mmc_writable_media = TRUE; 3384 } 3385 } 3386 3387 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3388 kmem_free(rqbuf_rw, SENSE_LENGTH); 3389 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3390 kmem_free(rqbuf_hd, SENSE_LENGTH); 3391 } 3392 3393 /* 3394 * Function: sd_read_unit_properties 3395 * 3396 * Description: The following implements a property lookup mechanism. 3397 * Properties for particular disks (keyed on vendor, model 3398 * and rev numbers) are sought in the sd.conf file via 3399 * sd_process_sdconf_file(), and if not found there, are 3400 * looked for in a list hardcoded in this driver via 3401 * sd_process_sdconf_table(). Once located, the properties 3402 * are used to update the driver unit structure. 3403 * 3404 * Arguments: un - driver soft state (unit) structure 3405 */ 3406 3407 static void 3408 sd_read_unit_properties(struct sd_lun *un) 3409 { 3410 /* 3411 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3412 * the "sd-config-list" property (from the sd.conf file) or if 3413 * there was not a match for the inquiry vid/pid. If this event 3414 * occurs the static driver configuration table is searched for 3415 * a match. 3416 */ 3417 ASSERT(un != NULL); 3418 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3419 sd_process_sdconf_table(un); 3420 } 3421 3422 /* check for LSI device */ 3423 sd_is_lsi(un); 3424 3425 3426 } 3427 3428 3429 /* 3430 * Function: sd_process_sdconf_file 3431 * 3432 * Description: Use ddi_getlongprop to obtain the properties from the 3433 * driver's config file (i.e., sd.conf) and update the driver 3434 * soft state structure accordingly. 3435 * 3436 * Arguments: un - driver soft state (unit) structure 3437 * 3438 * Return Code: SD_SUCCESS - The properties were successfully set according 3439 * to the driver configuration file. 3440 * SD_FAILURE - The driver config list was not obtained or 3441 * there was no vid/pid match. This indicates that 3442 * the static config table should be used.
3443 * 3444 * The config file has a property, "sd-config-list", which consists of 3445 * one or more duplets as follows: 3446 * 3447 * sd-config-list= 3448 * <duplet>, 3449 * [<duplet>,] 3450 * [<duplet>]; 3451 * 3452 * The structure of each duplet is as follows: 3453 * 3454 * <duplet>:= <vid+pid>,<data-property-name_list> 3455 * 3456 * The first entry of the duplet is the device ID string (the concatenated 3457 * vid & pid; not to be confused with a device_id). This is defined in 3458 * the same way as in the sd_disk_table. 3459 * 3460 * The second part of the duplet is a string that identifies a 3461 * data-property-name-list. The data-property-name-list is defined as 3462 * follows: 3463 * 3464 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3465 * 3466 * The syntax of <data-property-name> depends on the <version> field. 3467 * 3468 * If version = SD_CONF_VERSION_1 we have the following syntax: 3469 * 3470 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3471 * 3472 * where the prop0 value will be used to set prop0 if bit0 set in the 3473 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3474 * 3475 */ 3476 3477 static int 3478 sd_process_sdconf_file(struct sd_lun *un) 3479 { 3480 char *config_list = NULL; 3481 int config_list_len; 3482 int len; 3483 int dupletlen = 0; 3484 char *vidptr; 3485 int vidlen; 3486 char *dnlist_ptr; 3487 char *dataname_ptr; 3488 int dnlist_len; 3489 int dataname_len; 3490 int *data_list; 3491 int data_list_len; 3492 int rval = SD_FAILURE; 3493 int i; 3494 3495 ASSERT(un != NULL); 3496 3497 /* Obtain the configuration list associated with the .conf file */ 3498 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3499 sd_config_list, (caddr_t)&config_list, &config_list_len) 3500 != DDI_PROP_SUCCESS) { 3501 return (SD_FAILURE); 3502 } 3503 3504 /* 3505 * Compare vids in each duplet to the inquiry vid - if a match is 3506 * made, get the data value and update the soft state structure 3507 * accordingly. 3508 * 3509 * Note: This algorithm is complex and difficult to maintain. It should 3510 * be replaced with a more robust implementation. 3511 */ 3512 for (len = config_list_len, vidptr = config_list; len > 0; 3513 vidptr += dupletlen, len -= dupletlen) { 3514 /* 3515 * Note: The assumption here is that each vid entry is on 3516 * a unique line from its associated duplet. 3517 */ 3518 vidlen = dupletlen = (int)strlen(vidptr); 3519 if ((vidlen == 0) || 3520 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3521 dupletlen++; 3522 continue; 3523 } 3524 3525 /* 3526 * dnlist contains 1 or more blank separated 3527 * data-property-name entries 3528 */ 3529 dnlist_ptr = vidptr + vidlen + 1; 3530 dnlist_len = (int)strlen(dnlist_ptr); 3531 dupletlen += dnlist_len + 2; 3532 3533 /* 3534 * Set a pointer for the first data-property-name 3535 * entry in the list 3536 */ 3537 dataname_ptr = dnlist_ptr; 3538 dataname_len = 0; 3539 3540 /* 3541 * Loop through all data-property-name entries in the 3542 * data-property-name-list setting the properties for each. 3543 */ 3544 while (dataname_len < dnlist_len) { 3545 int version; 3546 3547 /* 3548 * Determine the length of the current 3549 * data-property-name entry by indexing until a 3550 * blank or NULL is encountered. When the space is 3551 * encountered reset it to a NULL for compliance 3552 * with ddi_getlongprop(). 
3553 */ 3554 for (i = 0; ((dataname_ptr[i] != ' ') && 3555 (dataname_ptr[i] != '\0')); i++) { 3556 ; 3557 } 3558 3559 dataname_len += i; 3560 /* If not null terminated, Make it so */ 3561 if (dataname_ptr[i] == ' ') { 3562 dataname_ptr[i] = '\0'; 3563 } 3564 dataname_len++; 3565 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3566 "sd_process_sdconf_file: disk:%s, data:%s\n", 3567 vidptr, dataname_ptr); 3568 3569 /* Get the data list */ 3570 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3571 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3572 != DDI_PROP_SUCCESS) { 3573 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3574 "sd_process_sdconf_file: data property (%s)" 3575 " has no value\n", dataname_ptr); 3576 dataname_ptr = dnlist_ptr + dataname_len; 3577 continue; 3578 } 3579 3580 version = data_list[0]; 3581 3582 if (version == SD_CONF_VERSION_1) { 3583 sd_tunables values; 3584 3585 /* Set the properties */ 3586 if (sd_chk_vers1_data(un, data_list[1], 3587 &data_list[2], data_list_len, dataname_ptr) 3588 == SD_SUCCESS) { 3589 sd_get_tunables_from_conf(un, 3590 data_list[1], &data_list[2], 3591 &values); 3592 sd_set_vers1_properties(un, 3593 data_list[1], &values); 3594 rval = SD_SUCCESS; 3595 } else { 3596 rval = SD_FAILURE; 3597 } 3598 } else { 3599 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3600 "data property %s version 0x%x is invalid.", 3601 dataname_ptr, version); 3602 rval = SD_FAILURE; 3603 } 3604 kmem_free(data_list, data_list_len); 3605 dataname_ptr = dnlist_ptr + dataname_len; 3606 } 3607 } 3608 3609 /* free up the memory allocated by ddi_getlongprop */ 3610 if (config_list) { 3611 kmem_free(config_list, config_list_len); 3612 } 3613 3614 return (rval); 3615 } 3616 3617 /* 3618 * Function: sd_get_tunables_from_conf() 3619 * 3620 * 3621 * This function reads the data list from the sd.conf file and pulls 3622 * the values that can have numeric values as arguments and places 3623 * the values in the appropriate sd_tunables member. 3624 * Since the order of the data list members varies across platforms 3625 * This function reads them from the data list in a platform specific 3626 * order and places them into the correct sd_tunable member that is 3627 * consistent across all platforms. 
3628 */ 3629 static void 3630 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3631 sd_tunables *values) 3632 { 3633 int i; 3634 int mask; 3635 3636 bzero(values, sizeof (sd_tunables)); 3637 3638 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3639 3640 mask = 1 << i; 3641 if (mask > flags) { 3642 break; 3643 } 3644 3645 switch (mask & flags) { 3646 case 0: /* This mask bit not set in flags */ 3647 continue; 3648 case SD_CONF_BSET_THROTTLE: 3649 values->sdt_throttle = data_list[i]; 3650 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3651 "sd_get_tunables_from_conf: throttle = %d\n", 3652 values->sdt_throttle); 3653 break; 3654 case SD_CONF_BSET_CTYPE: 3655 values->sdt_ctype = data_list[i]; 3656 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3657 "sd_get_tunables_from_conf: ctype = %d\n", 3658 values->sdt_ctype); 3659 break; 3660 case SD_CONF_BSET_NRR_COUNT: 3661 values->sdt_not_rdy_retries = data_list[i]; 3662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3663 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3664 values->sdt_not_rdy_retries); 3665 break; 3666 case SD_CONF_BSET_BSY_RETRY_COUNT: 3667 values->sdt_busy_retries = data_list[i]; 3668 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3669 "sd_get_tunables_from_conf: busy_retries = %d\n", 3670 values->sdt_busy_retries); 3671 break; 3672 case SD_CONF_BSET_RST_RETRIES: 3673 values->sdt_reset_retries = data_list[i]; 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_get_tunables_from_conf: reset_retries = %d\n", 3676 values->sdt_reset_retries); 3677 break; 3678 case SD_CONF_BSET_RSV_REL_TIME: 3679 values->sdt_reserv_rel_time = data_list[i]; 3680 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3681 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3682 values->sdt_reserv_rel_time); 3683 break; 3684 case SD_CONF_BSET_MIN_THROTTLE: 3685 values->sdt_min_throttle = data_list[i]; 3686 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3687 "sd_get_tunables_from_conf: min_throttle = %d\n", 3688 values->sdt_min_throttle); 3689 break; 3690 case SD_CONF_BSET_DISKSORT_DISABLED: 3691 values->sdt_disk_sort_dis = data_list[i]; 3692 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3693 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3694 values->sdt_disk_sort_dis); 3695 break; 3696 case SD_CONF_BSET_LUN_RESET_ENABLED: 3697 values->sdt_lun_reset_enable = data_list[i]; 3698 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3699 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3700 "\n", values->sdt_lun_reset_enable); 3701 break; 3702 case SD_CONF_BSET_CACHE_IS_NV: 3703 values->sdt_suppress_cache_flush = data_list[i]; 3704 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3705 "sd_get_tunables_from_conf: \ 3706 suppress_cache_flush = %d" 3707 "\n", values->sdt_suppress_cache_flush); 3708 break; 3709 } 3710 } 3711 } 3712 3713 /* 3714 * Function: sd_process_sdconf_table 3715 * 3716 * Description: Search the static configuration table for a match on the 3717 * inquiry vid/pid and update the driver soft state structure 3718 * according to the table property values for the device. 
3719 * 3720 * The form of a configuration table entry is: 3721 * <vid+pid>,<flags>,<property-data> 3722 * "SEAGATE ST42400N",1,0x40000, 3723 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3724 * 3725 * Arguments: un - driver soft state (unit) structure 3726 */ 3727 3728 static void 3729 sd_process_sdconf_table(struct sd_lun *un) 3730 { 3731 char *id = NULL; 3732 int table_index; 3733 int idlen; 3734 3735 ASSERT(un != NULL); 3736 for (table_index = 0; table_index < sd_disk_table_size; 3737 table_index++) { 3738 id = sd_disk_table[table_index].device_id; 3739 idlen = strlen(id); 3740 if (idlen == 0) { 3741 continue; 3742 } 3743 3744 /* 3745 * The static configuration table currently does not 3746 * implement version 10 properties. Additionally, 3747 * multiple data-property-name entries are not 3748 * implemented in the static configuration table. 3749 */ 3750 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3751 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3752 "sd_process_sdconf_table: disk %s\n", id); 3753 sd_set_vers1_properties(un, 3754 sd_disk_table[table_index].flags, 3755 sd_disk_table[table_index].properties); 3756 break; 3757 } 3758 } 3759 } 3760 3761 3762 /* 3763 * Function: sd_sdconf_id_match 3764 * 3765 * Description: This local function implements a case sensitive vid/pid 3766 * comparison as well as the boundary cases of wild card and 3767 * multiple blanks. 3768 * 3769 * Note: An implicit assumption made here is that the scsi 3770 * inquiry structure will always keep the vid, pid and 3771 * revision strings in consecutive sequence, so they can be 3772 * read as a single string. If this assumption is not the 3773 * case, a separate string, to be used for the check, needs 3774 * to be built with these strings concatenated. 3775 * 3776 * Arguments: un - driver soft state (unit) structure 3777 * id - table or config file vid/pid 3778 * idlen - length of the vid/pid (bytes) 3779 * 3780 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3781 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3782 */ 3783 3784 static int 3785 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3786 { 3787 struct scsi_inquiry *sd_inq; 3788 int rval = SD_SUCCESS; 3789 3790 ASSERT(un != NULL); 3791 sd_inq = un->un_sd->sd_inq; 3792 ASSERT(id != NULL); 3793 3794 /* 3795 * We use the inq_vid as a pointer to a buffer containing the 3796 * vid and pid and use the entire vid/pid length of the table 3797 * entry for the comparison. This works because the inq_pid 3798 * data member follows inq_vid in the scsi_inquiry structure. 3799 */ 3800 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3801 /* 3802 * The user id string is compared to the inquiry vid/pid 3803 * using a case insensitive comparison and ignoring 3804 * multiple spaces. 3805 */ 3806 rval = sd_blank_cmp(un, id, idlen); 3807 if (rval != SD_SUCCESS) { 3808 /* 3809 * User id strings that start and end with a "*" 3810 * are a special case. These do not have a 3811 * specific vendor, and the product string can 3812 * appear anywhere in the 16 byte PID portion of 3813 * the inquiry data. This is a simple strstr() 3814 * type search for the user id in the inquiry data. 
3815 */ 3816 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3817 char *pidptr = &id[1]; 3818 int i; 3819 int j; 3820 int pidstrlen = idlen - 2; 3821 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3822 pidstrlen; 3823 3824 if (j < 0) { 3825 return (SD_FAILURE); 3826 } 3827 for (i = 0; i < j; i++) { 3828 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3829 pidptr, pidstrlen) == 0) { 3830 rval = SD_SUCCESS; 3831 break; 3832 } 3833 } 3834 } 3835 } 3836 } 3837 return (rval); 3838 } 3839 3840 3841 /* 3842 * Function: sd_blank_cmp 3843 * 3844 * Description: If the id string starts and ends with a space, treat 3845 * multiple consecutive spaces as equivalent to a single 3846 * space. For example, this causes a sd_disk_table entry 3847 * of " NEC CDROM " to match a device's id string of 3848 * "NEC CDROM". 3849 * 3850 * Note: The success exit condition for this routine is if 3851 * the pointer to the table entry is '\0' and the cnt of 3852 * the inquiry length is zero. This will happen if the inquiry 3853 * string returned by the device is padded with spaces to be 3854 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3855 * SCSI spec states that the inquiry string is to be padded with 3856 * spaces. 3857 * 3858 * Arguments: un - driver soft state (unit) structure 3859 * id - table or config file vid/pid 3860 * idlen - length of the vid/pid (bytes) 3861 * 3862 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3863 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3864 */ 3865 3866 static int 3867 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3868 { 3869 char *p1; 3870 char *p2; 3871 int cnt; 3872 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3873 sizeof (SD_INQUIRY(un)->inq_pid); 3874 3875 ASSERT(un != NULL); 3876 p2 = un->un_sd->sd_inq->inq_vid; 3877 ASSERT(id != NULL); 3878 p1 = id; 3879 3880 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3881 /* 3882 * Note: string p1 is terminated by a NUL but string p2 3883 * isn't. The end of p2 is determined by cnt. 3884 */ 3885 for (;;) { 3886 /* skip over any extra blanks in both strings */ 3887 while ((*p1 != '\0') && (*p1 == ' ')) { 3888 p1++; 3889 } 3890 while ((cnt != 0) && (*p2 == ' ')) { 3891 p2++; 3892 cnt--; 3893 } 3894 3895 /* compare the two strings */ 3896 if ((cnt == 0) || 3897 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3898 break; 3899 } 3900 while ((cnt > 0) && 3901 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3902 p1++; 3903 p2++; 3904 cnt--; 3905 } 3906 } 3907 } 3908 3909 /* return SD_SUCCESS if both strings match */ 3910 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3911 } 3912 3913 3914 /* 3915 * Function: sd_chk_vers1_data 3916 * 3917 * Description: Verify the version 1 device properties provided by the 3918 * user via the configuration file 3919 * 3920 * Arguments: un - driver soft state (unit) structure 3921 * flags - integer mask indicating properties to be set 3922 * prop_list - integer list of property values 3923 * list_len - length of user provided data 3924 * 3925 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3926 * SD_FAILURE - Indicates the user provided data is invalid 3927 */ 3928 3929 static int 3930 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3931 int list_len, char *dataname_ptr) 3932 { 3933 int i; 3934 int mask = 1; 3935 int index = 0; 3936 3937 ASSERT(un != NULL); 3938 3939 /* Check for a NULL property name and list */ 3940 if (dataname_ptr == NULL) { 3941 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3942 "sd_chk_vers1_data: NULL data property name."); 3943 return (SD_FAILURE); 3944 } 3945 if (prop_list == NULL) { 3946 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3947 "sd_chk_vers1_data: %s NULL data property list.", 3948 dataname_ptr); 3949 return (SD_FAILURE); 3950 } 3951 3952 /* Display a warning if undefined bits are set in the flags */ 3953 if (flags & ~SD_CONF_BIT_MASK) { 3954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3955 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3956 "Properties not set.", 3957 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3958 return (SD_FAILURE); 3959 } 3960 3961 /* 3962 * Verify the length of the list by identifying the highest bit set 3963 * in the flags and validating that the property list has a length 3964 * up to the index of this bit. 3965 */ 3966 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3967 if (flags & mask) { 3968 index++; 3969 } 3970 mask = 1 << i; 3971 } 3972 if ((list_len / sizeof (int)) < (index + 2)) { 3973 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3974 "sd_chk_vers1_data: " 3975 "Data property list %s size is incorrect. " 3976 "Properties not set.", dataname_ptr); 3977 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3978 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3979 return (SD_FAILURE); 3980 } 3981 return (SD_SUCCESS); 3982 } 3983 3984 3985 /* 3986 * Function: sd_set_vers1_properties 3987 * 3988 * Description: Set version 1 device properties based on a property list 3989 * retrieved from the driver configuration file or static 3990 * configuration table. Version 1 properties have the format: 3991 * 3992 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3993 * 3994 * where the prop0 value will be used to set prop0 if bit0 3995 * is set in the flags 3996 * 3997 * Arguments: un - driver soft state (unit) structure 3998 * flags - integer mask indicating properties to be set 3999 * prop_list - integer list of property values 4000 */ 4001 4002 static void 4003 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4004 { 4005 ASSERT(un != NULL); 4006 4007 /* 4008 * Set the flag to indicate cache is to be disabled. An attempt 4009 * to disable the cache via sd_cache_control() will be made 4010 * later during attach once the basic initialization is complete. 
4011 */ 4012 if (flags & SD_CONF_BSET_NOCACHE) { 4013 un->un_f_opt_disable_cache = TRUE; 4014 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4015 "sd_set_vers1_properties: caching disabled flag set\n"); 4016 } 4017 4018 /* CD-specific configuration parameters */ 4019 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4020 un->un_f_cfg_playmsf_bcd = TRUE; 4021 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4022 "sd_set_vers1_properties: playmsf_bcd set\n"); 4023 } 4024 if (flags & SD_CONF_BSET_READSUB_BCD) { 4025 un->un_f_cfg_readsub_bcd = TRUE; 4026 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4027 "sd_set_vers1_properties: readsub_bcd set\n"); 4028 } 4029 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4030 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4033 } 4034 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4035 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4036 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4037 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4038 } 4039 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4040 un->un_f_cfg_no_read_header = TRUE; 4041 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4042 "sd_set_vers1_properties: no_read_header set\n"); 4043 } 4044 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4045 un->un_f_cfg_read_cd_xd4 = TRUE; 4046 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4047 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4048 } 4049 4050 /* Support for devices which do not have valid/unique serial numbers */ 4051 if (flags & SD_CONF_BSET_FAB_DEVID) { 4052 un->un_f_opt_fab_devid = TRUE; 4053 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4054 "sd_set_vers1_properties: fab_devid bit set\n"); 4055 } 4056 4057 /* Support for user throttle configuration */ 4058 if (flags & SD_CONF_BSET_THROTTLE) { 4059 ASSERT(prop_list != NULL); 4060 un->un_saved_throttle = un->un_throttle = 4061 prop_list->sdt_throttle; 4062 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4063 "sd_set_vers1_properties: throttle set to %d\n", 4064 prop_list->sdt_throttle); 4065 } 4066 4067 /* Set the per disk retry count according to the conf file or table. 
*/ 4068 if (flags & SD_CONF_BSET_NRR_COUNT) { 4069 ASSERT(prop_list != NULL); 4070 if (prop_list->sdt_not_rdy_retries) { 4071 un->un_notready_retry_count = 4072 prop_list->sdt_not_rdy_retries; 4073 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4074 "sd_set_vers1_properties: not ready retry count" 4075 " set to %d\n", un->un_notready_retry_count); 4076 } 4077 } 4078 4079 /* The controller type is reported for generic disk driver ioctls */ 4080 if (flags & SD_CONF_BSET_CTYPE) { 4081 ASSERT(prop_list != NULL); 4082 switch (prop_list->sdt_ctype) { 4083 case CTYPE_CDROM: 4084 un->un_ctype = prop_list->sdt_ctype; 4085 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4086 "sd_set_vers1_properties: ctype set to " 4087 "CTYPE_CDROM\n"); 4088 break; 4089 case CTYPE_CCS: 4090 un->un_ctype = prop_list->sdt_ctype; 4091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4092 "sd_set_vers1_properties: ctype set to " 4093 "CTYPE_CCS\n"); 4094 break; 4095 case CTYPE_ROD: /* RW optical */ 4096 un->un_ctype = prop_list->sdt_ctype; 4097 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4098 "sd_set_vers1_properties: ctype set to " 4099 "CTYPE_ROD\n"); 4100 break; 4101 default: 4102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4103 "sd_set_vers1_properties: Could not set " 4104 "invalid ctype value (%d)", 4105 prop_list->sdt_ctype); 4106 } 4107 } 4108 4109 /* Purple failover timeout */ 4110 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4111 ASSERT(prop_list != NULL); 4112 un->un_busy_retry_count = 4113 prop_list->sdt_busy_retries; 4114 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4115 "sd_set_vers1_properties: " 4116 "busy retry count set to %d\n", 4117 un->un_busy_retry_count); 4118 } 4119 4120 /* Purple reset retry count */ 4121 if (flags & SD_CONF_BSET_RST_RETRIES) { 4122 ASSERT(prop_list != NULL); 4123 un->un_reset_retry_count = 4124 prop_list->sdt_reset_retries; 4125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4126 "sd_set_vers1_properties: " 4127 "reset retry count set to %d\n", 4128 un->un_reset_retry_count); 4129 } 4130 4131 /* Purple reservation release timeout */ 4132 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4133 ASSERT(prop_list != NULL); 4134 un->un_reserve_release_time = 4135 prop_list->sdt_reserv_rel_time; 4136 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4137 "sd_set_vers1_properties: " 4138 "reservation release timeout set to %d\n", 4139 un->un_reserve_release_time); 4140 } 4141 4142 /* 4143 * Driver flag telling the driver to verify that no commands are pending 4144 * for a device before issuing a Test Unit Ready. This is a workaround 4145 * for a firmware bug in some Seagate eliteI drives. 4146 */ 4147 if (flags & SD_CONF_BSET_TUR_CHECK) { 4148 un->un_f_cfg_tur_check = TRUE; 4149 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4150 "sd_set_vers1_properties: tur queue check set\n"); 4151 } 4152 4153 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4154 un->un_min_throttle = prop_list->sdt_min_throttle; 4155 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4156 "sd_set_vers1_properties: min throttle set to %d\n", 4157 un->un_min_throttle); 4158 } 4159 4160 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4161 un->un_f_disksort_disabled = 4162 (prop_list->sdt_disk_sort_dis != 0) ? 4163 TRUE : FALSE; 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4165 "sd_set_vers1_properties: disksort disabled " 4166 "flag set to %d\n", 4167 prop_list->sdt_disk_sort_dis); 4168 } 4169 4170 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4171 un->un_f_lun_reset_enabled = 4172 (prop_list->sdt_lun_reset_enable != 0) ? 
4173 TRUE : FALSE; 4174 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4175 "sd_set_vers1_properties: lun reset enabled " 4176 "flag set to %d\n", 4177 prop_list->sdt_lun_reset_enable); 4178 } 4179 4180 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4181 un->un_f_suppress_cache_flush = 4182 (prop_list->sdt_suppress_cache_flush != 0) ? 4183 TRUE : FALSE; 4184 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4185 "sd_set_vers1_properties: suppress_cache_flush " 4186 "flag set to %d\n", 4187 prop_list->sdt_suppress_cache_flush); 4188 } 4189 4190 /* 4191 * Validate the throttle values. 4192 * If any of the numbers are invalid, set everything to defaults. 4193 */ 4194 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4195 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4196 (un->un_min_throttle > un->un_throttle)) { 4197 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4198 un->un_min_throttle = sd_min_throttle; 4199 } 4200 } 4201 4202 /* 4203 * Function: sd_is_lsi() 4204 * 4205 * Description: Check for lsi devices, step through the static device 4206 * table to match vid/pid. 4207 * 4208 * Args: un - ptr to sd_lun 4209 * 4210 * Notes: When creating new LSI property, need to add the new LSI property 4211 * to this function. 4212 */ 4213 static void 4214 sd_is_lsi(struct sd_lun *un) 4215 { 4216 char *id = NULL; 4217 int table_index; 4218 int idlen; 4219 void *prop; 4220 4221 ASSERT(un != NULL); 4222 for (table_index = 0; table_index < sd_disk_table_size; 4223 table_index++) { 4224 id = sd_disk_table[table_index].device_id; 4225 idlen = strlen(id); 4226 if (idlen == 0) { 4227 continue; 4228 } 4229 4230 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4231 prop = sd_disk_table[table_index].properties; 4232 if (prop == &lsi_properties || 4233 prop == &lsi_oem_properties || 4234 prop == &lsi_properties_scsi || 4235 prop == &symbios_properties) { 4236 un->un_f_cfg_is_lsi = TRUE; 4237 } 4238 break; 4239 } 4240 } 4241 } 4242 4243 /* 4244 * Function: sd_get_physical_geometry 4245 * 4246 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4247 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4248 * target, and use this information to initialize the physical 4249 * geometry cache specified by pgeom_p. 4250 * 4251 * MODE SENSE is an optional command, so failure in this case 4252 * does not necessarily denote an error. We want to use the 4253 * MODE SENSE commands to derive the physical geometry of the 4254 * device, but if either command fails, the logical geometry is 4255 * used as the fallback for disk label geometry in cmlb. 4256 * 4257 * This requires that un->un_blockcount and un->un_tgt_blocksize 4258 * have already been initialized for the current target and 4259 * that the current values be passed as args so that we don't 4260 * end up ever trying to use -1 as a valid value. This could 4261 * happen if either value is reset while we're not holding 4262 * the mutex. 4263 * 4264 * Arguments: un - driver soft state (unit) structure 4265 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4266 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4267 * to use the USCSI "direct" chain and bypass the normal 4268 * command waitq. 4269 * 4270 * Context: Kernel thread only (can sleep). 
4271 */ 4272 4273 static int 4274 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4275 diskaddr_t capacity, int lbasize, int path_flag) 4276 { 4277 struct mode_format *page3p; 4278 struct mode_geometry *page4p; 4279 struct mode_header *headerp; 4280 int sector_size; 4281 int nsect; 4282 int nhead; 4283 int ncyl; 4284 int intrlv; 4285 int spc; 4286 diskaddr_t modesense_capacity; 4287 int rpm; 4288 int bd_len; 4289 int mode_header_length; 4290 uchar_t *p3bufp; 4291 uchar_t *p4bufp; 4292 int cdbsize; 4293 int ret = EIO; 4294 4295 ASSERT(un != NULL); 4296 4297 if (lbasize == 0) { 4298 if (ISCD(un)) { 4299 lbasize = 2048; 4300 } else { 4301 lbasize = un->un_sys_blocksize; 4302 } 4303 } 4304 pgeom_p->g_secsize = (unsigned short)lbasize; 4305 4306 /* 4307 * If the unit is a cd/dvd drive MODE SENSE page three 4308 * and MODE SENSE page four are reserved (see SBC spec 4309 * and MMC spec). To prevent soft errors just return 4310 * using the default LBA size. 4311 */ 4312 if (ISCD(un)) 4313 return (ret); 4314 4315 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4316 4317 /* 4318 * Retrieve MODE SENSE page 3 - Format Device Page 4319 */ 4320 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4321 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4322 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4323 != 0) { 4324 SD_ERROR(SD_LOG_COMMON, un, 4325 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4326 goto page3_exit; 4327 } 4328 4329 /* 4330 * Determine size of Block Descriptors in order to locate the mode 4331 * page data. ATAPI devices return 0, SCSI devices should return 4332 * MODE_BLK_DESC_LENGTH. 4333 */ 4334 headerp = (struct mode_header *)p3bufp; 4335 if (un->un_f_cfg_is_atapi == TRUE) { 4336 struct mode_header_grp2 *mhp = 4337 (struct mode_header_grp2 *)headerp; 4338 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4339 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4340 } else { 4341 mode_header_length = MODE_HEADER_LENGTH; 4342 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4343 } 4344 4345 if (bd_len > MODE_BLK_DESC_LENGTH) { 4346 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4347 "received unexpected bd_len of %d, page3\n", bd_len); 4348 goto page3_exit; 4349 } 4350 4351 page3p = (struct mode_format *) 4352 ((caddr_t)headerp + mode_header_length + bd_len); 4353 4354 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4355 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4356 "mode sense pg3 code mismatch %d\n", 4357 page3p->mode_page.code); 4358 goto page3_exit; 4359 } 4360 4361 /* 4362 * Use this physical geometry data only if BOTH MODE SENSE commands 4363 * complete successfully; otherwise, revert to the logical geometry. 4364 * So, we need to save everything in temporary variables. 
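* (The multi-byte fields of the mode page arrive in SCSI big-endian * byte order, hence the BE_16() conversions applied below.)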
4365 */ 4366 sector_size = BE_16(page3p->data_bytes_sect); 4367 4368 /* 4369 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4370 */ 4371 if (sector_size == 0) { 4372 sector_size = un->un_sys_blocksize; 4373 } else { 4374 sector_size &= ~(un->un_sys_blocksize - 1); 4375 } 4376 4377 nsect = BE_16(page3p->sect_track); 4378 intrlv = BE_16(page3p->interleave); 4379 4380 SD_INFO(SD_LOG_COMMON, un, 4381 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4382 SD_INFO(SD_LOG_COMMON, un, 4383 " mode page: %d; nsect: %d; sector size: %d;\n", 4384 page3p->mode_page.code, nsect, sector_size); 4385 SD_INFO(SD_LOG_COMMON, un, 4386 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4387 BE_16(page3p->track_skew), 4388 BE_16(page3p->cylinder_skew)); 4389 4390 4391 /* 4392 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4393 */ 4394 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4395 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4396 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4397 != 0) { 4398 SD_ERROR(SD_LOG_COMMON, un, 4399 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4400 goto page4_exit; 4401 } 4402 4403 /* 4404 * Determine size of Block Descriptors in order to locate the mode 4405 * page data. ATAPI devices return 0, SCSI devices should return 4406 * MODE_BLK_DESC_LENGTH. 4407 */ 4408 headerp = (struct mode_header *)p4bufp; 4409 if (un->un_f_cfg_is_atapi == TRUE) { 4410 struct mode_header_grp2 *mhp = 4411 (struct mode_header_grp2 *)headerp; 4412 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4413 } else { 4414 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4415 } 4416 4417 if (bd_len > MODE_BLK_DESC_LENGTH) { 4418 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4419 "received unexpected bd_len of %d, page4\n", bd_len); 4420 goto page4_exit; 4421 } 4422 4423 page4p = (struct mode_geometry *) 4424 ((caddr_t)headerp + mode_header_length + bd_len); 4425 4426 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4427 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4428 "mode sense pg4 code mismatch %d\n", 4429 page4p->mode_page.code); 4430 goto page4_exit; 4431 } 4432 4433 /* 4434 * Stash the data now, after we know that both commands completed. 4435 */ 4436 4437 4438 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4439 spc = nhead * nsect; 4440 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4441 rpm = BE_16(page4p->rpm); 4442 4443 modesense_capacity = spc * ncyl; 4444 4445 SD_INFO(SD_LOG_COMMON, un, 4446 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4447 SD_INFO(SD_LOG_COMMON, un, 4448 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4449 SD_INFO(SD_LOG_COMMON, un, 4450 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4451 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4452 (void *)pgeom_p, capacity); 4453 4454 /* 4455 * Compensate if the drive's geometry is not rectangular, i.e., 4456 * the product of C * H * S returned by MODE SENSE >= that returned 4457 * by read capacity. This is an idiosyncrasy of the original x86 4458 * disk subsystem. 
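* * As a worked example with made-up numbers: nhead = 16 and nsect = 63 * give spc = 1008; with ncyl = 1000, modesense_capacity = 1008000. If * READ CAPACITY reported capacity = 1000000, the excess rounds up to * (8000 + 1007) / 1008 = 8 alternate cylinders, so g_acyl = 8 and * g_ncyl = 1000 - 8 = 992.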
4459 */ 4460 if (modesense_capacity >= capacity) { 4461 SD_INFO(SD_LOG_COMMON, un, 4462 "sd_get_physical_geometry: adjusting acyl; " 4463 "old: %d; new: %d\n", pgeom_p->g_acyl, 4464 (modesense_capacity - capacity + spc - 1) / spc); 4465 if (sector_size != 0) { 4466 /* 1243403: NEC D38x7 drives don't support sec size */ 4467 pgeom_p->g_secsize = (unsigned short)sector_size; 4468 } 4469 pgeom_p->g_nsect = (unsigned short)nsect; 4470 pgeom_p->g_nhead = (unsigned short)nhead; 4471 pgeom_p->g_capacity = capacity; 4472 pgeom_p->g_acyl = 4473 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4474 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4475 } 4476 4477 pgeom_p->g_rpm = (unsigned short)rpm; 4478 pgeom_p->g_intrlv = (unsigned short)intrlv; 4479 ret = 0; 4480 4481 SD_INFO(SD_LOG_COMMON, un, 4482 "sd_get_physical_geometry: mode sense geometry:\n"); 4483 SD_INFO(SD_LOG_COMMON, un, 4484 " nsect: %d; sector size: %d; interlv: %d\n", 4485 nsect, sector_size, intrlv); 4486 SD_INFO(SD_LOG_COMMON, un, 4487 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4488 nhead, ncyl, rpm, modesense_capacity); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 "sd_get_physical_geometry: (cached)\n"); 4491 SD_INFO(SD_LOG_COMMON, un, 4492 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4493 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4494 pgeom_p->g_nhead, pgeom_p->g_nsect); 4495 SD_INFO(SD_LOG_COMMON, un, 4496 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4497 pgeom_p->g_secsize, pgeom_p->g_capacity, 4498 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4499 4500 page4_exit: 4501 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4502 page3_exit: 4503 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4504 4505 return (ret); 4506 } 4507 4508 /* 4509 * Function: sd_get_virtual_geometry 4510 * 4511 * Description: Ask the controller to tell us about the target device. 4512 * 4513 * Arguments: un - pointer to softstate 4514 * capacity - disk capacity in #blocks 4515 * lbasize - disk block size in bytes 4516 * 4517 * Context: Kernel thread only 4518 */ 4519 4520 static int 4521 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4522 diskaddr_t capacity, int lbasize) 4523 { 4524 uint_t geombuf; 4525 int spc; 4526 4527 ASSERT(un != NULL); 4528 4529 /* Set sector size, and total number of sectors */ 4530 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4531 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4532 4533 /* Let the HBA tell us its geometry */ 4534 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4535 4536 /* A value of -1 indicates an undefined "geometry" property */ 4537 if (geombuf == (-1)) { 4538 return (EINVAL); 4539 } 4540 4541 /* Initialize the logical geometry cache. */ 4542 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4543 lgeom_p->g_nsect = geombuf & 0xffff; 4544 lgeom_p->g_secsize = un->un_sys_blocksize; 4545 4546 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4547 4548 /* 4549 * Note: The driver originally converted the capacity value from 4550 * target blocks to system blocks. However, the capacity value passed 4551 * to this routine is already in terms of system blocks (this scaling 4552 * is done when the READ CAPACITY command is issued and processed). 4553 * This 'error' may have gone undetected because the usage of g_ncyl 4554 * (which is based upon g_capacity) is very limited within the driver 4555 */ 4556 lgeom_p->g_capacity = capacity; 4557 4558 /* 4559 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4560 * hba may return zero values if the device has been removed. 4561 */ 4562 if (spc == 0) { 4563 lgeom_p->g_ncyl = 0; 4564 } else { 4565 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4566 } 4567 lgeom_p->g_acyl = 0; 4568 4569 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4570 return (0); 4571 4572 } 4573 /* 4574 * Function: sd_update_block_info 4575 * 4576 * Description: Record the new target sector size and capacity in the 4577 * soft state and update the exported capacity properties. 4578 * 4579 * Arguments: un: unit struct. 4580 * lbasize: new target sector size 4581 * capacity: new target capacity, i.e., block count 4582 * 4583 * Context: Kernel thread context 4584 */ 4585 4586 static void 4587 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4588 { 4589 uint_t dblk; 4590 4591 if (lbasize != 0) { 4592 un->un_tgt_blocksize = lbasize; 4593 un->un_f_tgt_blocksize_is_valid = TRUE; 4594 } 4595 4596 if (capacity != 0) { 4597 un->un_blockcount = capacity; 4598 un->un_f_blockcount_is_valid = TRUE; 4599 } 4600 4601 /* 4602 * Update device capacity properties. 4603 * 4604 * 'device-nblocks' number of blocks in target's units 4605 * 'device-blksize' data bearing size of target's block 4606 * 4607 * NOTE: math is complicated by the fact that un_tgt_blocksize may 4608 * not be a power of two for checksumming disks with 520/528 byte 4609 * sectors. 4610 */ 4611 if (un->un_f_tgt_blocksize_is_valid && 4612 un->un_f_blockcount_is_valid && 4613 un->un_sys_blocksize) { 4614 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 4615 (void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un), 4616 "device-nblocks", un->un_blockcount / dblk); 4617 /* 4618 * To save memory, only define "device-blksize" when its 4619 * value is different from the default DEV_BSIZE value. 4620 */ 4621 if ((un->un_sys_blocksize * dblk) != DEV_BSIZE) 4622 (void) ddi_prop_update_int(DDI_DEV_T_NONE, 4623 SD_DEVINFO(un), "device-blksize", 4624 un->un_sys_blocksize * dblk); 4625 } 4626 } 4627 4628 4629 /* 4630 * Function: sd_register_devid 4631 * 4632 * Description: This routine will obtain the device id information from the 4633 * target, obtain the serial number, and register the device 4634 * id with the ddi framework. 4635 * 4636 * Arguments: devi - the system's dev_info_t for the device. 4637 * un - driver soft state (unit) structure 4638 * reservation_flag - indicates if a reservation conflict 4639 * occurred during attach 4640 * 4641 * Context: Kernel Thread 4642 */ 4643 static void 4644 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4645 { 4646 int rval = 0; 4647 uchar_t *inq80 = NULL; 4648 size_t inq80_len = MAX_INQUIRY_SIZE; 4649 size_t inq80_resid = 0; 4650 uchar_t *inq83 = NULL; 4651 size_t inq83_len = MAX_INQUIRY_SIZE; 4652 size_t inq83_resid = 0; 4653 int dlen, len; 4654 char *sn; 4655 4656 ASSERT(un != NULL); 4657 ASSERT(mutex_owned(SD_MUTEX(un))); 4658 ASSERT((SD_DEVINFO(un)) == devi); 4659 4660 /* 4661 * If the transport has already registered a devid for this target 4662 * then that takes precedence over the driver's determination 4663 * of the devid. 4664 */ 4665 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4666 ASSERT(un->un_devid); 4667 return; /* use devid registered by the transport */ 4668 } 4669 4670 /* 4671 * This is the case of antiquated Sun disk drives that have the 4672 * FAB_DEVID property set in the disk_table.
These drives 4673 * manage their devids by storing them in the last 2 available sectors 4674 * on the drive and have them fabricated by the ddi layer by calling 4675 * ddi_devid_init and passing the DEVID_FAB flag. 4676 */ 4677 if (un->un_f_opt_fab_devid == TRUE) { 4678 /* 4679 * Depending on EINVAL isn't reliable, since a reserved disk 4680 * may result in invalid geometry, so check to make sure a 4681 * reservation conflict did not occur during attach. 4682 */ 4683 if ((sd_get_devid(un) == EINVAL) && 4684 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4685 /* 4686 * The devid is invalid AND there is no reservation 4687 * conflict. Fabricate a new devid. 4688 */ 4689 (void) sd_create_devid(un); 4690 } 4691 4692 /* Register the devid if it exists */ 4693 if (un->un_devid != NULL) { 4694 (void) ddi_devid_register(SD_DEVINFO(un), 4695 un->un_devid); 4696 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4697 "sd_register_devid: Devid Fabricated\n"); 4698 } 4699 return; 4700 } 4701 4702 /* 4703 * We check the availability of the World Wide Name (0x83) and Unit 4704 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4705 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4706 * 0x83 is available, that is the best choice. Our next choice is 4707 * 0x80. If neither is available, we munge the devid from the device 4708 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4709 * to fabricate a devid for non-Sun qualified disks. 4710 */ 4711 if (sd_check_vpd_page_support(un) == 0) { 4712 /* collect page 80 data if available */ 4713 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4714 4715 mutex_exit(SD_MUTEX(un)); 4716 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4717 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4718 0x01, 0x80, &inq80_resid); 4719 4720 if (rval != 0) { 4721 kmem_free(inq80, inq80_len); 4722 inq80 = NULL; 4723 inq80_len = 0; 4724 } else if (ddi_prop_exists( 4725 DDI_DEV_T_NONE, SD_DEVINFO(un), 4726 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4727 INQUIRY_SERIAL_NO) == 0) { 4728 /* 4729 * If we don't already have a serial number 4730 * property, do quick verify of data returned 4731 * and define property. 4732 */ 4733 dlen = inq80_len - inq80_resid; 4734 len = (size_t)inq80[3]; 4735 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4736 /* 4737 * Ensure sn termination, skip leading 4738 * blanks, and create property 4739 * 'inquiry-serial-no'.
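* (In the page 0x80 response, byte 3 holds the page length and the * ASCII serial number begins at byte 4; hence len is taken from * inq80[3] above and sn points at &inq80[4] below.)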
4740 */ 4741 sn = (char *)&inq80[4]; 4742 sn[len] = 0; 4743 while (*sn && (*sn == ' ')) 4744 sn++; 4745 if (*sn) { 4746 (void) ddi_prop_update_string( 4747 DDI_DEV_T_NONE, 4748 SD_DEVINFO(un), 4749 INQUIRY_SERIAL_NO, sn); 4750 } 4751 } 4752 } 4753 mutex_enter(SD_MUTEX(un)); 4754 } 4755 4756 /* collect page 83 data if available */ 4757 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4758 mutex_exit(SD_MUTEX(un)); 4759 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4760 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4761 0x01, 0x83, &inq83_resid); 4762 4763 if (rval != 0) { 4764 kmem_free(inq83, inq83_len); 4765 inq83 = NULL; 4766 inq83_len = 0; 4767 } 4768 mutex_enter(SD_MUTEX(un)); 4769 } 4770 } 4771 4772 /* encode best devid possible based on data available */ 4773 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4774 (char *)ddi_driver_name(SD_DEVINFO(un)), 4775 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4776 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4777 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4778 4779 /* devid successfully encoded, register devid */ 4780 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4781 4782 } else { 4783 /* 4784 * Unable to encode a devid based on data available. 4785 * This is not a Sun qualified disk. Older Sun disk 4786 * drives that have the SD_FAB_DEVID property 4787 * set in the disk_table and non-Sun qualified 4788 * disks are treated in the same manner. These 4789 * drives manage their devids by storing them in 4790 * the last 2 available sectors on the drive and 4791 * have them fabricated by the ddi layer by 4792 * calling ddi_devid_init and passing the 4793 * DEVID_FAB flag. 4794 * Create a fabricated devid only if one does 4795 * not already exist. 4796 */ 4797 if (sd_get_devid(un) == EINVAL) { 4798 (void) sd_create_devid(un); 4799 } 4800 un->un_f_opt_fab_devid = TRUE; 4801 4802 /* Register the devid if it exists */ 4803 if (un->un_devid != NULL) { 4804 (void) ddi_devid_register(SD_DEVINFO(un), 4805 un->un_devid); 4806 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4807 "sd_register_devid: devid fabricated using " 4808 "ddi framework\n"); 4809 } 4810 } 4811 4812 /* clean up resources */ 4813 if (inq80 != NULL) { 4814 kmem_free(inq80, inq80_len); 4815 } 4816 if (inq83 != NULL) { 4817 kmem_free(inq83, inq83_len); 4818 } 4819 } 4820 4821 4822 4823 /* 4824 * Function: sd_get_devid 4825 * 4826 * Description: This routine will return 0 if a valid device id has been 4827 * obtained from the target and stored in the soft state. If a 4828 * valid device id has not been previously read and stored, a 4829 * read attempt will be made.
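* (The struct dk_devid block ends with a 4-byte checksum equal to * the XOR of all preceding 32-bit words in the block; the validation * in this routine recomputes that XOR and compares it against * DKD_GETCHKSUM().)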
4830 * 4831 * Arguments: un - driver soft state (unit) structure 4832 * 4833 * Return Code: 0 if we successfully get the device id 4834 * 4835 * Context: Kernel Thread 4836 */ 4837 4838 static int 4839 sd_get_devid(struct sd_lun *un) 4840 { 4841 struct dk_devid *dkdevid; 4842 ddi_devid_t tmpid; 4843 uint_t *ip; 4844 size_t sz; 4845 diskaddr_t blk; 4846 int status; 4847 int chksum; 4848 int i; 4849 size_t buffer_size; 4850 4851 ASSERT(un != NULL); 4852 ASSERT(mutex_owned(SD_MUTEX(un))); 4853 4854 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4855 un); 4856 4857 if (un->un_devid != NULL) { 4858 return (0); 4859 } 4860 4861 mutex_exit(SD_MUTEX(un)); 4862 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4863 (void *)SD_PATH_DIRECT) != 0) { 4864 mutex_enter(SD_MUTEX(un)); 4865 return (EINVAL); 4866 } 4867 4868 /* 4869 * Read and verify device id, stored in the reserved cylinders at the 4870 * end of the disk. Backup label is on the odd sectors of the last 4871 * track of the last cylinder. Device id will be on track of the next 4872 * to last cylinder. 4873 */ 4874 mutex_enter(SD_MUTEX(un)); 4875 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4876 mutex_exit(SD_MUTEX(un)); 4877 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4878 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4879 SD_PATH_DIRECT); 4880 if (status != 0) { 4881 goto error; 4882 } 4883 4884 /* Validate the revision */ 4885 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4886 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4887 status = EINVAL; 4888 goto error; 4889 } 4890 4891 /* Calculate the checksum */ 4892 chksum = 0; 4893 ip = (uint_t *)dkdevid; 4894 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4895 i++) { 4896 chksum ^= ip[i]; 4897 } 4898 4899 /* Compare the checksums */ 4900 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4901 status = EINVAL; 4902 goto error; 4903 } 4904 4905 /* Validate the device id */ 4906 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4907 status = EINVAL; 4908 goto error; 4909 } 4910 4911 /* 4912 * Store the device id in the driver soft state 4913 */ 4914 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4915 tmpid = kmem_alloc(sz, KM_SLEEP); 4916 4917 mutex_enter(SD_MUTEX(un)); 4918 4919 un->un_devid = tmpid; 4920 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4921 4922 kmem_free(dkdevid, buffer_size); 4923 4924 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4925 4926 return (status); 4927 error: 4928 mutex_enter(SD_MUTEX(un)); 4929 kmem_free(dkdevid, buffer_size); 4930 return (status); 4931 } 4932 4933 4934 /* 4935 * Function: sd_create_devid 4936 * 4937 * Description: This routine will fabricate the device id and write it 4938 * to the disk. 
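* If writing the devid to disk fails, the fabricated devid is * freed again and NULL is returned.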
4939 * 4940 * Arguments: un - driver soft state (unit) structure 4941 * 4942 * Return Code: value of the fabricated device id 4943 * 4944 * Context: Kernel Thread 4945 */ 4946 4947 static ddi_devid_t 4948 sd_create_devid(struct sd_lun *un) 4949 { 4950 ASSERT(un != NULL); 4951 4952 /* Fabricate the devid */ 4953 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4954 == DDI_FAILURE) { 4955 return (NULL); 4956 } 4957 4958 /* Write the devid to disk */ 4959 if (sd_write_deviceid(un) != 0) { 4960 ddi_devid_free(un->un_devid); 4961 un->un_devid = NULL; 4962 } 4963 4964 return (un->un_devid); 4965 } 4966 4967 4968 /* 4969 * Function: sd_write_deviceid 4970 * 4971 * Description: This routine will write the device id to the disk 4972 * reserved sector. 4973 * 4974 * Arguments: un - driver soft state (unit) structure 4975 * 4976 * Return Code: -1 if the devid block could not be determined 4977 * value returned by sd_send_scsi_WRITE 4978 * 4979 * Context: Kernel Thread 4980 */ 4981 4982 static int 4983 sd_write_deviceid(struct sd_lun *un) 4984 { 4985 struct dk_devid *dkdevid; 4986 diskaddr_t blk; 4987 uint_t *ip, chksum; 4988 int status; 4989 int i; 4990 4991 ASSERT(mutex_owned(SD_MUTEX(un))); 4992 4993 mutex_exit(SD_MUTEX(un)); 4994 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4995 (void *)SD_PATH_DIRECT) != 0) { 4996 mutex_enter(SD_MUTEX(un)); 4997 return (-1); 4998 } 4999 5000 5001 /* Allocate the buffer */ 5002 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5003 5004 /* Fill in the revision */ 5005 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5006 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5007 5008 /* Copy in the device id */ 5009 mutex_enter(SD_MUTEX(un)); 5010 bcopy(un->un_devid, &dkdevid->dkd_devid, 5011 ddi_devid_sizeof(un->un_devid)); 5012 mutex_exit(SD_MUTEX(un)); 5013 5014 /* Calculate the checksum */ 5015 chksum = 0; 5016 ip = (uint_t *)dkdevid; 5017 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5018 i++) { 5019 chksum ^= ip[i]; 5020 } 5021 5022 /* Fill-in checksum */ 5023 DKD_FORMCHKSUM(chksum, dkdevid); 5024 5025 /* Write the reserved sector */ 5026 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5027 SD_PATH_DIRECT); 5028 5029 kmem_free(dkdevid, un->un_sys_blocksize); 5030 5031 mutex_enter(SD_MUTEX(un)); 5032 return (status); 5033 } 5034 5035 5036 /* 5037 * Function: sd_check_vpd_page_support 5038 * 5039 * Description: This routine sends an inquiry command with the EVPD bit set and 5040 * a page code of 0x00 to the device. It is used to determine which 5041 * vital product pages are available to find the devid. We are 5042 * looking for pages 0x83 or 0x80. If we return -1, the 5043 * device does not support that command. 5044 * 5045 * Arguments: un - driver soft state (unit) structure 5046 * 5047 * Return Code: 0 - success 5048 * -1 - the device does not support VPD pages 5049 * 5050 * Context: This routine can sleep. 5051 */ 5052 5053 static int 5054 sd_check_vpd_page_support(struct sd_lun *un) 5055 { 5056 uchar_t *page_list = NULL; 5057 uchar_t page_length = 0xff; /* Use max possible length */ 5058 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5059 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5060 int rval = 0; 5061 int counter; 5062 5063 ASSERT(un != NULL); 5064 ASSERT(mutex_owned(SD_MUTEX(un))); 5065 5066 mutex_exit(SD_MUTEX(un)); 5067 5068 /* 5069 * We'll set the page length to the maximum to save figuring it out 5070 * with an additional call.
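* (In the page 0x00 response the supported page codes follow the * 4-byte header in ascending order, with the number of codes in * byte 3; the parsing loop below starts at offset 4 for exactly * that reason.)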
5071 */ 5072 page_list = kmem_zalloc(page_length, KM_SLEEP); 5073 5074 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5075 page_code, NULL); 5076 5077 mutex_enter(SD_MUTEX(un)); 5078 5079 /* 5080 * Now we must validate that the device accepted the command, as some 5081 * drives do not support it. If the drive does support it, we will 5082 * return 0, and the supported pages will be in un_vpd_page_mask. If 5083 * not, we return -1. 5084 */ 5085 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5086 /* Loop to find one of the 2 pages we need */ 5087 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5088 5089 /* 5090 * Pages are returned in ascending order, and 0x83 is what we 5091 * are hoping for. 5092 */ 5093 while ((page_list[counter] <= 0x86) && 5094 (counter <= (page_list[VPD_PAGE_LENGTH] + 5095 VPD_HEAD_OFFSET))) { 5096 /* 5097 * Add 3 because page_list[3] is the number of 5098 * pages minus 3 5099 */ 5100 5101 switch (page_list[counter]) { 5102 case 0x00: 5103 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5104 break; 5105 case 0x80: 5106 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5107 break; 5108 case 0x81: 5109 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5110 break; 5111 case 0x82: 5112 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5113 break; 5114 case 0x83: 5115 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5116 break; 5117 case 0x86: 5118 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5119 break; 5120 } 5121 counter++; 5122 } 5123 5124 } else { 5125 rval = -1; 5126 5127 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5128 "sd_check_vpd_page_support: This drive does not implement " 5129 "VPD pages.\n"); 5130 } 5131 5132 kmem_free(page_list, page_length); 5133 5134 return (rval); 5135 } 5136 5137 5138 /* 5139 * Function: sd_setup_pm 5140 * 5141 * Description: Initialize Power Management on the device 5142 * 5143 * Context: Kernel Thread 5144 */ 5145 5146 static void 5147 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5148 { 5149 uint_t log_page_size; 5150 uchar_t *log_page_data; 5151 int rval; 5152 5153 /* 5154 * Since we are called from attach, holding a mutex for 5155 * un is unnecessary. Because some of the routines called 5156 * from here require SD_MUTEX to not be held, assert this 5157 * right up front. 5158 */ 5159 ASSERT(!mutex_owned(SD_MUTEX(un))); 5160 /* 5161 * Since the sd device does not have the 'reg' property, 5162 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5163 * The following code is to tell cpr that this device 5164 * DOES need to be suspended and resumed. 5165 */ 5166 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5167 "pm-hardware-state", "needs-suspend-resume"); 5168 5169 /* 5170 * This complies with the new power management framework 5171 * for certain desktop machines. Create the pm_components 5172 * property as a string array property. 5173 */ 5174 if (un->un_f_pm_supported) { 5175 /* 5176 * Not all devices have a motor; try it first. Some devices 5177 * may return ILLEGAL REQUEST, and some will hang. 5178 * 5179 * The following START_STOP_UNIT is used to check if the target 5180 * device has a motor.
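* (The probe is optimistic: un_f_start_stop_supported is preset to * TRUE and knocked back to FALSE only if the START fails.)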

/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(struct sd_lun *un, dev_info_t *devi)
{
	uint_t	log_page_size;
	uchar_t	*log_page_data;
	int	rval;

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor, so issue a START_STOP_UNIT
		 * first to check whether the target has one. Some devices
		 * that lack a motor may return ILLEGAL REQUEST; some may
		 * hang.
		 */
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * cannot go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start-stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log Sense for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove the
		 * property created above, the PM framework will not honor
		 * the change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}
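
/*
 * For reference, the string array registered above is what the pm
 * framework parses to learn this device's one power-manageable
 * component and its two levels; a hypothetical driver.conf rendering
 * of the same property would read:
 *
 *	pm-components="NAME=spindle-motor","0=off","1=on";
 */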

/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait till the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait till the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding "
			    "cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}
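
/*
 * Illustrative sketch (not part of the driver) distilling the cancel
 * pattern repeated in sd_ddi_suspend() above: the timeout id is
 * snapshotted and cleared while SD_MUTEX is held, but untimeout(9F) is
 * only called after dropping the mutex, because an expired handler may
 * itself be blocked trying to acquire SD_MUTEX.
 */
#ifdef	SD_EXAMPLE_SKETCHES	/* hypothetical guard; never defined */
static void
sd_example_cancel_timeout(struct sd_lun *un, timeout_id_t *tidp)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	if (*tidp != NULL) {
		timeout_id_t temp_id = *tidp;

		*tidp = NULL;			/* claim it under the mutex */
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);	/* may wait for the handler */
		mutex_enter(SD_MUTEX(un));
	}
}
#endif	/* SD_EXAMPLE_SKETCHES */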

/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, ie.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to the low power
		 * state. Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved, so that un_last_state
	 * holds the right value.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume. If throttle isn't
	 * restored from the saved value until after calling pm_raise_power,
	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
	 * hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in
	 * the power entry point is the START command when you transition
	 * from 0->1 or unknown->1. Put it to SPINDLE ON state irrespective
	 * of the state at which suspend was done. Ignore the return value
	 * as the resume should not be failed. In the case of removable
	 * media the media need not be inserted and hence there is a chance
	 * that raise power will fail with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to
	 * wait until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know
	 * that there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}
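
/*
 * Illustrative sketch (not part of the driver) making the un_pm_count
 * convention used above explicit: -1 means the device is at low power
 * (SD_DEVICE_IS_IN_LOW_POWER), 0 means powered up but idle, and a
 * positive value counts outstanding busy holds.
 */
#ifdef	SD_EXAMPLE_SKETCHES	/* hypothetical guard; never defined */
static boolean_t
sd_example_pm_is_busy(struct sd_lun *un)
{
	boolean_t	busy;

	mutex_enter(&un->un_pm_mutex);
	busy = (un->un_pm_count > 0) ? B_TRUE : B_FALSE;
	mutex_exit(&un->un_pm_mutex);

	return (busy);
}
#endif	/* SD_EXAMPLE_SKETCHES */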

/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}
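
/*
 * Illustrative sketch (not part of the driver): sd_pm_timeout_handler()
 * above is the delayed-idle half of a busy/idle pair. The deferred-idle
 * pattern used by sdpower() below, reduced to a hypothetical helper:
 * mark the component busy now, and schedule the matching idle to be
 * sent 'seconds' from now.
 */
#ifdef	SD_EXAMPLE_SKETCHES	/* hypothetical guard; never defined */
static void
sd_example_defer_idle(struct sd_lun *un, time_t seconds)
{
	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid == NULL) {
		/* The idle will be signalled by sd_pm_timeout_handler() */
		un->un_pm_timeid = timeout(sd_pm_timeout_handler, un,
		    seconds * drv_usectohz(1000000));
		mutex_exit(&un->un_pm_mutex);
		(void) pm_busy_component(SD_DEVINFO(un), 0);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}
}
#endif	/* SD_EXAMPLE_SKETCHES */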

/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * ie. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, ie. gotten,
	 * here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen, therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Power changes to a device that is OFFLINE or SUSPENDED are not
	 * allowed. An OFFLINE device is completely dead; putting it on or
	 * off would require sending it commands, which would fail anyway,
	 * so just return failure here.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power-cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1e] << 8)  | log_page_data[0x1f];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8 + i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the Ok from
		 * the global policy.
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * It is not OK to power cycle, or there was an error
			 * in the parameters passed. pm_trans_check has given
			 * us (in intvlp) the advised time after which a power
			 * cycle may be considered again. Until then we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * To effect this behavior, install a timeout handler
			 * and wait for the recommended time to elapse so that
			 * power management can be effective again.
			 *
			 * Call pm_busy_component to indicate to the framework
			 * that this device is busy. By not adjusting
			 * un_pm_count, the rest of PM in the driver will
			 * function normally, and independently of this, but
			 * because the framework is told the device is busy it
			 * won't attempt powering down until it gets a
			 * matching idle. The timeout handler sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any cmds getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *	if a spindle off with media present fails,
	 *		then restore the state and return an error.
	 *	else if a spindle on fails,
	 *		then return an error (there's no state to restore).
	 * In all other cases we set up for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate the watch thread when a removable
			 * media device is going into a low power state.
			 * The pm framework requires this; otherwise
			 * commands would still be generated for the device
			 * (through the watch thread) even while it is in
			 * the low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	return (rval);
}
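
/*
 * Illustrative sketch (not part of the driver): the cycle counters
 * pulled out of the log page in sdpower() are big-endian on the wire,
 * so the open-coded shifts above are equivalent to this hypothetical
 * helper, e.g. maxcycles = sd_example_get_be32(log_page_data, 0x1c).
 */
#ifdef	SD_EXAMPLE_SKETCHES	/* hypothetical guard; never defined */
static uint_t
sd_example_get_be32(uchar_t *log_page_data, int offset)
{
	/* Fold four bytes, most-significant first */
	return ((log_page_data[offset] << 24) |
	    (log_page_data[offset + 1] << 16) |
	    (log_page_data[offset + 2] << 8) |
	    log_page_data[offset + 3]);
}
#endif	/* SD_EXAMPLE_SKETCHES */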


/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}
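
/*
 * Illustrative sketch (not part of the driver): for the spin in
 * sd_sync_with_callback() to terminate, a callback routine is assumed
 * to bracket its work with un_in_callback updates under SD_MUTEX, along
 * these hypothetical lines:
 */
#ifdef	SD_EXAMPLE_SKETCHES	/* hypothetical guard; never defined */
static void
sd_example_callback(struct sd_lun *un)
{
	mutex_enter(SD_MUTEX(un));
	un->un_in_callback++;		/* attach/detach must now wait */
	mutex_exit(SD_MUTEX(un));

	/* ... work that must not race with attach/detach ... */

	mutex_enter(SD_MUTEX(un));
	un->un_in_callback--;		/* let attach/detach proceed */
	mutex_exit(SD_MUTEX(un));
}
#endif	/* SD_EXAMPLE_SKETCHES */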

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, ie. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it,
	 * and this includes us. If that's the case, and if the following is
	 * not set up properly or we don't re-negotiate with the drive prior
	 * to transferring data to/from the drive, it causes bus parity
	 * errors, data overruns, and unexpected interrupts. This first
	 * occurred when the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can disable
	 * this capability only when no other lun has been attached on this
	 * target. By doing this, we assume a target has the same tagged-qing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support a per-lun based tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc().  We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * set un_f_is_fibre to TRUE (default fibre)
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered.
		 * The default will be to assume that if a device does not
		 * support the "interconnect-type" property it is a parallel
		 * SCSI HBA, and we will set the interconnect type for
		 * parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for Sparc
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has 1 binary, and un_retry_count is set based on connection
	 * type. The hardcoded values will go away when Sparc uses 1 binary
	 * for sd and ssd. These hardcoded values need to match
	 * SD_RETRY_COUNT in sddef.h.
	 * The value used is based on interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get the "allow bus device reset" property (defaults to "enabled"
	 * if the property was not defined). This is to disable bus resets
	 * for certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI behavior
	 * (like atapi) could be specified by the underlying HBA drivers by
	 * supplying a new value for the "variant" property, instead of having
	 * to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}

	un->un_cmd_timeout = SD_IO_TIME;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state = SD_STATE_NORMAL;
	un->un_last_state = SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle = sd_max_throttle;
	un->un_saved_throttle = sd_max_throttle;
	un->un_min_throttle = sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate = DKIO_NONE;
	un->un_specified_mediastate = DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable are a forceful override,
	 * meaning a non-zero value must be entered to change the default.
	 */
	un->un_f_disksort_disabled = FALSE;
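
	/*
	 * For illustration only, a hypothetical sd-config-list fragment of
	 * the general shape consumed by sd_read_unit_properties() below:
	 * the first string pairs a vendor/product id with the name of a
	 * data property, whose value is a version number, a bitmask of
	 * which settings follow, and the settings themselves (here, a
	 * made-up entry overriding the throttle for a fictitious disk):
	 *
	 *	sd-config-list= "ACME    ExampleDisk", "acme-data";
	 *	acme-data= 1,0x1,10;
	 */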
	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
	 */

	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
		/*
		 * If SCSI-2 tagged queueing is supported by the target
		 * and by the host adapter then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_cmdque) &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);

		}

		/* Set up or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
		 * and "ssd_max_xfer_size" to exist simultaneously on the same
		 * system and be set to different values. In the future this
		 * code may need to be updated when the ssd module is
		 * obsoleted and removed from the system.
		 * (4299588)
		 */
		if (SD_IS_PARALLEL_SCSI(un) &&
		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    1, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p Wide Transfer "
				    "enabled\n", un);
			}

			/*
			 * If tagged queuing has also been enabled, then
			 * enable large xfers
			 */
			if (un->un_saved_throttle == sd_max_throttle) {
				un->un_max_xfer_size =
				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p max transfer "
				    "size=0x%x\n", un, un->un_max_xfer_size);
			}
		} else {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    0, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "Wide Transfer disabled\n", un);
			}
		}
	} else {
		un->un_tagflags = FLAG_STAG;
		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
	}

	/*
	 * If this target supports LUN reset, try to enable it.
	 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a reservation,
	 * this is a throw away command to clear any unit attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONs are cleared.
	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
	 * with attempts at spinning up a device with no media.
	 */
	if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) {
		reservation_flag = SD_TARGET_IS_RESERVED;
	}

	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command). Note, however, that either
	 * of these could fail and in some cases we would continue with
	 * the attach despite the failure (see below).
	 */
	if (un->un_f_descr_format_supported) {
		switch (sd_spin_up_unit(un)) {
		case 0:
			/*
			 * Spin-up was successful; now try to read the
			 * capacity. If successful then save the results
			 * and mark the capacity & lbasize as valid.
         */
        SD_TRACE(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up successful\n", un);

        switch (sd_send_scsi_READ_CAPACITY(un, &capacity,
            &lbasize, SD_PATH_DIRECT)) {
        case 0: {
            if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
                if (capacity + 1 >
                    SD_GROUP1_MAX_ADDRESS) {
                    /*
                     * Enable descriptor format
                     * sense data so that we can
                     * get 64 bit sense data
                     * fields.
                     */
                    sd_enable_descr_sense(un);
                }
#else
                /* 32-bit kernels can't handle this */
                scsi_log(SD_DEVINFO(un),
                    sd_label, CE_WARN,
                    "disk has %llu blocks, which "
                    "is too large for a 32-bit "
                    "kernel", capacity);

#if defined(__i386) || defined(__amd64)
                /*
                 * A 1TB disk was treated as
                 * (1T - 512)B in the past, so it
                 * might have a valid VTOC and
                 * Solaris partitions; we have to
                 * allow it to continue to work.
                 */
                if (capacity - 1 > DK_MAX_BLOCKS)
#endif
                    goto spinup_failed;
#endif
            }

            /*
             * It's not necessary to check here whether the
             * capacity of the device is bigger than what the
             * max HBA cdb can support, because
             * sd_send_scsi_READ_CAPACITY retrieves the
             * capacity with a USCSI command, which is itself
             * constrained by the max HBA cdb. In fact,
             * sd_send_scsi_READ_CAPACITY returns EINVAL when
             * a cdb bigger than the required cdb length is
             * used; that situation is handled in
             * "case EINVAL" below.
             */

            /*
             * The following relies on
             * sd_send_scsi_READ_CAPACITY never
             * returning 0 for capacity and/or lbasize.
             */
            sd_update_block_info(un, lbasize, capacity);

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p capacity = %ld "
                "blocks; lbasize = %ld.\n", un,
                un->un_blockcount, un->un_tgt_blocksize);

            break;
        }
        case EINVAL:
            /*
             * In the case where the max-cdb-length property
             * is smaller than the required CDB length for
             * a SCSI device, a target driver can fail to
             * attach to that device.
             */
            scsi_log(SD_DEVINFO(un),
                sd_label, CE_WARN,
                "disk capacity is too large "
                "for current cdb length");
            goto spinup_failed;
        case EACCES:
            /*
             * Should never get here if the spin-up
             * succeeded, but code it in anyway.
             * From here, just continue with the attach...
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p "
                "sd_send_scsi_READ_CAPACITY "
                "returned reservation conflict\n", un);
            reservation_flag = SD_TARGET_IS_RESERVED;
            break;
        default:
            /*
             * Likewise, should never get here if the
             * spin-up succeeded. Just continue with
             * the attach...
             */
            break;
        }
        break;
    case EACCES:
        /*
         * Device is reserved by another host. In this case
         * we could not spin it up or read the capacity, but
         * we continue with the attach anyway.
         */
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up reservation "
            "conflict.\n", un);
        reservation_flag = SD_TARGET_IS_RESERVED;
        break;
    default:
        /* Fail the attach if the spin-up failed. */
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p spin-up failed.", un);
        goto spinup_failed;
    }
}

/*
 * Check to see if this is an MMC drive
 */
if (ISCD(un)) {
    sd_set_mmc_caps(un);
}


/*
 * Add a zero-length attribute to tell the world we support
 * kernel ioctls (for layered drivers)
 */
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
    DDI_KERNEL_IOCTL, NULL, 0);

/*
 * Add a boolean property to tell the world we support
 * the B_FAILFAST flag (for layered drivers)
 */
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
    "ddi-failfast-supported", NULL, 0);

/*
 * Initialize power management
 */
mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
sd_setup_pm(un, devi);
if (un->un_f_pm_is_enabled == FALSE) {
    /*
     * For performance, point to a jump table that does
     * not include pm.
     * The direct and priority chains don't change with PM.
     *
     * Note: this is currently done based on individual device
     * capabilities. When an interface for determining system
     * power enabled state becomes available, or when additional
     * layers are added to the command chain, these values will
     * have to be re-evaluated for correctness.
     */
    if (un->un_f_non_devbsize_supported) {
        un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
    } else {
        un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
    }
    un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
}

/*
 * This property is set to 0 by HA software to avoid retries
 * on a reserved disk. (The preferred property name is
 * "retry-on-reservation-conflict") (1189689)
 *
 * Note: The use of a global here can have unintended consequences. A
 * per instance variable is preferable to match the capabilities of
 * different underlying HBAs (4402600)
 */
sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
    sd_retry_on_reservation_conflict);
if (sd_retry_on_reservation_conflict != 0) {
    sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
        devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
        sd_retry_on_reservation_conflict);
}

/* Set up options for QFULL handling. */
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
    "qfull-retries", -1)) != -1) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
        rval, 1);
}
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
    "qfull-retry-interval", -1)) != -1) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
        rval, 1);
}

/*
 * This just prints a message that announces the existence of the
 * device. The message is always printed in the system logfile, but
 * only appears on the console if the system is booted with the
 * -v (verbose) argument.
 */
ddi_report_dev(devi);

un->un_mediastate = DKIO_NONE;

cmlb_alloc_handle(&un->un_cmlbhandle);

#if defined(__i386) || defined(__amd64)
/*
 * On x86, compensate for off-by-1 legacy error
 */
if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
    (lbasize == un->un_sys_blocksize))
    offbyone = CMLB_OFF_BY_ONE;
#endif

if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
    un->un_node_type, offbyone, un->un_cmlbhandle,
    (void *)SD_PATH_DIRECT) != 0) {
    goto cmlb_attach_failed;
}


/*
 * Read and validate the device's geometry (i.e., the disk label).
 * A new unformatted drive will not have a valid geometry, but
 * the driver needs to successfully attach to this device so
 * the drive can be formatted via ioctls.
 */
geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;

mutex_enter(SD_MUTEX(un));

/*
 * Read and initialize the devid for the unit.
 */
ASSERT(un->un_errstats != NULL);
if (un->un_f_devid_supported) {
    sd_register_devid(un, devi, reservation_flag);
}
mutex_exit(SD_MUTEX(un));

#if (defined(__fibre))
/*
 * Register callbacks for fibre only. You can't do this solely
 * on the basis of the devid_type because this is HBA specific.
 * We need to query our HBA capabilities to find out whether to
 * register or not.
 */
if (un->un_f_is_fibre) {
    if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
        sd_init_event_callbacks(un);
        SD_TRACE(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p event callbacks inserted",
            un);
    }
}
#endif

if (un->un_f_opt_disable_cache == TRUE) {
    /*
     * Disable both read cache and write cache. This is
     * the historic behavior of the keywords in the config file.
     */
    if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
        0) {
        SD_ERROR(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p Could not disable "
            "caching", un);
        goto devid_failed;
    }
}

/*
 * Check the value of the WCE bit now and
 * set un_f_write_cache_enabled accordingly.
 */
(void) sd_get_write_cache_enabled(un, &wc_enabled);
mutex_enter(SD_MUTEX(un));
un->un_f_write_cache_enabled = (wc_enabled != 0);
mutex_exit(SD_MUTEX(un));

/*
 * Check the value of the NV_SUP bit and set
 * un_f_suppress_cache_flush accordingly.
 */
sd_get_nv_sup(un);

/*
 * Find out what type of reservation this disk supports.
 */
switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
case 0:
    /*
     * SCSI-3 reservations are supported.
     */
    un->un_reservation_type = SD_SCSI3_RESERVATION;
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
    break;
case ENOTSUP:
    /*
     * The PERSISTENT RESERVE IN command would not be recognized by
     * a SCSI-2 device, so assume the reservation type is SCSI-2.
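     * Such devices are instead managed with SCSI-2
     * RESERVE/RELEASE semantics (see, for example, the
     * sd_reserve_release() call made on the detach path when
     * the unit still holds a reservation).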
     */
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
    un->un_reservation_type = SD_SCSI2_RESERVATION;
    break;
default:
    /*
     * default to SCSI-3 reservations
     */
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
    un->un_reservation_type = SD_SCSI3_RESERVATION;
    break;
}

/*
 * Set the partition and error stat values here, so that data obtained
 * during the previous attach-time routines is available.
 *
 * Note: This is a critical sequence that needs to be maintained:
 *	1) Instantiate the kstats before any routines using the iopath
 *	(i.e. sd_send_scsi_cmd).
 *	2) Initialize the error stats (sd_set_errstats) and partition
 *	stats (sd_set_pstats) here, following
 *	cmlb_validate_geometry(), sd_register_devid(), and
 *	sd_cache_control().
 */

if (un->un_f_pkstats_enabled && geom_label_valid) {
    sd_set_pstats(un);
    SD_TRACE(SD_LOG_IO_PARTITION, un,
        "sd_unit_attach: un:0x%p pstats created and set\n", un);
}

sd_set_errstats(un);
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
    "sd_unit_attach: un:0x%p errstats set\n", un);


/*
 * After successfully attaching an instance, we record the information
 * of how many luns have been attached on the relative target and
 * controller for parallel SCSI. This information is used when sd tries
 * to set the tagged queuing capability in HBA.
 */
if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
    sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
}

SD_TRACE(SD_LOG_ATTACH_DETACH, un,
    "sd_unit_attach: un:0x%p exit success\n", un);

return (DDI_SUCCESS);

/*
 * An error occurred during the attach; clean up & return failure.
 */

devid_failed:

setup_pm_failed:
ddi_remove_minor_node(devi, NULL);

cmlb_attach_failed:
/*
 * Cleanup from the scsi_ifsetcap() calls (437868)
 */
(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

/*
 * Refer to the comments on setting tagged-qing near the beginning of
 * sd_unit_attach. We can only disable tagged queuing when there is
 * no lun attached on the target.
 */
if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
}

if (un->un_f_is_fibre == FALSE) {
    (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
}

spinup_failed:

mutex_enter(SD_MUTEX(un));

/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7438 if (un->un_direct_priority_timeid != NULL) { 7439 timeout_id_t temp_id = un->un_direct_priority_timeid; 7440 un->un_direct_priority_timeid = NULL; 7441 mutex_exit(SD_MUTEX(un)); 7442 (void) untimeout(temp_id); 7443 mutex_enter(SD_MUTEX(un)); 7444 } 7445 7446 /* Cancel any pending start/stop timeouts */ 7447 if (un->un_startstop_timeid != NULL) { 7448 timeout_id_t temp_id = un->un_startstop_timeid; 7449 un->un_startstop_timeid = NULL; 7450 mutex_exit(SD_MUTEX(un)); 7451 (void) untimeout(temp_id); 7452 mutex_enter(SD_MUTEX(un)); 7453 } 7454 7455 /* Cancel any pending reset-throttle timeouts */ 7456 if (un->un_reset_throttle_timeid != NULL) { 7457 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7458 un->un_reset_throttle_timeid = NULL; 7459 mutex_exit(SD_MUTEX(un)); 7460 (void) untimeout(temp_id); 7461 mutex_enter(SD_MUTEX(un)); 7462 } 7463 7464 /* Cancel any pending retry timeouts */ 7465 if (un->un_retry_timeid != NULL) { 7466 timeout_id_t temp_id = un->un_retry_timeid; 7467 un->un_retry_timeid = NULL; 7468 mutex_exit(SD_MUTEX(un)); 7469 (void) untimeout(temp_id); 7470 mutex_enter(SD_MUTEX(un)); 7471 } 7472 7473 /* Cancel any pending delayed cv broadcast timeouts */ 7474 if (un->un_dcvb_timeid != NULL) { 7475 timeout_id_t temp_id = un->un_dcvb_timeid; 7476 un->un_dcvb_timeid = NULL; 7477 mutex_exit(SD_MUTEX(un)); 7478 (void) untimeout(temp_id); 7479 mutex_enter(SD_MUTEX(un)); 7480 } 7481 7482 mutex_exit(SD_MUTEX(un)); 7483 7484 /* There should not be any in-progress I/O so ASSERT this check */ 7485 ASSERT(un->un_ncmds_in_transport == 0); 7486 ASSERT(un->un_ncmds_in_driver == 0); 7487 7488 /* Do not free the softstate if the callback routine is active */ 7489 sd_sync_with_callback(un); 7490 7491 /* 7492 * Partition stats apparently are not used with removables. These would 7493 * not have been created during attach, so no need to clean them up... 7494 */ 7495 if (un->un_stats != NULL) { 7496 kstat_delete(un->un_stats); 7497 un->un_stats = NULL; 7498 } 7499 if (un->un_errstats != NULL) { 7500 kstat_delete(un->un_errstats); 7501 un->un_errstats = NULL; 7502 } 7503 7504 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7505 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7506 7507 ddi_prop_remove_all(devi); 7508 sema_destroy(&un->un_semoclose); 7509 cv_destroy(&un->un_state_cv); 7510 7511 getrbuf_failed: 7512 7513 sd_free_rqs(un); 7514 7515 alloc_rqs_failed: 7516 7517 devp->sd_private = NULL; 7518 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7519 7520 get_softstate_failed: 7521 /* 7522 * Note: the man pages are unclear as to whether or not doing a 7523 * ddi_soft_state_free(sd_state, instance) is the right way to 7524 * clean up after the ddi_soft_state_zalloc() if the subsequent 7525 * ddi_get_soft_state() fails. The implication seems to be 7526 * that the get_soft_state cannot fail if the zalloc succeeds. 7527 */ 7528 ddi_soft_state_free(sd_state, instance); 7529 7530 probe_failed: 7531 scsi_unprobe(devp); 7532 7533 return (DDI_FAILURE); 7534 } 7535 7536 7537 /* 7538 * Function: sd_unit_detach 7539 * 7540 * Description: Performs DDI_DETACH processing for sddetach(). 
7541 * 7542 * Return Code: DDI_SUCCESS 7543 * DDI_FAILURE 7544 * 7545 * Context: Kernel thread context 7546 */ 7547 7548 static int 7549 sd_unit_detach(dev_info_t *devi) 7550 { 7551 struct scsi_device *devp; 7552 struct sd_lun *un; 7553 int i; 7554 int tgt; 7555 dev_t dev; 7556 dev_info_t *pdip = ddi_get_parent(devi); 7557 int instance = ddi_get_instance(devi); 7558 7559 mutex_enter(&sd_detach_mutex); 7560 7561 /* 7562 * Fail the detach for any of the following: 7563 * - Unable to get the sd_lun struct for the instance 7564 * - A layered driver has an outstanding open on the instance 7565 * - Another thread is already detaching this instance 7566 * - Another thread is currently performing an open 7567 */ 7568 devp = ddi_get_driver_private(devi); 7569 if ((devp == NULL) || 7570 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7571 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7572 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7573 mutex_exit(&sd_detach_mutex); 7574 return (DDI_FAILURE); 7575 } 7576 7577 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7578 7579 /* 7580 * Mark this instance as currently in a detach, to inhibit any 7581 * opens from a layered driver. 7582 */ 7583 un->un_detach_count++; 7584 mutex_exit(&sd_detach_mutex); 7585 7586 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7587 SCSI_ADDR_PROP_TARGET, -1); 7588 7589 dev = sd_make_device(SD_DEVINFO(un)); 7590 7591 #ifndef lint 7592 _NOTE(COMPETING_THREADS_NOW); 7593 #endif 7594 7595 mutex_enter(SD_MUTEX(un)); 7596 7597 /* 7598 * Fail the detach if there are any outstanding layered 7599 * opens on this device. 7600 */ 7601 for (i = 0; i < NDKMAP; i++) { 7602 if (un->un_ocmap.lyropen[i] != 0) { 7603 goto err_notclosed; 7604 } 7605 } 7606 7607 /* 7608 * Verify there are NO outstanding commands issued to this device. 7609 * ie, un_ncmds_in_transport == 0. 7610 * It's possible to have outstanding commands through the physio 7611 * code path, even though everything's closed. 7612 */ 7613 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7614 (un->un_direct_priority_timeid != NULL) || 7615 (un->un_state == SD_STATE_RWAIT)) { 7616 mutex_exit(SD_MUTEX(un)); 7617 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7618 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7619 goto err_stillbusy; 7620 } 7621 7622 /* 7623 * If we have the device reserved, release the reservation. 7624 */ 7625 if ((un->un_resvd_status & SD_RESERVE) && 7626 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7627 mutex_exit(SD_MUTEX(un)); 7628 /* 7629 * Note: sd_reserve_release sends a command to the device 7630 * via the sd_ioctlcmd() path, and can sleep. 7631 */ 7632 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7633 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7634 "sd_dr_detach: Cannot release reservation \n"); 7635 } 7636 } else { 7637 mutex_exit(SD_MUTEX(un)); 7638 } 7639 7640 /* 7641 * Untimeout any reserve recover, throttle reset, restart unit 7642 * and delayed broadcast timeout threads. Protect the timeout pointer 7643 * from getting nulled by their callback functions. 
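 *
 * The cancellation idiom used below snapshots the timeout id and
 * NULLs the field under SD_MUTEX, then drops the mutex around the
 * untimeout(9F) call: untimeout() may block until an already-firing
 * callback returns, and these callbacks acquire SD_MUTEX themselves,
 * so holding SD_MUTEX across untimeout() could deadlock. A sketch of
 * the pattern (un_foo_timeid stands in for the various un_*_timeid
 * fields handled below):
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}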
7644 */ 7645 mutex_enter(SD_MUTEX(un)); 7646 if (un->un_resvd_timeid != NULL) { 7647 timeout_id_t temp_id = un->un_resvd_timeid; 7648 un->un_resvd_timeid = NULL; 7649 mutex_exit(SD_MUTEX(un)); 7650 (void) untimeout(temp_id); 7651 mutex_enter(SD_MUTEX(un)); 7652 } 7653 7654 if (un->un_reset_throttle_timeid != NULL) { 7655 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7656 un->un_reset_throttle_timeid = NULL; 7657 mutex_exit(SD_MUTEX(un)); 7658 (void) untimeout(temp_id); 7659 mutex_enter(SD_MUTEX(un)); 7660 } 7661 7662 if (un->un_startstop_timeid != NULL) { 7663 timeout_id_t temp_id = un->un_startstop_timeid; 7664 un->un_startstop_timeid = NULL; 7665 mutex_exit(SD_MUTEX(un)); 7666 (void) untimeout(temp_id); 7667 mutex_enter(SD_MUTEX(un)); 7668 } 7669 7670 if (un->un_dcvb_timeid != NULL) { 7671 timeout_id_t temp_id = un->un_dcvb_timeid; 7672 un->un_dcvb_timeid = NULL; 7673 mutex_exit(SD_MUTEX(un)); 7674 (void) untimeout(temp_id); 7675 } else { 7676 mutex_exit(SD_MUTEX(un)); 7677 } 7678 7679 /* Remove any pending reservation reclaim requests for this device */ 7680 sd_rmv_resv_reclaim_req(dev); 7681 7682 mutex_enter(SD_MUTEX(un)); 7683 7684 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7685 if (un->un_direct_priority_timeid != NULL) { 7686 timeout_id_t temp_id = un->un_direct_priority_timeid; 7687 un->un_direct_priority_timeid = NULL; 7688 mutex_exit(SD_MUTEX(un)); 7689 (void) untimeout(temp_id); 7690 mutex_enter(SD_MUTEX(un)); 7691 } 7692 7693 /* Cancel any active multi-host disk watch thread requests */ 7694 if (un->un_mhd_token != NULL) { 7695 mutex_exit(SD_MUTEX(un)); 7696 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7697 if (scsi_watch_request_terminate(un->un_mhd_token, 7698 SCSI_WATCH_TERMINATE_NOWAIT)) { 7699 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7700 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7701 /* 7702 * Note: We are returning here after having removed 7703 * some driver timeouts above. This is consistent with 7704 * the legacy implementation but perhaps the watch 7705 * terminate call should be made with the wait flag set. 7706 */ 7707 goto err_stillbusy; 7708 } 7709 mutex_enter(SD_MUTEX(un)); 7710 un->un_mhd_token = NULL; 7711 } 7712 7713 if (un->un_swr_token != NULL) { 7714 mutex_exit(SD_MUTEX(un)); 7715 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7716 if (scsi_watch_request_terminate(un->un_swr_token, 7717 SCSI_WATCH_TERMINATE_NOWAIT)) { 7718 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7719 "sd_dr_detach: Cannot cancel swr watch request\n"); 7720 /* 7721 * Note: We are returning here after having removed 7722 * some driver timeouts above. This is consistent with 7723 * the legacy implementation but perhaps the watch 7724 * terminate call should be made with the wait flag set. 7725 */ 7726 goto err_stillbusy; 7727 } 7728 mutex_enter(SD_MUTEX(un)); 7729 un->un_swr_token = NULL; 7730 } 7731 7732 mutex_exit(SD_MUTEX(un)); 7733 7734 /* 7735 * Clear any scsi_reset_notifies. We clear the reset notifies 7736 * if we have not registered one. 7737 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7738 */ 7739 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7740 sd_mhd_reset_notify_cb, (caddr_t)un); 7741 7742 /* 7743 * protect the timeout pointers from getting nulled by 7744 * their callback functions during the cancellation process. 7745 * In such a scenario untimeout can be invoked with a null value. 
 */
_NOTE(NO_COMPETING_THREADS_NOW);

mutex_enter(&un->un_pm_mutex);
if (un->un_pm_idle_timeid != NULL) {
    timeout_id_t temp_id = un->un_pm_idle_timeid;
    un->un_pm_idle_timeid = NULL;
    mutex_exit(&un->un_pm_mutex);

    /*
     * Timeout is active; cancel it.
     * Note that it'll never be active on a device
     * that does not support PM; therefore we don't
     * have to check before calling pm_idle_component.
     */
    (void) untimeout(temp_id);
    (void) pm_idle_component(SD_DEVINFO(un), 0);
    mutex_enter(&un->un_pm_mutex);
}

/*
 * Check whether there is already a timeout scheduled for power
 * management. If yes, then don't lower the power here; that's
 * the timeout handler's job.
 */
if (un->un_pm_timeid != NULL) {
    timeout_id_t temp_id = un->un_pm_timeid;
    un->un_pm_timeid = NULL;
    mutex_exit(&un->un_pm_mutex);
    /*
     * Timeout is active; cancel it.
     * Note that it'll never be active on a device
     * that does not support PM; therefore we don't
     * have to check before calling pm_idle_component.
     */
    (void) untimeout(temp_id);
    (void) pm_idle_component(SD_DEVINFO(un), 0);

} else {
    mutex_exit(&un->un_pm_mutex);
    if ((un->un_f_pm_is_enabled == TRUE) &&
        (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
        DDI_SUCCESS)) {
        SD_ERROR(SD_LOG_ATTACH_DETACH, un,
            "sd_dr_detach: Lower power request failed, ignoring.\n");
        /*
         * Fix for bug: 4297749, item # 13
         * The above test now includes a check to see if PM is
         * supported by this device before calling
         * pm_lower_power().
         * Note, the following is not dead code. The call to
         * pm_lower_power above will generate a call back into
         * our sdpower routine which might result in a timeout
         * handler getting activated. Therefore the following
         * code is valid and necessary.
         */
        mutex_enter(&un->un_pm_mutex);
        if (un->un_pm_timeid != NULL) {
            timeout_id_t temp_id = un->un_pm_timeid;
            un->un_pm_timeid = NULL;
            mutex_exit(&un->un_pm_mutex);
            (void) untimeout(temp_id);
            (void) pm_idle_component(SD_DEVINFO(un), 0);
        } else {
            mutex_exit(&un->un_pm_mutex);
        }
    }
}

/*
 * Cleanup from the scsi_ifsetcap() calls (437868)
 * Relocated here from above to be after the call to
 * pm_lower_power, which was getting errors.
 */
(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

/*
 * Currently, tagged queuing is supported per target by the HBA.
 * Setting this per lun instance actually sets the capability of this
 * target in the HBA, which affects those luns already attached on the
 * same target. So during detach, we can disable this capability
 * only when this is the only lun left on this target. By doing
 * this, we assume a target has the same tagged queuing capability
 * for every lun. The condition can be removed when the HBA is changed
 * to support a per-lun tagged queuing capability.
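 *
 * For example, with luns 0 and 1 attached on the same target,
 * detaching lun 1 must leave "tagged-qing" enabled on behalf of
 * lun 0; only when the last remaining lun detaches may the
 * capability be cleared, which is what the lun count check below
 * enforces.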
7832 */ 7833 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7834 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7835 } 7836 7837 if (un->un_f_is_fibre == FALSE) { 7838 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7839 } 7840 7841 /* 7842 * Remove any event callbacks, fibre only 7843 */ 7844 if (un->un_f_is_fibre == TRUE) { 7845 if ((un->un_insert_event != NULL) && 7846 (ddi_remove_event_handler(un->un_insert_cb_id) != 7847 DDI_SUCCESS)) { 7848 /* 7849 * Note: We are returning here after having done 7850 * substantial cleanup above. This is consistent 7851 * with the legacy implementation but this may not 7852 * be the right thing to do. 7853 */ 7854 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7855 "sd_dr_detach: Cannot cancel insert event\n"); 7856 goto err_remove_event; 7857 } 7858 un->un_insert_event = NULL; 7859 7860 if ((un->un_remove_event != NULL) && 7861 (ddi_remove_event_handler(un->un_remove_cb_id) != 7862 DDI_SUCCESS)) { 7863 /* 7864 * Note: We are returning here after having done 7865 * substantial cleanup above. This is consistent 7866 * with the legacy implementation but this may not 7867 * be the right thing to do. 7868 */ 7869 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7870 "sd_dr_detach: Cannot cancel remove event\n"); 7871 goto err_remove_event; 7872 } 7873 un->un_remove_event = NULL; 7874 } 7875 7876 /* Do not free the softstate if the callback routine is active */ 7877 sd_sync_with_callback(un); 7878 7879 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7880 cmlb_free_handle(&un->un_cmlbhandle); 7881 7882 /* 7883 * Hold the detach mutex here, to make sure that no other threads ever 7884 * can access a (partially) freed soft state structure. 7885 */ 7886 mutex_enter(&sd_detach_mutex); 7887 7888 /* 7889 * Clean up the soft state struct. 7890 * Cleanup is done in reverse order of allocs/inits. 7891 * At this point there should be no competing threads anymore. 7892 */ 7893 7894 /* Unregister and free device id. */ 7895 ddi_devid_unregister(devi); 7896 if (un->un_devid) { 7897 ddi_devid_free(un->un_devid); 7898 un->un_devid = NULL; 7899 } 7900 7901 /* 7902 * Destroy wmap cache if it exists. 7903 */ 7904 if (un->un_wm_cache != NULL) { 7905 kmem_cache_destroy(un->un_wm_cache); 7906 un->un_wm_cache = NULL; 7907 } 7908 7909 /* 7910 * kstat cleanup is done in detach for all device types (4363169). 7911 * We do not want to fail detach if the device kstats are not deleted 7912 * since there is a confusion about the devo_refcnt for the device. 7913 * We just delete the kstats and let detach complete successfully. 7914 */ 7915 if (un->un_stats != NULL) { 7916 kstat_delete(un->un_stats); 7917 un->un_stats = NULL; 7918 } 7919 if (un->un_errstats != NULL) { 7920 kstat_delete(un->un_errstats); 7921 un->un_errstats = NULL; 7922 } 7923 7924 /* Remove partition stats */ 7925 if (un->un_f_pkstats_enabled) { 7926 for (i = 0; i < NSDMAP; i++) { 7927 if (un->un_pstats[i] != NULL) { 7928 kstat_delete(un->un_pstats[i]); 7929 un->un_pstats[i] = NULL; 7930 } 7931 } 7932 } 7933 7934 /* Remove xbuf registration */ 7935 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7936 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7937 7938 /* Remove driver properties */ 7939 ddi_prop_remove_all(devi); 7940 7941 mutex_destroy(&un->un_pm_mutex); 7942 cv_destroy(&un->un_pm_busy_cv); 7943 7944 cv_destroy(&un->un_wcc_cv); 7945 7946 /* Open/close semaphore */ 7947 sema_destroy(&un->un_semoclose); 7948 7949 /* Removable media condvar. 
*/ 7950 cv_destroy(&un->un_state_cv); 7951 7952 /* Suspend/resume condvar. */ 7953 cv_destroy(&un->un_suspend_cv); 7954 cv_destroy(&un->un_disk_busy_cv); 7955 7956 sd_free_rqs(un); 7957 7958 /* Free up soft state */ 7959 devp->sd_private = NULL; 7960 7961 bzero(un, sizeof (struct sd_lun)); 7962 ddi_soft_state_free(sd_state, instance); 7963 7964 mutex_exit(&sd_detach_mutex); 7965 7966 /* This frees up the INQUIRY data associated with the device. */ 7967 scsi_unprobe(devp); 7968 7969 /* 7970 * After successfully detaching an instance, we update the information 7971 * of how many luns have been attached in the relative target and 7972 * controller for parallel SCSI. This information is used when sd tries 7973 * to set the tagged queuing capability in HBA. 7974 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7975 * check if the device is parallel SCSI. However, we don't need to 7976 * check here because we've already checked during attach. No device 7977 * that is not parallel SCSI is in the chain. 7978 */ 7979 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7980 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7981 } 7982 7983 return (DDI_SUCCESS); 7984 7985 err_notclosed: 7986 mutex_exit(SD_MUTEX(un)); 7987 7988 err_stillbusy: 7989 _NOTE(NO_COMPETING_THREADS_NOW); 7990 7991 err_remove_event: 7992 mutex_enter(&sd_detach_mutex); 7993 un->un_detach_count--; 7994 mutex_exit(&sd_detach_mutex); 7995 7996 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7997 return (DDI_FAILURE); 7998 } 7999 8000 8001 /* 8002 * Function: sd_create_errstats 8003 * 8004 * Description: This routine instantiates the device error stats. 8005 * 8006 * Note: During attach the stats are instantiated first so they are 8007 * available for attach-time routines that utilize the driver 8008 * iopath to send commands to the device. The stats are initialized 8009 * separately so data obtained during some attach-time routines is 8010 * available. 
(4362483) 8011 * 8012 * Arguments: un - driver soft state (unit) structure 8013 * instance - driver instance 8014 * 8015 * Context: Kernel thread context 8016 */ 8017 8018 static void 8019 sd_create_errstats(struct sd_lun *un, int instance) 8020 { 8021 struct sd_errstats *stp; 8022 char kstatmodule_err[KSTAT_STRLEN]; 8023 char kstatname[KSTAT_STRLEN]; 8024 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8025 8026 ASSERT(un != NULL); 8027 8028 if (un->un_errstats != NULL) { 8029 return; 8030 } 8031 8032 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8033 "%serr", sd_label); 8034 (void) snprintf(kstatname, sizeof (kstatname), 8035 "%s%d,err", sd_label, instance); 8036 8037 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8038 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8039 8040 if (un->un_errstats == NULL) { 8041 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8042 "sd_create_errstats: Failed kstat_create\n"); 8043 return; 8044 } 8045 8046 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8047 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8048 KSTAT_DATA_UINT32); 8049 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8050 KSTAT_DATA_UINT32); 8051 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8052 KSTAT_DATA_UINT32); 8053 kstat_named_init(&stp->sd_vid, "Vendor", 8054 KSTAT_DATA_CHAR); 8055 kstat_named_init(&stp->sd_pid, "Product", 8056 KSTAT_DATA_CHAR); 8057 kstat_named_init(&stp->sd_revision, "Revision", 8058 KSTAT_DATA_CHAR); 8059 kstat_named_init(&stp->sd_serial, "Serial No", 8060 KSTAT_DATA_CHAR); 8061 kstat_named_init(&stp->sd_capacity, "Size", 8062 KSTAT_DATA_ULONGLONG); 8063 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8064 KSTAT_DATA_UINT32); 8065 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8066 KSTAT_DATA_UINT32); 8067 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8068 KSTAT_DATA_UINT32); 8069 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8070 KSTAT_DATA_UINT32); 8071 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8072 KSTAT_DATA_UINT32); 8073 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8074 KSTAT_DATA_UINT32); 8075 8076 un->un_errstats->ks_private = un; 8077 un->un_errstats->ks_update = nulldev; 8078 8079 kstat_install(un->un_errstats); 8080 } 8081 8082 8083 /* 8084 * Function: sd_set_errstats 8085 * 8086 * Description: This routine sets the value of the vendor id, product id, 8087 * revision, serial number, and capacity device error stats. 8088 * 8089 * Note: During attach the stats are instantiated first so they are 8090 * available for attach-time routines that utilize the driver 8091 * iopath to send commands to the device. The stats are initialized 8092 * separately so data obtained during some attach-time routines is 8093 * available. 
(4362483)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
    struct sd_errstats	*stp;

    ASSERT(un != NULL);
    ASSERT(un->un_errstats != NULL);
    stp = (struct sd_errstats *)un->un_errstats->ks_data;
    ASSERT(stp != NULL);
    (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
    (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
    (void) strncpy(stp->sd_revision.value.c,
        un->un_sd->sd_inq->inq_revision, 4);

    /*
     * All the errstats are persistent across detach/attach, so reset
     * them all here in case a different disk drive has been hot-plugged
     * into this slot; skip the reset for Sun qualified drives whose
     * serial number has not changed.
     */
    if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
        (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
        sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
        stp->sd_softerrs.value.ui32 = 0;
        stp->sd_harderrs.value.ui32 = 0;
        stp->sd_transerrs.value.ui32 = 0;
        stp->sd_rq_media_err.value.ui32 = 0;
        stp->sd_rq_ntrdy_err.value.ui32 = 0;
        stp->sd_rq_nodev_err.value.ui32 = 0;
        stp->sd_rq_recov_err.value.ui32 = 0;
        stp->sd_rq_illrq_err.value.ui32 = 0;
        stp->sd_rq_pfa_err.value.ui32 = 0;
    }

    /*
     * Set the "Serial No" kstat for Sun qualified drives (indicated by
     * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
     * (4376302))
     */
    if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
        bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
            sizeof (SD_INQUIRY(un)->inq_serial));
    }

    if (un->un_f_blockcount_is_valid != TRUE) {
        /*
         * Set capacity error stat to 0 for no media. This ensures
         * a valid capacity is displayed in response to 'iostat -E'
         * when no media is present in the device.
         */
        stp->sd_capacity.value.ui64 = 0;
    } else {
        /*
         * Multiply un_blockcount by un->un_sys_blocksize to get
         * capacity.
         *
         * Note: for non-512 blocksize devices "un_blockcount" has been
         * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
         * (un_tgt_blocksize / un->un_sys_blocksize).
         */
        stp->sd_capacity.value.ui64 = (uint64_t)
            ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
    }
}


/*
 *    Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
    char		kstatname[KSTAT_STRLEN];
    int		instance;
    int		i;
    diskaddr_t	nblks = 0;
    char		*partname = NULL;

    ASSERT(un != NULL);

    instance = ddi_get_instance(SD_DEVINFO(un));

    /* Note:x86: is this a VTOC8/VTOC16 difference?
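     *
     * Each partition with a nonzero size gets an I/O kstat named by
     * the snprintf() below; for example, instance 0 with a partition
     * that cmlb names "a" would yield a kstat named "sd0,a" ("ssd0,a"
     * in the fibre binary). The actual partition names are whatever
     * cmlb_partinfo() returns.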
     */
    for (i = 0; i < NSDMAP; i++) {

        if (cmlb_partinfo(un->un_cmlbhandle, i,
            &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
            continue;
        mutex_enter(SD_MUTEX(un));

        if ((un->un_pstats[i] == NULL) &&
            (nblks != 0)) {

            (void) snprintf(kstatname, sizeof (kstatname),
                "%s%d,%s", sd_label, instance,
                partname);

            un->un_pstats[i] = kstat_create(sd_label,
                instance, kstatname, "partition", KSTAT_TYPE_IO,
                1, KSTAT_FLAG_PERSISTENT);
            if (un->un_pstats[i] != NULL) {
                un->un_pstats[i]->ks_lock = SD_MUTEX(un);
                kstat_install(un->un_pstats[i]);
            }
        }
        mutex_exit(SD_MUTEX(un));
    }
}


#if (defined(__fibre))
/*
 *    Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
    ASSERT(un != NULL);

    if ((un->un_insert_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
        &un->un_insert_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for an insertion event
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_insert_event, sd_event_callback, (void *)un,
            &(un->un_insert_cb_id));
    }

    if ((un->un_remove_event == NULL) &&
        (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
        &un->un_remove_event) == DDI_SUCCESS)) {
        /*
         * Add the callback for a removal event
         */
        (void) ddi_add_event_handler(SD_DEVINFO(un),
            un->un_remove_event, sd_event_callback, (void *)un,
            &(un->un_remove_cb_id));
    }
}


/*
 *    Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE, which can be used to suppress
 *		error msgs. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
    struct sd_lun *un = (struct sd_lun *)arg;

    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
    if (event == un->un_insert_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
        mutex_enter(SD_MUTEX(un));
        if (un->un_state == SD_STATE_OFFLINE) {
            if (un->un_last_state != SD_STATE_SUSPENDED) {
                un->un_state = un->un_last_state;
            } else {
                /*
                 * We have gone through SUSPEND/RESUME while
                 * we were offline. Restore the last state.
                 */
                un->un_state = un->un_save_state;
            }
        }
        mutex_exit(SD_MUTEX(un));

        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
    } else if (event == un->un_remove_event) {
        SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
        mutex_enter(SD_MUTEX(un));
        /*
         * We need to handle an event callback that occurs during
         * the suspend operation, since we don't prevent it.
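         * For example, if a remove event arrives while the unit is in
         * SD_STATE_SUSPENDED, the code below must not clobber the
         * suspended state; it records SD_STATE_OFFLINE in
         * un_last_state instead, so the unit lands in the OFFLINE
         * state once the resume completes.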
         */
        if (un->un_state != SD_STATE_OFFLINE) {
            if (un->un_state != SD_STATE_SUSPENDED) {
                New_state(un, SD_STATE_OFFLINE);
            } else {
                un->un_last_state = SD_STATE_OFFLINE;
            }
        }
        mutex_exit(SD_MUTEX(un));
    } else {
        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
            "!Unknown event\n");
    }

}
#endif

/*
 *    Function: sd_cache_control()
 *
 * Description: This routine is the driver entry point for setting
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 *   Arguments: un - driver soft state (unit) structure
 *		rcd_flag - flag for controlling the read cache
 *		wce_flag - flag for controlling the write cache
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE and
 *		sd_send_scsi_MODE_SELECT
 *
 *     Context: Kernel Thread
 */

static int
sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag)
{
    struct mode_caching	*mode_caching_page;
    uchar_t			*header;
    size_t			buflen;
    int			hdrlen;
    int			bd_len;
    int			rval = 0;
    struct mode_header_grp2	*mhp;

    ASSERT(un != NULL);

    /*
     * Do a test unit ready; otherwise a mode sense may not work if this
     * is the first command sent to the device after boot.
     */
    (void) sd_send_scsi_TEST_UNIT_READY(un, 0);

    if (un->un_f_cfg_is_atapi == TRUE) {
        hdrlen = MODE_HEADER_LENGTH_GRP2;
    } else {
        hdrlen = MODE_HEADER_LENGTH;
    }

    /*
     * Allocate memory for the retrieved mode page and its headers. Set
     * a pointer to the page itself. Use mode_cache_scsi3 to ensure
     * we get all of the mode sense data; otherwise, the mode select
     * will fail. mode_cache_scsi3 is a superset of mode_caching.
     */
    buflen = hdrlen + MODE_BLK_DESC_LENGTH +
        sizeof (struct mode_cache_scsi3);

    header = kmem_zalloc(buflen, KM_SLEEP);

    /* Get the information from the device. */
    if (un->un_f_cfg_is_atapi == TRUE) {
        rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
            MODEPAGE_CACHING, SD_PATH_DIRECT);
    } else {
        rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
            MODEPAGE_CACHING, SD_PATH_DIRECT);
    }
    if (rval != 0) {
        SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
            "sd_cache_control: Mode Sense Failed\n");
        kmem_free(header, buflen);
        return (rval);
    }

    /*
     * Determine size of Block Descriptors in order to locate
     * the mode page data. ATAPI devices return 0, SCSI devices
     * should return MODE_BLK_DESC_LENGTH.
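     *
     * The returned buffer is laid out as shown below, which is why the
     * caching page pointer is computed as header + hdrlen + bd_len:
     *
     *	+------------------+---------------------+-------------------+
     *	| mode header      | block descriptor(s) | caching mode page |
     *	| (hdrlen bytes)   | (bd_len bytes)      |                   |
     *	+------------------+---------------------+-------------------+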
8393 */ 8394 if (un->un_f_cfg_is_atapi == TRUE) { 8395 mhp = (struct mode_header_grp2 *)header; 8396 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8397 } else { 8398 bd_len = ((struct mode_header *)header)->bdesc_length; 8399 } 8400 8401 if (bd_len > MODE_BLK_DESC_LENGTH) { 8402 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8403 "sd_cache_control: Mode Sense returned invalid " 8404 "block descriptor length\n"); 8405 kmem_free(header, buflen); 8406 return (EIO); 8407 } 8408 8409 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8410 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8411 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8412 " caching page code mismatch %d\n", 8413 mode_caching_page->mode_page.code); 8414 kmem_free(header, buflen); 8415 return (EIO); 8416 } 8417 8418 /* Check the relevant bits on successful mode sense. */ 8419 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8420 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8421 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8422 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8423 8424 size_t sbuflen; 8425 uchar_t save_pg; 8426 8427 /* 8428 * Construct select buffer length based on the 8429 * length of the sense data returned. 8430 */ 8431 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8432 sizeof (struct mode_page) + 8433 (int)mode_caching_page->mode_page.length; 8434 8435 /* 8436 * Set the caching bits as requested. 8437 */ 8438 if (rcd_flag == SD_CACHE_ENABLE) 8439 mode_caching_page->rcd = 0; 8440 else if (rcd_flag == SD_CACHE_DISABLE) 8441 mode_caching_page->rcd = 1; 8442 8443 if (wce_flag == SD_CACHE_ENABLE) 8444 mode_caching_page->wce = 1; 8445 else if (wce_flag == SD_CACHE_DISABLE) 8446 mode_caching_page->wce = 0; 8447 8448 /* 8449 * Save the page if the mode sense says the 8450 * drive supports it. 8451 */ 8452 save_pg = mode_caching_page->mode_page.ps ? 8453 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8454 8455 /* Clear reserved bits before mode select. */ 8456 mode_caching_page->mode_page.ps = 0; 8457 8458 /* 8459 * Clear out mode header for mode select. 8460 * The rest of the retrieved page will be reused. 8461 */ 8462 bzero(header, hdrlen); 8463 8464 if (un->un_f_cfg_is_atapi == TRUE) { 8465 mhp = (struct mode_header_grp2 *)header; 8466 mhp->bdesc_length_hi = bd_len >> 8; 8467 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8468 } else { 8469 ((struct mode_header *)header)->bdesc_length = bd_len; 8470 } 8471 8472 /* Issue mode select to change the cache settings */ 8473 if (un->un_f_cfg_is_atapi == TRUE) { 8474 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8475 sbuflen, save_pg, SD_PATH_DIRECT); 8476 } else { 8477 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8478 sbuflen, save_pg, SD_PATH_DIRECT); 8479 } 8480 } 8481 8482 kmem_free(header, buflen); 8483 return (rval); 8484 } 8485 8486 8487 /* 8488 * Function: sd_get_write_cache_enabled() 8489 * 8490 * Description: This routine is the driver entry point for determining if 8491 * write caching is enabled. It examines the WCE (write cache 8492 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8493 * 8494 * Arguments: un - driver soft state (unit) structure 8495 * is_enabled - pointer to int where write cache enabled state 8496 * is returned (non-zero -> write cache enabled) 8497 * 8498 * 8499 * Return Code: EIO 8500 * code returned by sd_send_scsi_MODE_SENSE 8501 * 8502 * Context: Kernel Thread 8503 * 8504 * NOTE: If ioctl is added to disable write cache, this sequence should 8505 * be followed so that no locking is required for accesses to 8506 * un->un_f_write_cache_enabled: 8507 * do mode select to clear wce 8508 * do synchronize cache to flush cache 8509 * set un->un_f_write_cache_enabled = FALSE 8510 * 8511 * Conversely, an ioctl to enable the write cache should be done 8512 * in this order: 8513 * set un->un_f_write_cache_enabled = TRUE 8514 * do mode select to set wce 8515 */ 8516 8517 static int 8518 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8519 { 8520 struct mode_caching *mode_caching_page; 8521 uchar_t *header; 8522 size_t buflen; 8523 int hdrlen; 8524 int bd_len; 8525 int rval = 0; 8526 8527 ASSERT(un != NULL); 8528 ASSERT(is_enabled != NULL); 8529 8530 /* in case of error, flag as enabled */ 8531 *is_enabled = TRUE; 8532 8533 /* 8534 * Do a test unit ready, otherwise a mode sense may not work if this 8535 * is the first command sent to the device after boot. 8536 */ 8537 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8538 8539 if (un->un_f_cfg_is_atapi == TRUE) { 8540 hdrlen = MODE_HEADER_LENGTH_GRP2; 8541 } else { 8542 hdrlen = MODE_HEADER_LENGTH; 8543 } 8544 8545 /* 8546 * Allocate memory for the retrieved mode page and its headers. Set 8547 * a pointer to the page itself. 8548 */ 8549 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8550 header = kmem_zalloc(buflen, KM_SLEEP); 8551 8552 /* Get the information from the device. */ 8553 if (un->un_f_cfg_is_atapi == TRUE) { 8554 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8555 MODEPAGE_CACHING, SD_PATH_DIRECT); 8556 } else { 8557 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8558 MODEPAGE_CACHING, SD_PATH_DIRECT); 8559 } 8560 if (rval != 0) { 8561 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8562 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8563 kmem_free(header, buflen); 8564 return (rval); 8565 } 8566 8567 /* 8568 * Determine size of Block Descriptors in order to locate 8569 * the mode page data. ATAPI devices return 0, SCSI devices 8570 * should return MODE_BLK_DESC_LENGTH. 
 */
    if (un->un_f_cfg_is_atapi == TRUE) {
        struct mode_header_grp2 *mhp;
        mhp = (struct mode_header_grp2 *)header;
        bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
    } else {
        bd_len = ((struct mode_header *)header)->bdesc_length;
    }

    if (bd_len > MODE_BLK_DESC_LENGTH) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "sd_get_write_cache_enabled: Mode Sense returned invalid "
            "block descriptor length\n");
        kmem_free(header, buflen);
        return (EIO);
    }

    mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
    if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
        SD_ERROR(SD_LOG_COMMON, un,
            "sd_get_write_cache_enabled: Mode Sense"
            " caching page code mismatch %d\n",
            mode_caching_page->mode_page.code);
        kmem_free(header, buflen);
        return (EIO);
    }
    *is_enabled = mode_caching_page->wce;

    kmem_free(header, buflen);
    return (0);
}

/*
 *    Function: sd_get_nv_sup()
 *
 * Description: This routine is the driver entry point for
 *		determining whether non-volatile cache is supported. This
 *		determination process works as follows:
 *
 *		1. sd first queries sd.conf to see whether the
 *		suppress_cache_flush bit is set for this device.
 *
 *		2. if not set there, it then queries the internal disk
 *		table.
 *
 *		3. if either sd.conf or the internal disk table specifies
 *		that cache flushes be suppressed, we don't bother checking
 *		the NV_SUP bit.
 *
 *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
 *		the optional INQUIRY VPD page 0x86. If the device
 *		supports VPD page 0x86, sd examines the NV_SUP
 *		(non-volatile cache support) bit in the INQUIRY VPD page
 *		0x86:
 *		o If the NV_SUP bit is set, sd assumes the device has a
 *		  non-volatile cache and sets un_f_sync_nv_supported to
 *		  TRUE.
 *		o Otherwise the cache is not non-volatile, and
 *		  un_f_sync_nv_supported is set to FALSE.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code:
 *
 *     Context: Kernel Thread
 */

static void
sd_get_nv_sup(struct sd_lun *un)
{
    int		rval		= 0;
    uchar_t		*inq86		= NULL;
    size_t		inq86_len	= MAX_INQUIRY_SIZE;
    size_t		inq86_resid	= 0;
    struct		dk_callback *dkc;

    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    /*
     * Be conservative about the device's support of the SYNC_NV
     * bit: un_f_sync_nv_supported is initialized to FALSE.
     */
    un->un_f_sync_nv_supported = FALSE;

    /*
     * If either sd.conf or the internal disk table specifies that
     * cache flushes be suppressed, then we don't bother checking
     * the NV_SUP bit.
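     *
     * When the check does proceed, the NV_SUP bit is read from byte 6
     * of the Extended INQUIRY Data VPD page (0x86); per SPC-3, byte 1
     * of that page holds the page code and byte 3 the low byte of the
     * page length, which is what the trace message below reports.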
     */
    if (un->un_f_suppress_cache_flush == TRUE) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    if (sd_check_vpd_page_support(un) == 0 &&
        un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
        mutex_exit(SD_MUTEX(un));
        /* collect page 86 data if available */
        inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
        rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len,
            0x01, 0x86, &inq86_resid);

        if (rval == 0 && (inq86_len - inq86_resid > 6)) {
            SD_TRACE(SD_LOG_COMMON, un,
                "sd_get_nv_sup: successfully got VPD page: %x "
                "PAGE LENGTH: %x BYTE 6: %x\n",
                inq86[1], inq86[3], inq86[6]);

            mutex_enter(SD_MUTEX(un));
            /*
             * Check the value of the NV_SUP bit: only if the
             * device reports the NV_SUP bit as 1 will
             * un_f_sync_nv_supported be set to TRUE.
             */
            if (inq86[6] & SD_VPD_NV_SUP) {
                un->un_f_sync_nv_supported = TRUE;
            }
            mutex_exit(SD_MUTEX(un));
        }
        kmem_free(inq86, inq86_len);
    } else {
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * Send a SYNC CACHE command to check whether the SYNC_NV bit is
     * supported; un_f_sync_nv_supported must already hold the correct
     * value when this command is issued.
     */
    mutex_enter(SD_MUTEX(un));
    if (un->un_f_sync_nv_supported) {
        mutex_exit(SD_MUTEX(un));
        dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
        dkc->dkc_flag = FLUSH_VOLATILE;
        (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

        /*
         * Send a TEST UNIT READY command to the device. This should
         * clear any outstanding UNIT ATTENTION that may be present.
         */
        (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR);

        kmem_free(dkc, sizeof (struct dk_callback));
    } else {
        mutex_exit(SD_MUTEX(un));
    }

    SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
        "un_f_suppress_cache_flush is set to %d\n",
        un->un_f_suppress_cache_flush);
}

/*
 *    Function: sd_make_device
 *
 * Description: Utility routine to return the Solaris device number from
 *		the data in the device's dev_info structure.
 *
 * Return Code: The Solaris device number
 *
 *     Context: Any
 */

static dev_t
sd_make_device(dev_info_t *devi)
{
    return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
        ddi_get_instance(devi) << SDUNIT_SHIFT));
}


/*
 *    Function: sd_pm_entry
 *
 * Description: Called at the start of a new command to manage power
 *		and busy status of a device. This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be marked busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, i.e. count is less than 0.
 *		Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 *     Context: Kernel thread context.
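 *
 *		A minimal caller sketch (illustrative only;
 *		sd_example_start_cmd is a hypothetical helper, not a
 *		routine in this driver):
 *
 *			if (sd_pm_entry(un) != DDI_SUCCESS) {
 *				return (EIO);
 *			}
 *			rval = sd_example_start_cmd(un);
 *			sd_pm_exit(un);
 *
 *		sd_pm_exit() must be called exactly once for each
 *		successful sd_pm_entry(), and never after a failure.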
8765 */ 8766 8767 static int 8768 sd_pm_entry(struct sd_lun *un) 8769 { 8770 int return_status = DDI_SUCCESS; 8771 8772 ASSERT(!mutex_owned(SD_MUTEX(un))); 8773 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8774 8775 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8776 8777 if (un->un_f_pm_is_enabled == FALSE) { 8778 SD_TRACE(SD_LOG_IO_PM, un, 8779 "sd_pm_entry: exiting, PM not enabled\n"); 8780 return (return_status); 8781 } 8782 8783 /* 8784 * Just increment a counter if PM is enabled. On the transition from 8785 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8786 * the count with each IO and mark the device as idle when the count 8787 * hits 0. 8788 * 8789 * If the count is less than 0 the device is powered down. If a powered 8790 * down device is successfully powered up then the count must be 8791 * incremented to reflect the power up. Note that it'll get incremented 8792 * a second time to become busy. 8793 * 8794 * Because the following has the potential to change the device state 8795 * and must release the un_pm_mutex to do so, only one thread can be 8796 * allowed through at a time. 8797 */ 8798 8799 mutex_enter(&un->un_pm_mutex); 8800 while (un->un_pm_busy == TRUE) { 8801 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8802 } 8803 un->un_pm_busy = TRUE; 8804 8805 if (un->un_pm_count < 1) { 8806 8807 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8808 8809 /* 8810 * Indicate we are now busy so the framework won't attempt to 8811 * power down the device. This call will only fail if either 8812 * we passed a bad component number or the device has no 8813 * components. Neither of these should ever happen. 8814 */ 8815 mutex_exit(&un->un_pm_mutex); 8816 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8817 ASSERT(return_status == DDI_SUCCESS); 8818 8819 mutex_enter(&un->un_pm_mutex); 8820 8821 if (un->un_pm_count < 0) { 8822 mutex_exit(&un->un_pm_mutex); 8823 8824 SD_TRACE(SD_LOG_IO_PM, un, 8825 "sd_pm_entry: power up component\n"); 8826 8827 /* 8828 * pm_raise_power will cause sdpower to be called 8829 * which brings the device power level to the 8830 * desired state, ON in this case. If successful, 8831 * un_pm_count and un_power_level will be updated 8832 * appropriately. 8833 */ 8834 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8835 SD_SPINDLE_ON); 8836 8837 mutex_enter(&un->un_pm_mutex); 8838 8839 if (return_status != DDI_SUCCESS) { 8840 /* 8841 * Power up failed. 8842 * Idle the device and adjust the count 8843 * so the result on exit is that we're 8844 * still powered down, ie. count is less than 0. 8845 */ 8846 SD_TRACE(SD_LOG_IO_PM, un, 8847 "sd_pm_entry: power up failed," 8848 " idle the component\n"); 8849 8850 (void) pm_idle_component(SD_DEVINFO(un), 0); 8851 un->un_pm_count--; 8852 } else { 8853 /* 8854 * Device is powered up, verify the 8855 * count is non-negative. 8856 * This is debug only. 8857 */ 8858 ASSERT(un->un_pm_count == 0); 8859 } 8860 } 8861 8862 if (return_status == DDI_SUCCESS) { 8863 /* 8864 * For performance, now that the device has been tagged 8865 * as busy, and it's known to be powered up, update the 8866 * chain types to use jump tables that do not include 8867 * pm. This significantly lowers the overhead and 8868 * therefore improves performance. 
8869 */ 8870 8871 mutex_exit(&un->un_pm_mutex); 8872 mutex_enter(SD_MUTEX(un)); 8873 SD_TRACE(SD_LOG_IO_PM, un, 8874 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8875 un->un_uscsi_chain_type); 8876 8877 if (un->un_f_non_devbsize_supported) { 8878 un->un_buf_chain_type = 8879 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8880 } else { 8881 un->un_buf_chain_type = 8882 SD_CHAIN_INFO_DISK_NO_PM; 8883 } 8884 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8885 8886 SD_TRACE(SD_LOG_IO_PM, un, 8887 " changed uscsi_chain_type to %d\n", 8888 un->un_uscsi_chain_type); 8889 mutex_exit(SD_MUTEX(un)); 8890 mutex_enter(&un->un_pm_mutex); 8891 8892 if (un->un_pm_idle_timeid == NULL) { 8893 /* 300 ms. */ 8894 un->un_pm_idle_timeid = 8895 timeout(sd_pm_idletimeout_handler, un, 8896 (drv_usectohz((clock_t)300000))); 8897 /* 8898 * Include an extra call to busy which keeps the 8899 * device busy with-respect-to the PM layer 8900 * until the timer fires, at which time it'll 8901 * get the extra idle call. 8902 */ 8903 (void) pm_busy_component(SD_DEVINFO(un), 0); 8904 } 8905 } 8906 } 8907 un->un_pm_busy = FALSE; 8908 /* Next... */ 8909 cv_signal(&un->un_pm_busy_cv); 8910 8911 un->un_pm_count++; 8912 8913 SD_TRACE(SD_LOG_IO_PM, un, 8914 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8915 8916 mutex_exit(&un->un_pm_mutex); 8917 8918 return (return_status); 8919 } 8920 8921 8922 /* 8923 * Function: sd_pm_exit 8924 * 8925 * Description: Called at the completion of a command to manage busy 8926 * status for the device. If the device becomes idle the 8927 * PM framework is notified. 8928 * 8929 * Context: Kernel thread context 8930 */ 8931 8932 static void 8933 sd_pm_exit(struct sd_lun *un) 8934 { 8935 ASSERT(!mutex_owned(SD_MUTEX(un))); 8936 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8937 8938 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8939 8940 /* 8941 * After attach the following flag is only read, so don't 8942 * take the penalty of acquiring a mutex for it. 8943 */ 8944 if (un->un_f_pm_is_enabled == TRUE) { 8945 8946 mutex_enter(&un->un_pm_mutex); 8947 un->un_pm_count--; 8948 8949 SD_TRACE(SD_LOG_IO_PM, un, 8950 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8951 8952 ASSERT(un->un_pm_count >= 0); 8953 if (un->un_pm_count == 0) { 8954 mutex_exit(&un->un_pm_mutex); 8955 8956 SD_TRACE(SD_LOG_IO_PM, un, 8957 "sd_pm_exit: idle component\n"); 8958 8959 (void) pm_idle_component(SD_DEVINFO(un), 0); 8960 8961 } else { 8962 mutex_exit(&un->un_pm_mutex); 8963 } 8964 } 8965 8966 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 8967 } 8968 8969 8970 /* 8971 * Function: sdopen 8972 * 8973 * Description: Driver's open(9e) entry point function. 
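 *		The device number handed to open(9e) encodes both the
 *		instance and the partition: sd_make_device() above packs
 *		the instance into the minor number, and the partition sits
 *		in the low-order minor bits. A stand-alone model of the
 *		scheme with an illustrative 3-bit partition field (the
 *		driver's real SDUNIT_SHIFT and its SDUNIT()/SDPART()
 *		macros are defined in its headers):
 *
 *			#include <assert.h>
 *
 *			#define MODEL_PART_BITS 3
 *			#define MODEL_UNIT(m) ((m) >> MODEL_PART_BITS)
 *			#define MODEL_PART(m) \
 *				((m) & ((1 << MODEL_PART_BITS) - 1))
 *
 *			int
 *			main(void)
 *			{
 *				unsigned instance = 5, part = 2;
 *				unsigned minor =
 *				    (instance << MODEL_PART_BITS) | part;
 *
 *				assert(MODEL_UNIT(minor) == instance);
 *				assert(MODEL_PART(minor) == part);
 *				return (0);
 *			}
 *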
8974  *
8975  * Arguments: dev_p - pointer to the device number
8976  *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
8977  *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
8978  *		cred_p - user credential pointer
8979  *
8980  * Return Code: EINVAL
8981  *		ENXIO
8982  *		EIO
8983  *		EROFS
8984  *		EBUSY
8985  *
8986  * Context: Kernel thread context
8987  */
8988 /* ARGSUSED */
8989 static int
8990 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
8991 {
8992 	struct sd_lun *un;
8993 	int nodelay;
8994 	int part;
8995 	uint64_t partmask;
8996 	int instance;
8997 	dev_t dev;
8998 	int rval = EIO;
8999 	diskaddr_t nblks = 0;
9000
9001 	/* Validate the open type */
9002 	if (otyp >= OTYPCNT) {
9003 		return (EINVAL);
9004 	}
9005
9006 	dev = *dev_p;
9007 	instance = SDUNIT(dev);
9008 	mutex_enter(&sd_detach_mutex);
9009
9010 	/*
9011 	 * Fail the open if there is no softstate for the instance, or
9012 	 * if another thread somewhere is trying to detach the instance.
9013 	 */
9014 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9015 	    (un->un_detach_count != 0)) {
9016 		mutex_exit(&sd_detach_mutex);
9017 		/*
9018 		 * The probe cache only needs to be cleared when open (9e) fails
9019 		 * with ENXIO (4238046).
9020 		 */
9021 		/*
9022 		 * Unconditionally clearing the probe cache is fine when
9023 		 * sd and ssd are separate binaries. On x86, where both
9024 		 * parallel SCSI and fibre are handled by one binary, it
9025 		 * can be an issue.
9026 		 */
9027 		sd_scsi_clear_probe_cache();
9028 		return (ENXIO);
9029 	}
9030
9031 	/*
9032 	 * The un_layer_count is to prevent another thread in specfs from
9033 	 * trying to detach the instance, which can happen when we are
9034 	 * called from a higher-layer driver instead of thru specfs.
9035 	 * This will not be needed when DDI provides a layered driver
9036 	 * interface that allows specfs to know that an instance is in
9037 	 * use by a layered driver & should not be detached.
9038 	 *
9039 	 * Note: the semantics for layered driver opens are exactly one
9040 	 * close for every open.
9041 	 */
9042 	if (otyp == OTYP_LYR) {
9043 		un->un_layer_count++;
9044 	}
9045
9046 	/*
9047 	 * Keep a count of the current # of opens in progress. This is because
9048 	 * some layered drivers try to call us as a regular open. This can
9049 	 * cause problems that we cannot prevent; however, by keeping this
9050 	 * count we can at least keep our open and detach routines from
9051 	 * racing against each other under such conditions.
9052 	 */
9053 	un->un_opens_in_progress++;
9054 	mutex_exit(&sd_detach_mutex);
9055
9056 	nodelay = (flag & (FNDELAY | FNONBLOCK));
9057 	part = SDPART(dev);
9058 	partmask = 1 << part;
9059
9060 	/*
9061 	 * We use a semaphore here in order to serialize
9062 	 * open and close requests on the device.
9063 	 */
9064 	sema_p(&un->un_semoclose);
9065
9066 	mutex_enter(SD_MUTEX(un));
9067
9068 	/*
9069 	 * All device accesses go thru sdstrategy(), where we check
9070 	 * the suspend status; but there could be a scsi_poll command,
9071 	 * which bypasses sdstrategy(), so we also need to check the pm
9072 	 * status here.
9073  */
9074
9075 	if (!nodelay) {
9076 		while ((un->un_state == SD_STATE_SUSPENDED) ||
9077 		    (un->un_state == SD_STATE_PM_CHANGING)) {
9078 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9079 		}
9080
9081 		mutex_exit(SD_MUTEX(un));
9082 		if (sd_pm_entry(un) != DDI_SUCCESS) {
9083 			rval = EIO;
9084 			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9085 			    "sdopen: sd_pm_entry failed\n");
9086 			goto open_failed_with_pm;
9087 		}
9088 		mutex_enter(SD_MUTEX(un));
9089 	}
9090
9091 	/* check for previous exclusive open */
9092 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9093 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9094 	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9095 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9096
9097 	if (un->un_exclopen & (partmask)) {
9098 		goto excl_open_fail;
9099 	}
9100
9101 	if (flag & FEXCL) {
9102 		int i;
9103 		if (un->un_ocmap.lyropen[part]) {
9104 			goto excl_open_fail;
9105 		}
9106 		for (i = 0; i < (OTYPCNT - 1); i++) {
9107 			if (un->un_ocmap.regopen[i] & (partmask)) {
9108 				goto excl_open_fail;
9109 			}
9110 		}
9111 	}
9112
9113 	/*
9114 	 * Check the write permission if this is a removable media device,
9115 	 * NDELAY has not been set, and writable permission is requested.
9116 	 *
9117 	 * Note: If NDELAY was set and this is write-protected media the WRITE
9118 	 * attempt will fail with EIO as part of the I/O processing. This is a
9119 	 * more permissive implementation that allows the open to succeed and
9120 	 * WRITE attempts to fail when appropriate.
9121 	 */
9122 	if (un->un_f_chk_wp_open) {
9123 		if ((flag & FWRITE) && (!nodelay)) {
9124 			mutex_exit(SD_MUTEX(un));
9125 			/*
9126 			 * For a writable DVD drive, defer the write-permission
9127 			 * check until sdstrategy, and do not fail the open
9128 			 * even if FWRITE is set: the device may be writable
9129 			 * depending upon the media, and the media can change
9130 			 * after the call to open().
9131 			 */
9132 			if (un->un_f_dvdram_writable_device == FALSE) {
9133 				if (ISCD(un) || sr_check_wp(dev)) {
9134 					rval = EROFS;
9135 					mutex_enter(SD_MUTEX(un));
9136 					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9137 					    "write to cd or write protected media\n");
9138 					goto open_fail;
9139 				}
9140 			}
9141 			mutex_enter(SD_MUTEX(un));
9142 		}
9143 	}
9144
9145 	/*
9146 	 * If opening in NDELAY/NONBLOCK mode, just return.
9147 	 * Check if disk is ready and has a valid geometry later.
9148 	 */
9149 	if (!nodelay) {
9150 		mutex_exit(SD_MUTEX(un));
9151 		rval = sd_ready_and_valid(un);
9152 		mutex_enter(SD_MUTEX(un));
9153 		/*
9154 		 * Fail if device is not ready or if the number of disk
9155 		 * blocks is zero or negative for non-CD devices.
9156 		 */
9157
9158 		nblks = 0;
9159
9160 		if (rval == SD_READY_VALID && (!ISCD(un))) {
9161 			/* if cmlb_partinfo fails, nblks remains 0 */
9162 			mutex_exit(SD_MUTEX(un));
9163 			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
9164 			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
9165 			mutex_enter(SD_MUTEX(un));
9166 		}
9167
9168 		if ((rval != SD_READY_VALID) ||
9169 		    (!ISCD(un) && nblks <= 0)) {
9170 			rval = un->un_f_has_removable_media ? ENXIO : EIO;
9171 			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9172 			    "device not ready or invalid disk block value\n");
9173 			goto open_fail;
9174 		}
9175 #if defined(__i386) || defined(__amd64)
9176 	} else {
9177 		uchar_t *cp;
9178 		/*
9179 		 * x86 requires special nodelay handling, so that p0 is
9180 		 * always defined and accessible.
9181 		 * Invalidate geometry only if device is not already open.
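		 *
		 * The open-state bookkeeping used above (and again by
		 * sdclose()) is plain bitmask arithmetic over un_ocmap.
		 * A stand-alone model of the FEXCL conflict test (array
		 * sizes and names here are illustrative):
		 *
		 *	#define MODEL_NPART 8
		 *	#define MODEL_NOTYP 5
		 *
		 *	struct ocmap_model {
		 *		unsigned short lyropen[MODEL_NPART];
		 *		unsigned short regopen[MODEL_NOTYP];
		 *	};
		 *
		 *	int
		 *	fexcl_conflict(struct ocmap_model *ocp,
		 *	    unsigned exclopen, int part)
		 *	{
		 *		unsigned partmask = 1u << part;
		 *		int i;
		 *
		 *		if (exclopen & partmask)
		 *			return (1);
		 *		if (ocp->lyropen[part])
		 *			return (1);
		 *		for (i = 0; i < MODEL_NOTYP - 1; i++)
		 *			if (ocp->regopen[i] & partmask)
		 *				return (1);
		 *		return (0);
		 *	}
		 *
		 * The "is anything open at all" question, answered by the
		 * chkd[] scan below, just looks for any nonzero byte in the
		 * same storage, which is why the map is overlaid with a
		 * byte array.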
9182 */ 9183 cp = &un->un_ocmap.chkd[0]; 9184 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9185 if (*cp != (uchar_t)0) { 9186 break; 9187 } 9188 cp++; 9189 } 9190 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9191 mutex_exit(SD_MUTEX(un)); 9192 cmlb_invalidate(un->un_cmlbhandle, 9193 (void *)SD_PATH_DIRECT); 9194 mutex_enter(SD_MUTEX(un)); 9195 } 9196 9197 #endif 9198 } 9199 9200 if (otyp == OTYP_LYR) { 9201 un->un_ocmap.lyropen[part]++; 9202 } else { 9203 un->un_ocmap.regopen[otyp] |= partmask; 9204 } 9205 9206 /* Set up open and exclusive open flags */ 9207 if (flag & FEXCL) { 9208 un->un_exclopen |= (partmask); 9209 } 9210 9211 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9212 "open of part %d type %d\n", part, otyp); 9213 9214 mutex_exit(SD_MUTEX(un)); 9215 if (!nodelay) { 9216 sd_pm_exit(un); 9217 } 9218 9219 sema_v(&un->un_semoclose); 9220 9221 mutex_enter(&sd_detach_mutex); 9222 un->un_opens_in_progress--; 9223 mutex_exit(&sd_detach_mutex); 9224 9225 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9226 return (DDI_SUCCESS); 9227 9228 excl_open_fail: 9229 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9230 rval = EBUSY; 9231 9232 open_fail: 9233 mutex_exit(SD_MUTEX(un)); 9234 9235 /* 9236 * On a failed open we must exit the pm management. 9237 */ 9238 if (!nodelay) { 9239 sd_pm_exit(un); 9240 } 9241 open_failed_with_pm: 9242 sema_v(&un->un_semoclose); 9243 9244 mutex_enter(&sd_detach_mutex); 9245 un->un_opens_in_progress--; 9246 if (otyp == OTYP_LYR) { 9247 un->un_layer_count--; 9248 } 9249 mutex_exit(&sd_detach_mutex); 9250 9251 return (rval); 9252 } 9253 9254 9255 /* 9256 * Function: sdclose 9257 * 9258 * Description: Driver's close(9e) entry point function. 9259 * 9260 * Arguments: dev - device number 9261 * flag - file status flag, informational only 9262 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9263 * cred_p - user credential pointer 9264 * 9265 * Return Code: ENXIO 9266 * 9267 * Context: Kernel thread context 9268 */ 9269 /* ARGSUSED */ 9270 static int 9271 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9272 { 9273 struct sd_lun *un; 9274 uchar_t *cp; 9275 int part; 9276 int nodelay; 9277 int rval = 0; 9278 9279 /* Validate the open type */ 9280 if (otyp >= OTYPCNT) { 9281 return (ENXIO); 9282 } 9283 9284 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9285 return (ENXIO); 9286 } 9287 9288 part = SDPART(dev); 9289 nodelay = flag & (FNDELAY | FNONBLOCK); 9290 9291 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9292 "sdclose: close of part %d type %d\n", part, otyp); 9293 9294 /* 9295 * We use a semaphore here in order to serialize 9296 * open and close requests on the device. 9297 */ 9298 sema_p(&un->un_semoclose); 9299 9300 mutex_enter(SD_MUTEX(un)); 9301 9302 /* Don't proceed if power is being changed. 
 */
9303 	while (un->un_state == SD_STATE_PM_CHANGING) {
9304 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9305 	}
9306
9307 	if (un->un_exclopen & (1 << part)) {
9308 		un->un_exclopen &= ~(1 << part);
9309 	}
9310
9311 	/* Update the open partition map */
9312 	if (otyp == OTYP_LYR) {
9313 		un->un_ocmap.lyropen[part] -= 1;
9314 	} else {
9315 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
9316 	}
9317
9318 	cp = &un->un_ocmap.chkd[0];
9319 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9320 		if (*cp != (uchar_t)0) {
9321 			break;
9322 		}
9323 		cp++;
9324 	}
9325
9326 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9327 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
9328
9329 		/*
9330 		 * We avoid persistence upon the last close, and set
9331 		 * the throttle back to the maximum.
9332 		 */
9333 		un->un_throttle = un->un_saved_throttle;
9334
9335 		if (un->un_state == SD_STATE_OFFLINE) {
9336 			if (un->un_f_is_fibre == FALSE) {
9337 				scsi_log(SD_DEVINFO(un), sd_label,
9338 				    CE_WARN, "offline\n");
9339 			}
9340 			mutex_exit(SD_MUTEX(un));
9341 			cmlb_invalidate(un->un_cmlbhandle,
9342 			    (void *)SD_PATH_DIRECT);
9343 			mutex_enter(SD_MUTEX(un));
9344
9345 		} else {
9346 			/*
9347 			 * Flush any outstanding writes in NVRAM cache.
9348 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
9349 			 * cmd; it may not work for non-Pluto devices.
9350 			 * SYNCHRONIZE CACHE is not required for removables,
9351 			 * except DVD-RAM drives.
9352 			 *
9353 			 * Also note: because SYNCHRONIZE CACHE is currently
9354 			 * the only command issued here that requires the
9355 			 * drive be powered up, only do the power up before
9356 			 * sending the Sync Cache command. If additional
9357 			 * commands are added which require a powered-up
9358 			 * drive, the following sequence may have to change.
9359 			 *
9360 			 * And finally, note that parallel SCSI on SPARC
9361 			 * only issues a Sync Cache to DVD-RAM, a newly
9362 			 * supported device.
9363 			 */
9364 #if defined(__i386) || defined(__amd64)
9365 			if (un->un_f_sync_cache_supported ||
9366 			    un->un_f_dvdram_writable_device == TRUE) {
9367 #else
9368 			if (un->un_f_dvdram_writable_device == TRUE) {
9369 #endif
9370 				mutex_exit(SD_MUTEX(un));
9371 				if (sd_pm_entry(un) == DDI_SUCCESS) {
9372 					rval =
9373 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
9374 					    NULL);
9375 					/* ignore error if not supported */
9376 					if (rval == ENOTSUP) {
9377 						rval = 0;
9378 					} else if (rval != 0) {
9379 						rval = EIO;
9380 					}
9381 					sd_pm_exit(un);
9382 				} else {
9383 					rval = EIO;
9384 				}
9385 				mutex_enter(SD_MUTEX(un));
9386 			}
9387
9388 			/*
9389 			 * For devices which support DOOR_LOCK, send an ALLOW
9390 			 * MEDIA REMOVAL command, but don't get upset if it
9391 			 * fails. We need to raise the power of the drive before
9392 			 * we can call sd_send_scsi_DOORLOCK().
9393 			 */
9394 			if (un->un_f_doorlock_supported) {
9395 				mutex_exit(SD_MUTEX(un));
9396 				if (sd_pm_entry(un) == DDI_SUCCESS) {
9397 					rval = sd_send_scsi_DOORLOCK(un,
9398 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
9399
9400 					sd_pm_exit(un);
9401 					if (ISCD(un) && (rval != 0) &&
9402 					    (nodelay != 0)) {
9403 						rval = ENXIO;
9404 					}
9405 				} else {
9406 					rval = EIO;
9407 				}
9408 				mutex_enter(SD_MUTEX(un));
9409 			}
9410
9411 			/*
9412 			 * If a device has removable media, invalidate all
9413 			 * parameters related to media, such as geometry,
9414 			 * blocksize, and blockcount.
9415 			 */
9416 			if (un->un_f_has_removable_media) {
9417 				sr_ejected(un);
9418 			}
9419
9420 			/*
9421 			 * Destroy the cache (if it exists) which was
9422 			 * allocated for the write maps, since this is
9423 			 * the last close for this media.
9424 			 */
9425 			if (un->un_wm_cache) {
9426 				/*
9427 				 * Check if there are pending commands;
9428 				 * if there are, give a warning and
9429 				 * do not destroy the cache.
9430 				 */
9431 				if (un->un_ncmds_in_driver > 0) {
9432 					scsi_log(SD_DEVINFO(un),
9433 					    sd_label, CE_WARN,
9434 					    "Unable to clean up memory "
9435 					    "because of pending I/O\n");
9436 				} else {
9437 					kmem_cache_destroy(
9438 					    un->un_wm_cache);
9439 					un->un_wm_cache = NULL;
9440 				}
9441 			}
9442 		}
9443 	}
9444
9445 	mutex_exit(SD_MUTEX(un));
9446 	sema_v(&un->un_semoclose);
9447
9448 	if (otyp == OTYP_LYR) {
9449 		mutex_enter(&sd_detach_mutex);
9450 		/*
9451 		 * The detach routine may run when the layer count
9452 		 * drops to zero.
9453 		 */
9454 		un->un_layer_count--;
9455 		mutex_exit(&sd_detach_mutex);
9456 	}
9457
9458 	return (rval);
9459 }
9460
9461
9462 /*
9463  * Function: sd_ready_and_valid
9464  *
9465  * Description: Test if device is ready and has a valid geometry.
9466  *
9467  * Arguments: un - driver soft state (unit) structure
9468  *
9469  * Return Code: SD_READY_VALID		ready and valid label
9470  *		SD_NOT_READY_VALID	not ready, no label
9471  *		SD_RESERVED_BY_OTHERS	reservation conflict
9472  *		ENOMEM			no memory for the write-map cache
9473  *
9474  * Context: Never called at interrupt context.
9475  */
9476
9477 static int
9478 sd_ready_and_valid(struct sd_lun *un)
9479 {
9480 	struct sd_errstats *stp;
9481 	uint64_t capacity;
9482 	uint_t lbasize;
9483 	int rval = SD_READY_VALID;
9484 	char name_str[48];
9485 	int is_valid;
9486
9487 	ASSERT(un != NULL);
9488 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9489
9490 	mutex_enter(SD_MUTEX(un));
9491 	/*
9492 	 * If a device has removable media, we must check if media is
9493 	 * ready when checking if this device is ready and valid.
9494 	 */
9495 	if (un->un_f_has_removable_media) {
9496 		mutex_exit(SD_MUTEX(un));
9497 		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
9498 			rval = SD_NOT_READY_VALID;
9499 			mutex_enter(SD_MUTEX(un));
9500 			goto done;
9501 		}
9502
9503 		is_valid = SD_IS_VALID_LABEL(un);
9504 		mutex_enter(SD_MUTEX(un));
9505 		if (!is_valid ||
9506 		    (un->un_f_blockcount_is_valid == FALSE) ||
9507 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
9508
9509 			/* capacity has to be read every open. */
9510 			mutex_exit(SD_MUTEX(un));
9511 			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
9512 			    &lbasize, SD_PATH_DIRECT) != 0) {
9513 				cmlb_invalidate(un->un_cmlbhandle,
9514 				    (void *)SD_PATH_DIRECT);
9515 				mutex_enter(SD_MUTEX(un));
9516 				rval = SD_NOT_READY_VALID;
9517 				goto done;
9518 			} else {
9519 				mutex_enter(SD_MUTEX(un));
9520 				sd_update_block_info(un, lbasize, capacity);
9521 			}
9522 		}
9523
9524 		/*
9525 		 * Check if the media in the device is writable or not.
9526 		 */
9527 		if (!is_valid && ISCD(un)) {
9528 			sd_check_for_writable_cd(un, SD_PATH_DIRECT);
9529 		}
9530
9531 	} else {
9532 		/*
9533 		 * Do a test unit ready to clear any unit attention from non-cd
9534 		 * devices.
9535 		 */
9536 		mutex_exit(SD_MUTEX(un));
9537 		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
9538 		mutex_enter(SD_MUTEX(un));
9539 	}
9540
9541
9542 	/*
9543 	 * If this is a non-512 block device, allocate space for
9544 	 * the wmap cache. This is done here because this routine is
9545 	 * called every time the media is changed, and the block size
9546 	 * is a function of the media rather than of the device.
9547 */ 9548 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9549 if (!(un->un_wm_cache)) { 9550 (void) snprintf(name_str, sizeof (name_str), 9551 "%s%d_cache", 9552 ddi_driver_name(SD_DEVINFO(un)), 9553 ddi_get_instance(SD_DEVINFO(un))); 9554 un->un_wm_cache = kmem_cache_create( 9555 name_str, sizeof (struct sd_w_map), 9556 8, sd_wm_cache_constructor, 9557 sd_wm_cache_destructor, NULL, 9558 (void *)un, NULL, 0); 9559 if (!(un->un_wm_cache)) { 9560 rval = ENOMEM; 9561 goto done; 9562 } 9563 } 9564 } 9565 9566 if (un->un_state == SD_STATE_NORMAL) { 9567 /* 9568 * If the target is not yet ready here (defined by a TUR 9569 * failure), invalidate the geometry and print an 'offline' 9570 * message. This is a legacy message, as the state of the 9571 * target is not actually changed to SD_STATE_OFFLINE. 9572 * 9573 * If the TUR fails for EACCES (Reservation Conflict), 9574 * SD_RESERVED_BY_OTHERS will be returned to indicate 9575 * reservation conflict. If the TUR fails for other 9576 * reasons, SD_NOT_READY_VALID will be returned. 9577 */ 9578 int err; 9579 9580 mutex_exit(SD_MUTEX(un)); 9581 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9582 mutex_enter(SD_MUTEX(un)); 9583 9584 if (err != 0) { 9585 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9586 "offline or reservation conflict\n"); 9587 mutex_exit(SD_MUTEX(un)); 9588 cmlb_invalidate(un->un_cmlbhandle, 9589 (void *)SD_PATH_DIRECT); 9590 mutex_enter(SD_MUTEX(un)); 9591 if (err == EACCES) { 9592 rval = SD_RESERVED_BY_OTHERS; 9593 } else { 9594 rval = SD_NOT_READY_VALID; 9595 } 9596 goto done; 9597 } 9598 } 9599 9600 if (un->un_f_format_in_progress == FALSE) { 9601 mutex_exit(SD_MUTEX(un)); 9602 if (cmlb_validate(un->un_cmlbhandle, 0, 9603 (void *)SD_PATH_DIRECT) != 0) { 9604 rval = SD_NOT_READY_VALID; 9605 mutex_enter(SD_MUTEX(un)); 9606 goto done; 9607 } 9608 if (un->un_f_pkstats_enabled) { 9609 sd_set_pstats(un); 9610 SD_TRACE(SD_LOG_IO_PARTITION, un, 9611 "sd_ready_and_valid: un:0x%p pstats created and " 9612 "set\n", un); 9613 } 9614 mutex_enter(SD_MUTEX(un)); 9615 } 9616 9617 /* 9618 * If this device supports DOOR_LOCK command, try and send 9619 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9620 * if it fails. For a CD, however, it is an error 9621 */ 9622 if (un->un_f_doorlock_supported) { 9623 mutex_exit(SD_MUTEX(un)); 9624 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9625 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9626 rval = SD_NOT_READY_VALID; 9627 mutex_enter(SD_MUTEX(un)); 9628 goto done; 9629 } 9630 mutex_enter(SD_MUTEX(un)); 9631 } 9632 9633 /* The state has changed, inform the media watch routines */ 9634 un->un_mediastate = DKIO_INSERTED; 9635 cv_broadcast(&un->un_state_cv); 9636 rval = SD_READY_VALID; 9637 9638 done: 9639 9640 /* 9641 * Initialize the capacity kstat value, if no media previously 9642 * (capacity kstat is 0) and a media has been inserted 9643 * (un_blockcount > 0). 9644 */ 9645 if (un->un_errstats != NULL) { 9646 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9647 if ((stp->sd_capacity.value.ui64 == 0) && 9648 (un->un_f_blockcount_is_valid == TRUE)) { 9649 stp->sd_capacity.value.ui64 = 9650 (uint64_t)((uint64_t)un->un_blockcount * 9651 un->un_sys_blocksize); 9652 } 9653 } 9654 9655 mutex_exit(SD_MUTEX(un)); 9656 return (rval); 9657 } 9658 9659 9660 /* 9661 * Function: sdmin 9662 * 9663 * Description: Routine to limit the size of a data transfer. Used in 9664 * conjunction with physio(9F). 9665 * 9666 * Arguments: bp - pointer to the indicated buf(9S) struct. 
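 *
 *		Clamping of this kind is the classic minphys(9F) pattern used
 *		with physio(9F): the min routine trims each transfer to the
 *		device limit, and physio loops until the full request has been
 *		carried. A stand-alone sketch (the struct and the 1 MB limit
 *		are illustrative, not the driver's values):
 *
 *			#include <stddef.h>
 *
 *			#define MODEL_MAX_XFER (1024 * 1024)
 *
 *			struct model_buf { size_t b_bcount; };
 *
 *			static void
 *			model_minphys(struct model_buf *bp)
 *			{
 *				if (bp->b_bcount > MODEL_MAX_XFER)
 *					bp->b_bcount = MODEL_MAX_XFER;
 *			}
 *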
9667 * 9668 * Context: Kernel thread context. 9669 */ 9670 9671 static void 9672 sdmin(struct buf *bp) 9673 { 9674 struct sd_lun *un; 9675 int instance; 9676 9677 instance = SDUNIT(bp->b_edev); 9678 9679 un = ddi_get_soft_state(sd_state, instance); 9680 ASSERT(un != NULL); 9681 9682 if (bp->b_bcount > un->un_max_xfer_size) { 9683 bp->b_bcount = un->un_max_xfer_size; 9684 } 9685 } 9686 9687 9688 /* 9689 * Function: sdread 9690 * 9691 * Description: Driver's read(9e) entry point function. 9692 * 9693 * Arguments: dev - device number 9694 * uio - structure pointer describing where data is to be stored 9695 * in user's space 9696 * cred_p - user credential pointer 9697 * 9698 * Return Code: ENXIO 9699 * EIO 9700 * EINVAL 9701 * value returned by physio 9702 * 9703 * Context: Kernel thread context. 9704 */ 9705 /* ARGSUSED */ 9706 static int 9707 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9708 { 9709 struct sd_lun *un = NULL; 9710 int secmask; 9711 int err; 9712 9713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9714 return (ENXIO); 9715 } 9716 9717 ASSERT(!mutex_owned(SD_MUTEX(un))); 9718 9719 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9720 mutex_enter(SD_MUTEX(un)); 9721 /* 9722 * Because the call to sd_ready_and_valid will issue I/O we 9723 * must wait here if either the device is suspended or 9724 * if it's power level is changing. 9725 */ 9726 while ((un->un_state == SD_STATE_SUSPENDED) || 9727 (un->un_state == SD_STATE_PM_CHANGING)) { 9728 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9729 } 9730 un->un_ncmds_in_driver++; 9731 mutex_exit(SD_MUTEX(un)); 9732 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9733 mutex_enter(SD_MUTEX(un)); 9734 un->un_ncmds_in_driver--; 9735 ASSERT(un->un_ncmds_in_driver >= 0); 9736 mutex_exit(SD_MUTEX(un)); 9737 return (EIO); 9738 } 9739 mutex_enter(SD_MUTEX(un)); 9740 un->un_ncmds_in_driver--; 9741 ASSERT(un->un_ncmds_in_driver >= 0); 9742 mutex_exit(SD_MUTEX(un)); 9743 } 9744 9745 /* 9746 * Read requests are restricted to multiples of the system block size. 9747 */ 9748 secmask = un->un_sys_blocksize - 1; 9749 9750 if (uio->uio_loffset & ((offset_t)(secmask))) { 9751 SD_ERROR(SD_LOG_READ_WRITE, un, 9752 "sdread: file offset not modulo %d\n", 9753 un->un_sys_blocksize); 9754 err = EINVAL; 9755 } else if (uio->uio_iov->iov_len & (secmask)) { 9756 SD_ERROR(SD_LOG_READ_WRITE, un, 9757 "sdread: transfer length not modulo %d\n", 9758 un->un_sys_blocksize); 9759 err = EINVAL; 9760 } else { 9761 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9762 } 9763 return (err); 9764 } 9765 9766 9767 /* 9768 * Function: sdwrite 9769 * 9770 * Description: Driver's write(9e) entry point function. 9771 * 9772 * Arguments: dev - device number 9773 * uio - structure pointer describing where data is stored in 9774 * user's space 9775 * cred_p - user credential pointer 9776 * 9777 * Return Code: ENXIO 9778 * EIO 9779 * EINVAL 9780 * value returned by physio 9781 * 9782 * Context: Kernel thread context. 
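 *		The secmask test used in sdread() above (and repeated below)
 *		relies on the block size being a power of two: for such sizes,
 *		offset & (blksize - 1) equals offset % blksize. A quick
 *		stand-alone check with made-up values:
 *
 *			#include <assert.h>
 *
 *			int
 *			main(void)
 *			{
 *				unsigned blksize = 512;
 *				unsigned secmask = blksize - 1;
 *
 *				assert((1024 & secmask) == 0);
 *				assert((1536 & secmask) == 0);
 *				assert((1000 & secmask) != 0);
 *				return (0);
 *			}
 *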
9783 */ 9784 /* ARGSUSED */ 9785 static int 9786 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9787 { 9788 struct sd_lun *un = NULL; 9789 int secmask; 9790 int err; 9791 9792 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9793 return (ENXIO); 9794 } 9795 9796 ASSERT(!mutex_owned(SD_MUTEX(un))); 9797 9798 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9799 mutex_enter(SD_MUTEX(un)); 9800 /* 9801 * Because the call to sd_ready_and_valid will issue I/O we 9802 * must wait here if either the device is suspended or 9803 * if it's power level is changing. 9804 */ 9805 while ((un->un_state == SD_STATE_SUSPENDED) || 9806 (un->un_state == SD_STATE_PM_CHANGING)) { 9807 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9808 } 9809 un->un_ncmds_in_driver++; 9810 mutex_exit(SD_MUTEX(un)); 9811 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9812 mutex_enter(SD_MUTEX(un)); 9813 un->un_ncmds_in_driver--; 9814 ASSERT(un->un_ncmds_in_driver >= 0); 9815 mutex_exit(SD_MUTEX(un)); 9816 return (EIO); 9817 } 9818 mutex_enter(SD_MUTEX(un)); 9819 un->un_ncmds_in_driver--; 9820 ASSERT(un->un_ncmds_in_driver >= 0); 9821 mutex_exit(SD_MUTEX(un)); 9822 } 9823 9824 /* 9825 * Write requests are restricted to multiples of the system block size. 9826 */ 9827 secmask = un->un_sys_blocksize - 1; 9828 9829 if (uio->uio_loffset & ((offset_t)(secmask))) { 9830 SD_ERROR(SD_LOG_READ_WRITE, un, 9831 "sdwrite: file offset not modulo %d\n", 9832 un->un_sys_blocksize); 9833 err = EINVAL; 9834 } else if (uio->uio_iov->iov_len & (secmask)) { 9835 SD_ERROR(SD_LOG_READ_WRITE, un, 9836 "sdwrite: transfer length not modulo %d\n", 9837 un->un_sys_blocksize); 9838 err = EINVAL; 9839 } else { 9840 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9841 } 9842 return (err); 9843 } 9844 9845 9846 /* 9847 * Function: sdaread 9848 * 9849 * Description: Driver's aread(9e) entry point function. 9850 * 9851 * Arguments: dev - device number 9852 * aio - structure pointer describing where data is to be stored 9853 * cred_p - user credential pointer 9854 * 9855 * Return Code: ENXIO 9856 * EIO 9857 * EINVAL 9858 * value returned by aphysio 9859 * 9860 * Context: Kernel thread context. 9861 */ 9862 /* ARGSUSED */ 9863 static int 9864 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9865 { 9866 struct sd_lun *un = NULL; 9867 struct uio *uio = aio->aio_uio; 9868 int secmask; 9869 int err; 9870 9871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9872 return (ENXIO); 9873 } 9874 9875 ASSERT(!mutex_owned(SD_MUTEX(un))); 9876 9877 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9878 mutex_enter(SD_MUTEX(un)); 9879 /* 9880 * Because the call to sd_ready_and_valid will issue I/O we 9881 * must wait here if either the device is suspended or 9882 * if it's power level is changing. 9883 */ 9884 while ((un->un_state == SD_STATE_SUSPENDED) || 9885 (un->un_state == SD_STATE_PM_CHANGING)) { 9886 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9887 } 9888 un->un_ncmds_in_driver++; 9889 mutex_exit(SD_MUTEX(un)); 9890 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9891 mutex_enter(SD_MUTEX(un)); 9892 un->un_ncmds_in_driver--; 9893 ASSERT(un->un_ncmds_in_driver >= 0); 9894 mutex_exit(SD_MUTEX(un)); 9895 return (EIO); 9896 } 9897 mutex_enter(SD_MUTEX(un)); 9898 un->un_ncmds_in_driver--; 9899 ASSERT(un->un_ncmds_in_driver >= 0); 9900 mutex_exit(SD_MUTEX(un)); 9901 } 9902 9903 /* 9904 * Read requests are restricted to multiples of the system block size. 
9905 */ 9906 secmask = un->un_sys_blocksize - 1; 9907 9908 if (uio->uio_loffset & ((offset_t)(secmask))) { 9909 SD_ERROR(SD_LOG_READ_WRITE, un, 9910 "sdaread: file offset not modulo %d\n", 9911 un->un_sys_blocksize); 9912 err = EINVAL; 9913 } else if (uio->uio_iov->iov_len & (secmask)) { 9914 SD_ERROR(SD_LOG_READ_WRITE, un, 9915 "sdaread: transfer length not modulo %d\n", 9916 un->un_sys_blocksize); 9917 err = EINVAL; 9918 } else { 9919 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9920 } 9921 return (err); 9922 } 9923 9924 9925 /* 9926 * Function: sdawrite 9927 * 9928 * Description: Driver's awrite(9e) entry point function. 9929 * 9930 * Arguments: dev - device number 9931 * aio - structure pointer describing where data is stored 9932 * cred_p - user credential pointer 9933 * 9934 * Return Code: ENXIO 9935 * EIO 9936 * EINVAL 9937 * value returned by aphysio 9938 * 9939 * Context: Kernel thread context. 9940 */ 9941 /* ARGSUSED */ 9942 static int 9943 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9944 { 9945 struct sd_lun *un = NULL; 9946 struct uio *uio = aio->aio_uio; 9947 int secmask; 9948 int err; 9949 9950 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9951 return (ENXIO); 9952 } 9953 9954 ASSERT(!mutex_owned(SD_MUTEX(un))); 9955 9956 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9957 mutex_enter(SD_MUTEX(un)); 9958 /* 9959 * Because the call to sd_ready_and_valid will issue I/O we 9960 * must wait here if either the device is suspended or 9961 * if it's power level is changing. 9962 */ 9963 while ((un->un_state == SD_STATE_SUSPENDED) || 9964 (un->un_state == SD_STATE_PM_CHANGING)) { 9965 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9966 } 9967 un->un_ncmds_in_driver++; 9968 mutex_exit(SD_MUTEX(un)); 9969 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9970 mutex_enter(SD_MUTEX(un)); 9971 un->un_ncmds_in_driver--; 9972 ASSERT(un->un_ncmds_in_driver >= 0); 9973 mutex_exit(SD_MUTEX(un)); 9974 return (EIO); 9975 } 9976 mutex_enter(SD_MUTEX(un)); 9977 un->un_ncmds_in_driver--; 9978 ASSERT(un->un_ncmds_in_driver >= 0); 9979 mutex_exit(SD_MUTEX(un)); 9980 } 9981 9982 /* 9983 * Write requests are restricted to multiples of the system block size. 
9984 */ 9985 secmask = un->un_sys_blocksize - 1; 9986 9987 if (uio->uio_loffset & ((offset_t)(secmask))) { 9988 SD_ERROR(SD_LOG_READ_WRITE, un, 9989 "sdawrite: file offset not modulo %d\n", 9990 un->un_sys_blocksize); 9991 err = EINVAL; 9992 } else if (uio->uio_iov->iov_len & (secmask)) { 9993 SD_ERROR(SD_LOG_READ_WRITE, un, 9994 "sdawrite: transfer length not modulo %d\n", 9995 un->un_sys_blocksize); 9996 err = EINVAL; 9997 } else { 9998 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9999 } 10000 return (err); 10001 } 10002 10003 10004 10005 10006 10007 /* 10008 * Driver IO processing follows the following sequence: 10009 * 10010 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10011 * | | ^ 10012 * v v | 10013 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10014 * | | | | 10015 * v | | | 10016 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10017 * | | ^ ^ 10018 * v v | | 10019 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10020 * | | | | 10021 * +---+ | +------------+ +-------+ 10022 * | | | | 10023 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10024 * | v | | 10025 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10026 * | | ^ | 10027 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10028 * | v | | 10029 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10030 * | | ^ | 10031 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10032 * | v | | 10033 * | sd_checksum_iostart() sd_checksum_iodone() | 10034 * | | ^ | 10035 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10036 * | v | | 10037 * | sd_pm_iostart() sd_pm_iodone() | 10038 * | | ^ | 10039 * | | | | 10040 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10041 * | ^ 10042 * v | 10043 * sd_core_iostart() | 10044 * | | 10045 * | +------>(*destroypkt)() 10046 * +-> sd_start_cmds() <-+ | | 10047 * | | | v 10048 * | | | scsi_destroy_pkt(9F) 10049 * | | | 10050 * +->(*initpkt)() +- sdintr() 10051 * | | | | 10052 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10053 * | +-> scsi_setup_cdb(9F) | 10054 * | | 10055 * +--> scsi_transport(9F) | 10056 * | | 10057 * +----> SCSA ---->+ 10058 * 10059 * 10060 * This code is based upon the following presumptions: 10061 * 10062 * - iostart and iodone functions operate on buf(9S) structures. These 10063 * functions perform the necessary operations on the buf(9S) and pass 10064 * them along to the next function in the chain by using the macros 10065 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10066 * (for iodone side functions). 10067 * 10068 * - The iostart side functions may sleep. The iodone side functions 10069 * are called under interrupt context and may NOT sleep. Therefore 10070 * iodone side functions also may not call iostart side functions. 10071 * (NOTE: iostart side functions should NOT sleep for memory, as 10072 * this could result in deadlock.) 10073 * 10074 * - An iostart side function may call its corresponding iodone side 10075 * function directly (if necessary). 10076 * 10077 * - In the event of an error, an iostart side function can return a buf(9S) 10078 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10079 * b_error in the usual way of course). 10080 * 10081 * - The taskq mechanism may be used by the iodone side functions to dispatch 10082 * requests to the iostart side functions. The iostart side functions in 10083 * this case would be called under the context of a taskq thread, so it's 10084 * OK for them to block/sleep/spin in this case. 
10085  *
10086  * - iostart side functions may allocate "shadow" buf(9S) structs and
10087  *   pass them along to the next function in the chain. The corresponding
10088  *   iodone side functions must coalesce the "shadow" bufs and return
10089  *   the "original" buf to the next higher layer.
10090  *
10091  * - The b_private field of the buf(9S) struct holds a pointer to
10092  *   an sd_xbuf struct, which contains information needed to
10093  *   construct the scsi_pkt for the command.
10094  *
10095  * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
10096  *   layer must acquire & release the SD_MUTEX(un) as needed.
10097  */
10098
10099
10100 /*
10101  * Create taskq for all targets in the system. This is created at
10102  * _init(9E) and destroyed at _fini(9E).
10103  *
10104  * Note: here we set the minalloc to a reasonably high number to ensure that
10105  * we will have an adequate supply of task entries available at interrupt time.
10106  * This is used in conjunction with the TASKQ_PREPOPULATE flag in
10107  * sd_taskq_create(). Since we do not want to sleep for allocations at
10108  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
10109  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
10110  * requests at any one instant in time.
10111  */
10112 #define SD_TASKQ_NUMTHREADS 8
10113 #define SD_TASKQ_MINALLOC 256
10114 #define SD_TASKQ_MAXALLOC 256
10115
10116 static taskq_t *sd_tq = NULL;
10117 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
10118
10119 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
10120 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
10121
10122 /*
10123  * The following task queue is created for the write part of
10124  * read-modify-write on devices with a non-512 block size.
10125  * The number of threads is limited to 1 for now; this is adequate,
10126  * since the queue currently serves only DVD-RAM and MO drives, for
10127  * which performance is not the main criterion at this stage.
10128  * Note: whether a single taskq could serve both uses should be explored.
10129  */
10130 #define SD_WMR_TASKQ_NUMTHREADS 1
10131 static taskq_t *sd_wmr_tq = NULL;
10132 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
10133
10134 /*
10135  * Function: sd_taskq_create
10136  *
10137  * Description: Create taskq thread(s) and preallocate task entries
10138  *
10139  * Return Code: None; the allocated taskqs are stored in sd_tq and sd_wmr_tq.
10140  *
10141  * Context: Can sleep. Requires blockable context.
10142  *
10143  * Notes: - The taskq() facility currently is NOT part of the DDI.
10144  *	  (definitely NOT recommended for 3rd-party drivers!) :-)
10145  *	- taskq_create() will block for memory; it will also panic
10146  *	  if it cannot create the requested number of threads.
10147  *	- Currently taskq_create() creates threads that cannot be
10148  *	  swapped.
10149 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10150 * supply of taskq entries at interrupt time (ie, so that we 10151 * do not have to sleep for memory) 10152 */ 10153 10154 static void 10155 sd_taskq_create(void) 10156 { 10157 char taskq_name[TASKQ_NAMELEN]; 10158 10159 ASSERT(sd_tq == NULL); 10160 ASSERT(sd_wmr_tq == NULL); 10161 10162 (void) snprintf(taskq_name, sizeof (taskq_name), 10163 "%s_drv_taskq", sd_label); 10164 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10165 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10166 TASKQ_PREPOPULATE)); 10167 10168 (void) snprintf(taskq_name, sizeof (taskq_name), 10169 "%s_rmw_taskq", sd_label); 10170 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10171 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10172 TASKQ_PREPOPULATE)); 10173 } 10174 10175 10176 /* 10177 * Function: sd_taskq_delete 10178 * 10179 * Description: Complementary cleanup routine for sd_taskq_create(). 10180 * 10181 * Context: Kernel thread context. 10182 */ 10183 10184 static void 10185 sd_taskq_delete(void) 10186 { 10187 ASSERT(sd_tq != NULL); 10188 ASSERT(sd_wmr_tq != NULL); 10189 taskq_destroy(sd_tq); 10190 taskq_destroy(sd_wmr_tq); 10191 sd_tq = NULL; 10192 sd_wmr_tq = NULL; 10193 } 10194 10195 10196 /* 10197 * Function: sdstrategy 10198 * 10199 * Description: Driver's strategy (9E) entry point function. 10200 * 10201 * Arguments: bp - pointer to buf(9S) 10202 * 10203 * Return Code: Always returns zero 10204 * 10205 * Context: Kernel thread context. 10206 */ 10207 10208 static int 10209 sdstrategy(struct buf *bp) 10210 { 10211 struct sd_lun *un; 10212 10213 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10214 if (un == NULL) { 10215 bioerror(bp, EIO); 10216 bp->b_resid = bp->b_bcount; 10217 biodone(bp); 10218 return (0); 10219 } 10220 /* As was done in the past, fail new cmds. if state is dumping. */ 10221 if (un->un_state == SD_STATE_DUMPING) { 10222 bioerror(bp, ENXIO); 10223 bp->b_resid = bp->b_bcount; 10224 biodone(bp); 10225 return (0); 10226 } 10227 10228 ASSERT(!mutex_owned(SD_MUTEX(un))); 10229 10230 /* 10231 * Commands may sneak in while we released the mutex in 10232 * DDI_SUSPEND, we should block new commands. However, old 10233 * commands that are still in the driver at this point should 10234 * still be allowed to drain. 10235 */ 10236 mutex_enter(SD_MUTEX(un)); 10237 /* 10238 * Must wait here if either the device is suspended or 10239 * if it's power level is changing. 10240 */ 10241 while ((un->un_state == SD_STATE_SUSPENDED) || 10242 (un->un_state == SD_STATE_PM_CHANGING)) { 10243 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10244 } 10245 10246 un->un_ncmds_in_driver++; 10247 10248 /* 10249 * atapi: Since we are running the CD for now in PIO mode we need to 10250 * call bp_mapin here to avoid bp_mapin called interrupt context under 10251 * the HBA's init_pkt routine. 10252 */ 10253 if (un->un_f_cfg_is_atapi == TRUE) { 10254 mutex_exit(SD_MUTEX(un)); 10255 bp_mapin(bp); 10256 mutex_enter(SD_MUTEX(un)); 10257 } 10258 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10259 un->un_ncmds_in_driver); 10260 10261 mutex_exit(SD_MUTEX(un)); 10262 10263 /* 10264 * This will (eventually) allocate the sd_xbuf area and 10265 * call sd_xbuf_strategy(). We just want to return the 10266 * result of ddi_xbuf_qstrategy so that we have an opt- 10267 * imized tail call which saves us a stack frame. 
10268 */ 10269 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10270 } 10271 10272 10273 /* 10274 * Function: sd_xbuf_strategy 10275 * 10276 * Description: Function for initiating IO operations via the 10277 * ddi_xbuf_qstrategy() mechanism. 10278 * 10279 * Context: Kernel thread context. 10280 */ 10281 10282 static void 10283 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10284 { 10285 struct sd_lun *un = arg; 10286 10287 ASSERT(bp != NULL); 10288 ASSERT(xp != NULL); 10289 ASSERT(un != NULL); 10290 ASSERT(!mutex_owned(SD_MUTEX(un))); 10291 10292 /* 10293 * Initialize the fields in the xbuf and save a pointer to the 10294 * xbuf in bp->b_private. 10295 */ 10296 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10297 10298 /* Send the buf down the iostart chain */ 10299 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10300 } 10301 10302 10303 /* 10304 * Function: sd_xbuf_init 10305 * 10306 * Description: Prepare the given sd_xbuf struct for use. 10307 * 10308 * Arguments: un - ptr to softstate 10309 * bp - ptr to associated buf(9S) 10310 * xp - ptr to associated sd_xbuf 10311 * chain_type - IO chain type to use: 10312 * SD_CHAIN_NULL 10313 * SD_CHAIN_BUFIO 10314 * SD_CHAIN_USCSI 10315 * SD_CHAIN_DIRECT 10316 * SD_CHAIN_DIRECT_PRIORITY 10317 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10318 * initialization; may be NULL if none. 10319 * 10320 * Context: Kernel thread context 10321 */ 10322 10323 static void 10324 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10325 uchar_t chain_type, void *pktinfop) 10326 { 10327 int index; 10328 10329 ASSERT(un != NULL); 10330 ASSERT(bp != NULL); 10331 ASSERT(xp != NULL); 10332 10333 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10334 bp, chain_type); 10335 10336 xp->xb_un = un; 10337 xp->xb_pktp = NULL; 10338 xp->xb_pktinfo = pktinfop; 10339 xp->xb_private = bp->b_private; 10340 xp->xb_blkno = (daddr_t)bp->b_blkno; 10341 10342 /* 10343 * Set up the iostart and iodone chain indexes in the xbuf, based 10344 * upon the specified chain type to use. 10345 */ 10346 switch (chain_type) { 10347 case SD_CHAIN_NULL: 10348 /* 10349 * Fall thru to just use the values for the buf type, even 10350 * tho for the NULL chain these values will never be used. 10351 */ 10352 /* FALLTHRU */ 10353 case SD_CHAIN_BUFIO: 10354 index = un->un_buf_chain_type; 10355 break; 10356 case SD_CHAIN_USCSI: 10357 index = un->un_uscsi_chain_type; 10358 break; 10359 case SD_CHAIN_DIRECT: 10360 index = un->un_direct_chain_type; 10361 break; 10362 case SD_CHAIN_DIRECT_PRIORITY: 10363 index = un->un_priority_chain_type; 10364 break; 10365 default: 10366 /* We're really broken if we ever get here... */ 10367 panic("sd_xbuf_init: illegal chain type!"); 10368 /*NOTREACHED*/ 10369 } 10370 10371 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10372 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10373 10374 /* 10375 * It might be a bit easier to simply bzero the entire xbuf above, 10376 * but it turns out that since we init a fair number of members anyway, 10377 * we save a fair number cycles by doing explicit assignment of zero. 
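 *
 * The xb_chain_iostart/xb_chain_iodone indexes chosen above drive the
 * layered dispatch pictured in the large diagram earlier: each layer is a
 * slot in a function-pointer table, and SD_NEXT_IOSTART()/SD_NEXT_IODONE()
 * simply step the index. A stand-alone model of the iostart side (two toy
 * layers; all names here are illustrative):
 *
 *	struct req;
 *	typedef void (*layer_fn)(int index, struct req *rq);
 *
 *	static void layer_map(int index, struct req *rq);
 *	static void layer_core(int index, struct req *rq);
 *
 *	static layer_fn iostart_chain[] = { layer_map, layer_core };
 *
 *	#define NEXT_IOSTART(index, rq) \
 *		(*(iostart_chain[(index) + 1]))((index) + 1, (rq))
 *
 *	static void
 *	layer_map(int index, struct req *rq)
 *	{
 *		NEXT_IOSTART(index, rq);
 *	}
 *
 *	static void
 *	layer_core(int index, struct req *rq)
 *	{
 *	}
 *
 * layer_core is terminal and does not call NEXT_IOSTART(). Picking a
 * different starting index (as the switch above does through
 * un_buf_chain_type and friends) selects which layers a request passes
 * through; that is how the *_NO_PM chains skip the pm layer.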
10378 */ 10379 xp->xb_pkt_flags = 0; 10380 xp->xb_dma_resid = 0; 10381 xp->xb_retry_count = 0; 10382 xp->xb_victim_retry_count = 0; 10383 xp->xb_ua_retry_count = 0; 10384 xp->xb_nr_retry_count = 0; 10385 xp->xb_sense_bp = NULL; 10386 xp->xb_sense_status = 0; 10387 xp->xb_sense_state = 0; 10388 xp->xb_sense_resid = 0; 10389 10390 bp->b_private = xp; 10391 bp->b_flags &= ~(B_DONE | B_ERROR); 10392 bp->b_resid = 0; 10393 bp->av_forw = NULL; 10394 bp->av_back = NULL; 10395 bioerror(bp, 0); 10396 10397 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10398 } 10399 10400 10401 /* 10402 * Function: sd_uscsi_strategy 10403 * 10404 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10405 * 10406 * Arguments: bp - buf struct ptr 10407 * 10408 * Return Code: Always returns 0 10409 * 10410 * Context: Kernel thread context 10411 */ 10412 10413 static int 10414 sd_uscsi_strategy(struct buf *bp) 10415 { 10416 struct sd_lun *un; 10417 struct sd_uscsi_info *uip; 10418 struct sd_xbuf *xp; 10419 uchar_t chain_type; 10420 10421 ASSERT(bp != NULL); 10422 10423 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10424 if (un == NULL) { 10425 bioerror(bp, EIO); 10426 bp->b_resid = bp->b_bcount; 10427 biodone(bp); 10428 return (0); 10429 } 10430 10431 ASSERT(!mutex_owned(SD_MUTEX(un))); 10432 10433 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10434 10435 mutex_enter(SD_MUTEX(un)); 10436 /* 10437 * atapi: Since we are running the CD for now in PIO mode we need to 10438 * call bp_mapin here to avoid bp_mapin called interrupt context under 10439 * the HBA's init_pkt routine. 10440 */ 10441 if (un->un_f_cfg_is_atapi == TRUE) { 10442 mutex_exit(SD_MUTEX(un)); 10443 bp_mapin(bp); 10444 mutex_enter(SD_MUTEX(un)); 10445 } 10446 un->un_ncmds_in_driver++; 10447 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10448 un->un_ncmds_in_driver); 10449 mutex_exit(SD_MUTEX(un)); 10450 10451 /* 10452 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10453 */ 10454 ASSERT(bp->b_private != NULL); 10455 uip = (struct sd_uscsi_info *)bp->b_private; 10456 10457 switch (uip->ui_flags) { 10458 case SD_PATH_DIRECT: 10459 chain_type = SD_CHAIN_DIRECT; 10460 break; 10461 case SD_PATH_DIRECT_PRIORITY: 10462 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10463 break; 10464 default: 10465 chain_type = SD_CHAIN_USCSI; 10466 break; 10467 } 10468 10469 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10470 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10471 10472 /* Use the index obtained within xbuf_init */ 10473 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10474 10475 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10476 10477 return (0); 10478 } 10479 10480 /* 10481 * Function: sd_send_scsi_cmd 10482 * 10483 * Description: Runs a USCSI command for user (when called thru sdioctl), 10484 * or for the driver 10485 * 10486 * Arguments: dev - the dev_t for the device 10487 * incmd - ptr to a valid uscsi_cmd struct 10488 * flag - bit flag, indicating open settings, 32/64 bit type 10489 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10490 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10491 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10492 * to use the USCSI "direct" chain and bypass the normal 10493 * command waitq. 
10494  *
10495  * Return Code: 0 - successful completion of the given command
10496  *		EIO - scsi_uscsi_handle_cmd() failed
10497  *		ENXIO - soft state not found for specified dev
10498  *		EINVAL
10499  *		EFAULT - copyin/copyout error
10500  *		return code of scsi_uscsi_handle_cmd():
10501  *		EIO
10502  *		ENXIO
10503  *		EACCES
10504  *
10505  * Context: Waits for command to complete. Can sleep.
10506  */
10507
10508 static int
10509 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
10510     enum uio_seg dataspace, int path_flag)
10511 {
10512 	struct sd_uscsi_info *uip;
10513 	struct uscsi_cmd *uscmd;
10514 	struct sd_lun *un;
10515 	int format = 0;
10516 	int rval;
10517
10518 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
10519 	if (un == NULL) {
10520 		return (ENXIO);
10521 	}
10522
10523 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10524
10525 #ifdef SDDEBUG
10526 	switch (dataspace) {
10527 	case UIO_USERSPACE:
10528 		SD_TRACE(SD_LOG_IO, un,
10529 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
10530 		break;
10531 	case UIO_SYSSPACE:
10532 		SD_TRACE(SD_LOG_IO, un,
10533 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
10534 		break;
10535 	default:
10536 		SD_TRACE(SD_LOG_IO, un,
10537 		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
10538 		break;
10539 	}
10540 #endif
10541
10542 	rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
10543 	    SD_ADDRESS(un), &uscmd);
10544 	if (rval != 0) {
10545 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
10546 		    "scsi_uscsi_alloc_and_copyin failed, un:0x%p\n", un);
10547 		return (rval);
10548 	}
10549
10550 	if ((uscmd->uscsi_cdb != NULL) &&
10551 	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
10552 		mutex_enter(SD_MUTEX(un));
10553 		un->un_f_format_in_progress = TRUE;
10554 		mutex_exit(SD_MUTEX(un));
10555 		format = 1;
10556 	}
10557
10558 	/*
10559 	 * Allocate an sd_uscsi_info struct and fill it with the info
10560 	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
10561 	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
10562 	 * since we allocate the buf here in this function, we do not
10563 	 * need to preserve the prior contents of b_private.
10564 	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
10565 	 */
10566 	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
10567 	uip->ui_flags = path_flag;
10568 	uip->ui_cmdp = uscmd;
10569
10570 	/*
10571 	 * Commands sent with priority are intended for error recovery
10572 	 * situations, and do not have retries performed.
10573 */ 10574 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10575 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10576 } 10577 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10578 10579 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10580 sd_uscsi_strategy, NULL, uip); 10581 10582 #ifdef SDDEBUG 10583 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10584 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10585 uscmd->uscsi_status, uscmd->uscsi_resid); 10586 if (uscmd->uscsi_bufaddr != NULL) { 10587 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10588 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10589 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10590 if (dataspace == UIO_SYSSPACE) { 10591 SD_DUMP_MEMORY(un, SD_LOG_IO, 10592 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10593 uscmd->uscsi_buflen, SD_LOG_HEX); 10594 } 10595 } 10596 #endif 10597 10598 if (format == 1) { 10599 mutex_enter(SD_MUTEX(un)); 10600 un->un_f_format_in_progress = FALSE; 10601 mutex_exit(SD_MUTEX(un)); 10602 } 10603 10604 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10605 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10606 10607 return (rval); 10608 } 10609 10610 10611 /* 10612 * Function: sd_buf_iodone 10613 * 10614 * Description: Frees the sd_xbuf & returns the buf to its originator. 10615 * 10616 * Context: May be called from interrupt context. 10617 */ 10618 /* ARGSUSED */ 10619 static void 10620 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10621 { 10622 struct sd_xbuf *xp; 10623 10624 ASSERT(un != NULL); 10625 ASSERT(bp != NULL); 10626 ASSERT(!mutex_owned(SD_MUTEX(un))); 10627 10628 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10629 10630 xp = SD_GET_XBUF(bp); 10631 ASSERT(xp != NULL); 10632 10633 mutex_enter(SD_MUTEX(un)); 10634 10635 /* 10636 * Grab time when the cmd completed. 10637 * This is used for determining if the system has been 10638 * idle long enough to make it idle to the PM framework. 10639 * This is for lowering the overhead, and therefore improving 10640 * performance per I/O operation. 10641 */ 10642 un->un_pm_idle_time = ddi_get_time(); 10643 10644 un->un_ncmds_in_driver--; 10645 ASSERT(un->un_ncmds_in_driver >= 0); 10646 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10647 un->un_ncmds_in_driver); 10648 10649 mutex_exit(SD_MUTEX(un)); 10650 10651 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10652 biodone(bp); /* bp is gone after this */ 10653 10654 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10655 } 10656 10657 10658 /* 10659 * Function: sd_uscsi_iodone 10660 * 10661 * Description: Frees the sd_xbuf & returns the buf to its originator. 10662 * 10663 * Context: May be called from interrupt context. 10664 */ 10665 /* ARGSUSED */ 10666 static void 10667 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10668 { 10669 struct sd_xbuf *xp; 10670 10671 ASSERT(un != NULL); 10672 ASSERT(bp != NULL); 10673 10674 xp = SD_GET_XBUF(bp); 10675 ASSERT(xp != NULL); 10676 ASSERT(!mutex_owned(SD_MUTEX(un))); 10677 10678 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10679 10680 bp->b_private = xp->xb_private; 10681 10682 mutex_enter(SD_MUTEX(un)); 10683 10684 /* 10685 * Grab time when the cmd completed. 10686 * This is used for determining if the system has been 10687 * idle long enough to make it idle to the PM framework. 10688 * This is for lowering the overhead, and therefore improving 10689 * performance per I/O operation. 
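 *
 * A user-level model of this bookkeeping: completions stamp a "last
 * active" time, and a periodic handler (the analogue of
 * sd_pm_idletimeout_handler(), armed from sd_pm_entry()) declares the
 * device idle once enough time has passed with no commands pending.
 * All names and the threshold handling here are illustrative:
 *
 *	#include <time.h>
 *
 *	static time_t last_active;
 *	static int ncmds;
 *
 *	void
 *	iodone_model(void)
 *	{
 *		last_active = time(NULL);
 *		ncmds--;
 *	}
 *
 *	int
 *	is_idle_model(time_t threshold)
 *	{
 *		return (ncmds == 0 &&
 *		    time(NULL) - last_active >= threshold);
 *	}
 *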
10690  */
10691 	un->un_pm_idle_time = ddi_get_time();
10692
10693 	un->un_ncmds_in_driver--;
10694 	ASSERT(un->un_ncmds_in_driver >= 0);
10695 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
10696 	    un->un_ncmds_in_driver);
10697
10698 	mutex_exit(SD_MUTEX(un));
10699
10700 	kmem_free(xp, sizeof (struct sd_xbuf));
10701 	biodone(bp);
10702
10703 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
10704 }
10705
10706
10707 /*
10708  * Function: sd_mapblockaddr_iostart
10709  *
10710  * Description: Verify request lies within the partition limits for
10711  *		the indicated minor device. Issue "overrun" buf if
10712  *		request would exceed partition range. Converts
10713  *		partition-relative block address to absolute.
10714  *
10715  * Context: Can sleep
10716  *
10717  * Issues: This follows what the old code did, in terms of accessing
10718  *		some of the partition info in the unit struct without holding
10719  *		the mutex. This is a general issue: if the partition info
10720  *		can be altered while IO is in progress, then as soon as we
10721  *		send a buf, its partitioning can become invalid before it
10722  *		gets to the device. Probably the right fix is to move
10723  *		partitioning out of the driver entirely.
10724  */
10725
10726 static void
10727 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
10728 {
10729 	diskaddr_t nblocks;	/* #blocks in the given partition */
10730 	daddr_t blocknum;	/* Block number specified by the buf */
10731 	size_t requested_nblocks;
10732 	size_t available_nblocks;
10733 	int partition;
10734 	diskaddr_t partition_offset;
10735 	struct sd_xbuf *xp;
10736
10737
10738 	ASSERT(un != NULL);
10739 	ASSERT(bp != NULL);
10740 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10741
10742 	SD_TRACE(SD_LOG_IO_PARTITION, un,
10743 	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
10744
10745 	xp = SD_GET_XBUF(bp);
10746 	ASSERT(xp != NULL);
10747
10748 	/*
10749 	 * If the geometry is not indicated as valid, attempt to access
10750 	 * the unit & verify the geometry/label. This can be the case for
10751 	 * removable-media devices, or if the device was opened in
10752 	 * NDELAY/NONBLOCK mode.
10753 	 */
10754 	if (!SD_IS_VALID_LABEL(un) &&
10755 	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
10756 		/*
10757 		 * For removable devices it is possible to start an I/O
10758 		 * without media by opening the device in nodelay mode.
10759 		 * Also for writable CDs there can be many scenarios where
10760 		 * there is no geometry yet but volume manager is trying to
10761 		 * issue a read() just because it can see the TOC on the CD.
10762 		 * So do not print a message for removables.
10763 		 */
10764 		if (!un->un_f_has_removable_media) {
10765 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10766 			    "i/o to invalid geometry\n");
10767 		}
10768 		bioerror(bp, EIO);
10769 		bp->b_resid = bp->b_bcount;
10770 		SD_BEGIN_IODONE(index, un, bp);
10771 		return;
10772 	}
10773
10774 	partition = SDPART(bp->b_edev);
10775
10776 	nblocks = 0;
10777 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
10778 	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
10779
10780 	/*
10781 	 * blocknum is the starting block number of the request. At this
10782 	 * point it is still relative to the start of the minor device.
10783 	 */
10784 	blocknum = xp->xb_blkno;
10785
10786 	/*
10787 	 * Legacy: If the starting block number is one past the last block
10788 	 * in the partition, do not set B_ERROR in the buf.
10789  */
10790 	if (blocknum == nblocks) {
10791 		goto error_exit;
10792 	}
10793
10794 	/*
10795 	 * Confirm that the first block of the request lies within the
10796 	 * partition limits. Also the requested number of bytes must be
10797 	 * a multiple of the system block size.
10798 	 */
10799 	if ((blocknum < 0) || (blocknum >= nblocks) ||
10800 	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
10801 		bp->b_flags |= B_ERROR;
10802 		goto error_exit;
10803 	}
10804
10805 	/*
10806 	 * If the requested # blocks exceeds the available # blocks, that
10807 	 * is an overrun of the partition.
10808 	 */
10809 	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
10810 	available_nblocks = (size_t)(nblocks - blocknum);
10811 	ASSERT(nblocks >= blocknum);
10812
10813 	if (requested_nblocks > available_nblocks) {
10814 		/*
10815 		 * Allocate an "overrun" buf to allow the request to proceed
10816 		 * for the amount of space available in the partition. The
10817 		 * amount not transferred will be added into the b_resid
10818 		 * when the operation is complete. The overrun buf
10819 		 * replaces the original buf here, and the original buf
10820 		 * is saved inside the overrun buf, for later use.
10821 		 */
10822 		size_t resid = SD_SYSBLOCKS2BYTES(un,
10823 		    (offset_t)(requested_nblocks - available_nblocks));
10824 		size_t count = bp->b_bcount - resid;
10825 		/*
10826 		 * Note: count is an unsigned entity, thus it can never
10827 		 * be less than 0, so ASSERT that the original values are
10828 		 * consistent.
10829 		 */
10830 		ASSERT(bp->b_bcount >= resid);
10831
10832 		bp = sd_bioclone_alloc(bp, count, blocknum,
10833 		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
10834 		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
10835 		ASSERT(xp != NULL);
10836 	}
10837
10838 	/* At this point there should be no residual for this buf. */
10839 	ASSERT(bp->b_resid == 0);
10840
10841 	/* Convert the block number to an absolute address. */
10842 	xp->xb_blkno += partition_offset;
10843
10844 	SD_NEXT_IOSTART(index, un, bp);
10845
10846 	SD_TRACE(SD_LOG_IO_PARTITION, un,
10847 	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
10848
10849 	return;
10850
10851 error_exit:
10852 	bp->b_resid = bp->b_bcount;
10853 	SD_BEGIN_IODONE(index, un, bp);
10854 	SD_TRACE(SD_LOG_IO_PARTITION, un,
10855 	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
10856 }
10857
10858
10859 /*
10860  * Function: sd_mapblockaddr_iodone
10861  *
10862  * Description: Completion-side processing for partition management.
10863  *
10864  * Context: May be called under interrupt context
10865  */
10866
10867 static void
10868 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
10869 {
10870 	/* int partition; */	/* Not used, see below. */
10871 	ASSERT(un != NULL);
10872 	ASSERT(bp != NULL);
10873 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10874
10875 	SD_TRACE(SD_LOG_IO_PARTITION, un,
10876 	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
10877
10878 	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
10879 		/*
10880 		 * We have an "overrun" buf to deal with...
10881 		 */
10882 		struct sd_xbuf *xp;
10883 		struct buf *obp;	/* ptr to the original buf */
10884
10885 		xp = SD_GET_XBUF(bp);
10886 		ASSERT(xp != NULL);
10887
10888 		/* Retrieve the pointer to the original buf */
10889 		obp = (struct buf *)xp->xb_private;
10890 		ASSERT(obp != NULL);
10891
10892 		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
10893 		bioerror(obp, bp->b_error);
10894
10895 		sd_bioclone_free(bp);
10896
10897 		/*
10898 		 * Get back the original buf.
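		 *
		 * The overrun arithmetic from the iostart side can be
		 * checked with made-up numbers. Suppose a partition holds
		 * 1000 system blocks of 512 bytes and a request for 16
		 * blocks starts at block 992:
		 *
		 *	requested_nblocks = 16
		 *	available_nblocks = 1000 - 992 = 8
		 *	resid = (16 - 8) * 512 = 4096 bytes
		 *	count = 16 * 512 - 4096 = 4096 bytes
		 *
		 * The clone is sent down for count bytes; on completion the
		 * line above gives the original buf
		 * b_resid = 8192 - (4096 - clone_resid), i.e. the 4096
		 * truncated bytes plus anything the clone itself left undone.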
10899 * Note that since the restoration of xb_blkno below 10900 * was removed, the sd_xbuf is not needed. 10901 */ 10902 bp = obp; 10903 /* 10904 * xp = SD_GET_XBUF(bp); 10905 * ASSERT(xp != NULL); 10906 */ 10907 } 10908 10909 /* 10910 * Convert sd->xb_blkno back to a minor-device relative value. 10911 * Note: this has been commented out, as it is not needed in the 10912 * current implementation of the driver (ie, since this function 10913 * is at the top of the layering chains, so the info will be 10914 * discarded) and it is in the "hot" IO path. 10915 * 10916 * partition = getminor(bp->b_edev) & SDPART_MASK; 10917 * xp->xb_blkno -= un->un_offset[partition]; 10918 */ 10919 10920 SD_NEXT_IODONE(index, un, bp); 10921 10922 SD_TRACE(SD_LOG_IO_PARTITION, un, 10923 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10924 } 10925 10926 10927 /* 10928 * Function: sd_mapblocksize_iostart 10929 * 10930 * Description: Convert between system block size (un->un_sys_blocksize) 10931 * and target block size (un->un_tgt_blocksize). 10932 * 10933 * Context: Can sleep to allocate resources. 10934 * 10935 * Assumptions: A higher layer has already performed any partition validation, 10936 * and converted the xp->xb_blkno to an absolute value relative 10937 * to the start of the device. 10938 * 10939 * It is also assumed that the higher layer has implemented 10940 * an "overrun" mechanism for the case where the request would 10941 * read/write beyond the end of a partition. In this case we 10942 * assume (and ASSERT) that bp->b_resid == 0. 10943 * 10944 * Note: The implementation for this routine assumes the target 10945 * block size remains constant between allocation and transport. 10946 */ 10947 10948 static void 10949 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10950 { 10951 struct sd_mapblocksize_info *bsp; 10952 struct sd_xbuf *xp; 10953 offset_t first_byte; 10954 daddr_t start_block, end_block; 10955 daddr_t request_bytes; 10956 ushort_t is_aligned = FALSE; 10957 10958 ASSERT(un != NULL); 10959 ASSERT(bp != NULL); 10960 ASSERT(!mutex_owned(SD_MUTEX(un))); 10961 ASSERT(bp->b_resid == 0); 10962 10963 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10964 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10965 10966 /* 10967 * For a non-writable CD, a write request is an error 10968 */ 10969 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10970 (un->un_f_mmc_writable_media == FALSE)) { 10971 bioerror(bp, EIO); 10972 bp->b_resid = bp->b_bcount; 10973 SD_BEGIN_IODONE(index, un, bp); 10974 return; 10975 } 10976 10977 /* 10978 * We do not need a shadow buf if the device is using 10979 * un->un_sys_blocksize as its block size or if bcount == 0. 10980 * In this case there is no layer-private data block allocated. 
10981 */ 10982 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10983 (bp->b_bcount == 0)) { 10984 goto done; 10985 } 10986 10987 #if defined(__i386) || defined(__amd64) 10988 /* We do not support non-block-aligned transfers for ROD devices */ 10989 ASSERT(!ISROD(un)); 10990 #endif 10991 10992 xp = SD_GET_XBUF(bp); 10993 ASSERT(xp != NULL); 10994 10995 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10996 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10997 un->un_tgt_blocksize, un->un_sys_blocksize); 10998 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10999 "request start block:0x%x\n", xp->xb_blkno); 11000 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11001 "request len:0x%x\n", bp->b_bcount); 11002 11003 /* 11004 * Allocate the layer-private data area for the mapblocksize layer. 11005 * Layers are allowed to use the xp_private member of the sd_xbuf 11006 * struct to store the pointer to their layer-private data block, but 11007 * each layer also has the responsibility of restoring the prior 11008 * contents of xb_private before returning the buf/xbuf to the 11009 * higher layer that sent it. 11010 * 11011 * Here we save the prior contents of xp->xb_private into the 11012 * bsp->mbs_oprivate field of our layer-private data area. This value 11013 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11014 * the layer-private area and returning the buf/xbuf to the layer 11015 * that sent it. 11016 * 11017 * Note that here we use kmem_zalloc for the allocation as there are 11018 * parts of the mapblocksize code that expect certain fields to be 11019 * zero unless explicitly set to a required value. 11020 */ 11021 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11022 bsp->mbs_oprivate = xp->xb_private; 11023 xp->xb_private = bsp; 11024 11025 /* 11026 * This treats the data on the disk (target) as an array of bytes. 11027 * first_byte is the byte offset, from the beginning of the device, 11028 * to the location of the request. This is converted from a 11029 * un->un_sys_blocksize block address to a byte offset, and then back 11030 * to a block address based upon a un->un_tgt_blocksize block size. 11031 * 11032 * xp->xb_blkno should be absolute upon entry into this function, 11033 * but, but it is based upon partitions that use the "system" 11034 * block size. It must be adjusted to reflect the block size of 11035 * the target. 11036 * 11037 * Note that end_block is actually the block that follows the last 11038 * block of the request, but that's what is needed for the computation. 11039 */ 11040 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11041 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11042 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11043 un->un_tgt_blocksize; 11044 11045 /* request_bytes is rounded up to a multiple of the target block size */ 11046 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11047 11048 /* 11049 * See if the starting address of the request and the request 11050 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11051 * then we do not need to allocate a shadow buf to handle the request. 11052 */ 11053 if (((first_byte % un->un_tgt_blocksize) == 0) && 11054 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11055 is_aligned = TRUE; 11056 } 11057 11058 if ((bp->b_flags & B_READ) == 0) { 11059 /* 11060 * Lock the range for a write operation. 
An aligned request is 11061 * considered a simple write; otherwise the request must be a 11062 * read-modify-write. 11063 */ 11064 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11065 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11066 } 11067 11068 /* 11069 * Alloc a shadow buf if the request is not aligned. Also, this is 11070 * where the READ command is generated for a read-modify-write. (The 11071 * write phase is deferred until after the read completes.) 11072 */ 11073 if (is_aligned == FALSE) { 11074 11075 struct sd_mapblocksize_info *shadow_bsp; 11076 struct sd_xbuf *shadow_xp; 11077 struct buf *shadow_bp; 11078 11079 /* 11080 * Allocate the shadow buf and it associated xbuf. Note that 11081 * after this call the xb_blkno value in both the original 11082 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11083 * same: absolute relative to the start of the device, and 11084 * adjusted for the target block size. The b_blkno in the 11085 * shadow buf will also be set to this value. We should never 11086 * change b_blkno in the original bp however. 11087 * 11088 * Note also that the shadow buf will always need to be a 11089 * READ command, regardless of whether the incoming command 11090 * is a READ or a WRITE. 11091 */ 11092 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11093 xp->xb_blkno, 11094 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11095 11096 shadow_xp = SD_GET_XBUF(shadow_bp); 11097 11098 /* 11099 * Allocate the layer-private data for the shadow buf. 11100 * (No need to preserve xb_private in the shadow xbuf.) 11101 */ 11102 shadow_xp->xb_private = shadow_bsp = 11103 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11104 11105 /* 11106 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11107 * to figure out where the start of the user data is (based upon 11108 * the system block size) in the data returned by the READ 11109 * command (which will be based upon the target blocksize). Note 11110 * that this is only really used if the request is unaligned. 11111 */ 11112 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11113 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11114 ASSERT((bsp->mbs_copy_offset >= 0) && 11115 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11116 11117 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11118 11119 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11120 11121 /* Transfer the wmap (if any) to the shadow buf */ 11122 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11123 bsp->mbs_wmp = NULL; 11124 11125 /* 11126 * The shadow buf goes on from here in place of the 11127 * original buf. 11128 */ 11129 shadow_bsp->mbs_orig_bp = bp; 11130 bp = shadow_bp; 11131 } 11132 11133 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11134 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11135 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11136 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11137 request_bytes); 11138 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11139 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 11140 11141 done: 11142 SD_NEXT_IOSTART(index, un, bp); 11143 11144 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11145 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11146 } 11147 11148 11149 /* 11150 * Function: sd_mapblocksize_iodone 11151 * 11152 * Description: Completion side processing for block-size mapping. 
11153 * 11154 * Context: May be called under interrupt context 11155 */ 11156 11157 static void 11158 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11159 { 11160 struct sd_mapblocksize_info *bsp; 11161 struct sd_xbuf *xp; 11162 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11163 struct buf *orig_bp; /* ptr to the original buf */ 11164 offset_t shadow_end; 11165 offset_t request_end; 11166 offset_t shadow_start; 11167 ssize_t copy_offset; 11168 size_t copy_length; 11169 size_t shortfall; 11170 uint_t is_write; /* TRUE if this bp is a WRITE */ 11171 uint_t has_wmap; /* TRUE is this bp has a wmap */ 11172 11173 ASSERT(un != NULL); 11174 ASSERT(bp != NULL); 11175 11176 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11177 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11178 11179 /* 11180 * There is no shadow buf or layer-private data if the target is 11181 * using un->un_sys_blocksize as its block size or if bcount == 0. 11182 */ 11183 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11184 (bp->b_bcount == 0)) { 11185 goto exit; 11186 } 11187 11188 xp = SD_GET_XBUF(bp); 11189 ASSERT(xp != NULL); 11190 11191 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11192 bsp = xp->xb_private; 11193 11194 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11195 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11196 11197 if (is_write) { 11198 /* 11199 * For a WRITE request we must free up the block range that 11200 * we have locked up. This holds regardless of whether this is 11201 * an aligned write request or a read-modify-write request. 11202 */ 11203 sd_range_unlock(un, bsp->mbs_wmp); 11204 bsp->mbs_wmp = NULL; 11205 } 11206 11207 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11208 /* 11209 * An aligned read or write command will have no shadow buf; 11210 * there is not much else to do with it. 11211 */ 11212 goto done; 11213 } 11214 11215 orig_bp = bsp->mbs_orig_bp; 11216 ASSERT(orig_bp != NULL); 11217 orig_xp = SD_GET_XBUF(orig_bp); 11218 ASSERT(orig_xp != NULL); 11219 ASSERT(!mutex_owned(SD_MUTEX(un))); 11220 11221 if (!is_write && has_wmap) { 11222 /* 11223 * A READ with a wmap means this is the READ phase of a 11224 * read-modify-write. If an error occurred on the READ then 11225 * we do not proceed with the WRITE phase or copy any data. 11226 * Just release the write maps and return with an error. 11227 */ 11228 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11229 orig_bp->b_resid = orig_bp->b_bcount; 11230 bioerror(orig_bp, bp->b_error); 11231 sd_range_unlock(un, bsp->mbs_wmp); 11232 goto freebuf_done; 11233 } 11234 } 11235 11236 /* 11237 * Here is where we set up to copy the data from the shadow buf 11238 * into the space associated with the original buf. 11239 * 11240 * To deal with the conversion between block sizes, these 11241 * computations treat the data as an array of bytes, with the 11242 * first byte (byte 0) corresponding to the first byte in the 11243 * first block on the disk. 11244 */ 11245 11246 /* 11247 * shadow_start and shadow_len indicate the location and size of 11248 * the data returned with the shadow IO request. 11249 */ 11250 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11251 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11252 11253 /* 11254 * copy_offset gives the offset (in bytes) from the start of the first 11255 * block of the READ request to the beginning of the data. 
We retrieve 11256 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved 11257 * there by sd_mapblockize_iostart(). copy_length gives the amount of 11258 * data to be copied (in bytes). 11259 */ 11260 copy_offset = bsp->mbs_copy_offset; 11261 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11262 copy_length = orig_bp->b_bcount; 11263 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11264 11265 /* 11266 * Set up the resid and error fields of orig_bp as appropriate. 11267 */ 11268 if (shadow_end >= request_end) { 11269 /* We got all the requested data; set resid to zero */ 11270 orig_bp->b_resid = 0; 11271 } else { 11272 /* 11273 * We failed to get enough data to fully satisfy the original 11274 * request. Just copy back whatever data we got and set 11275 * up the residual and error code as required. 11276 * 11277 * 'shortfall' is the amount by which the data received with the 11278 * shadow buf has "fallen short" of the requested amount. 11279 */ 11280 shortfall = (size_t)(request_end - shadow_end); 11281 11282 if (shortfall > orig_bp->b_bcount) { 11283 /* 11284 * We did not get enough data to even partially 11285 * fulfill the original request. The residual is 11286 * equal to the amount requested. 11287 */ 11288 orig_bp->b_resid = orig_bp->b_bcount; 11289 } else { 11290 /* 11291 * We did not get all the data that we requested 11292 * from the device, but we will try to return what 11293 * portion we did get. 11294 */ 11295 orig_bp->b_resid = shortfall; 11296 } 11297 ASSERT(copy_length >= orig_bp->b_resid); 11298 copy_length -= orig_bp->b_resid; 11299 } 11300 11301 /* Propagate the error code from the shadow buf to the original buf */ 11302 bioerror(orig_bp, bp->b_error); 11303 11304 if (is_write) { 11305 goto freebuf_done; /* No data copying for a WRITE */ 11306 } 11307 11308 if (has_wmap) { 11309 /* 11310 * This is a READ command from the READ phase of a 11311 * read-modify-write request. We have to copy the data given 11312 * by the user OVER the data returned by the READ command, 11313 * then convert the command from a READ to a WRITE and send 11314 * it back to the target. 11315 */ 11316 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11317 copy_length); 11318 11319 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11320 11321 /* 11322 * Dispatch the WRITE command to the taskq thread, which 11323 * will in turn send the command to the target. When the 11324 * WRITE command completes, we (sd_mapblocksize_iodone()) 11325 * will get called again as part of the iodone chain 11326 * processing for it. Note that we will still be dealing 11327 * with the shadow buf at that point. 11328 */ 11329 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11330 KM_NOSLEEP) != 0) { 11331 /* 11332 * Dispatch was successful so we are done. Return 11333 * without going any higher up the iodone chain. Do 11334 * not free up any layer-private data until after the 11335 * WRITE completes. 11336 */ 11337 return; 11338 } 11339 11340 /* 11341 * Dispatch of the WRITE command failed; set up the error 11342 * condition and send this IO back up the iodone chain. 11343 */ 11344 bioerror(orig_bp, EIO); 11345 orig_bp->b_resid = orig_bp->b_bcount; 11346 11347 } else { 11348 /* 11349 * This is a regular READ request (ie, not a RMW). Copy the 11350 * data from the shadow buf into the original buf. 
The 11351 * copy_offset compensates for any "misalignment" between the 11352 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11353 * original buf (with its un->un_sys_blocksize blocks). 11354 */ 11355 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11356 copy_length); 11357 } 11358 11359 freebuf_done: 11360 11361 /* 11362 * At this point we still have both the shadow buf AND the original 11363 * buf to deal with, as well as the layer-private data area in each. 11364 * Local variables are as follows: 11365 * 11366 * bp -- points to shadow buf 11367 * xp -- points to xbuf of shadow buf 11368 * bsp -- points to layer-private data area of shadow buf 11369 * orig_bp -- points to original buf 11370 * 11371 * First free the shadow buf and its associated xbuf, then free the 11372 * layer-private data area from the shadow buf. There is no need to 11373 * restore xb_private in the shadow xbuf. 11374 */ 11375 sd_shadow_buf_free(bp); 11376 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11377 11378 /* 11379 * Now update the local variables to point to the original buf, xbuf, 11380 * and layer-private area. 11381 */ 11382 bp = orig_bp; 11383 xp = SD_GET_XBUF(bp); 11384 ASSERT(xp != NULL); 11385 ASSERT(xp == orig_xp); 11386 bsp = xp->xb_private; 11387 ASSERT(bsp != NULL); 11388 11389 done: 11390 /* 11391 * Restore xb_private to whatever it was set to by the next higher 11392 * layer in the chain, then free the layer-private data area. 11393 */ 11394 xp->xb_private = bsp->mbs_oprivate; 11395 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11396 11397 exit: 11398 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11399 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11400 11401 SD_NEXT_IODONE(index, un, bp); 11402 } 11403 11404 11405 /* 11406 * Function: sd_checksum_iostart 11407 * 11408 * Description: A stub function for a layer that's currently not used. 11409 * For now just a placeholder. 11410 * 11411 * Context: Kernel thread context 11412 */ 11413 11414 static void 11415 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11416 { 11417 ASSERT(un != NULL); 11418 ASSERT(bp != NULL); 11419 ASSERT(!mutex_owned(SD_MUTEX(un))); 11420 SD_NEXT_IOSTART(index, un, bp); 11421 } 11422 11423 11424 /* 11425 * Function: sd_checksum_iodone 11426 * 11427 * Description: A stub function for a layer that's currently not used. 11428 * For now just a placeholder. 11429 * 11430 * Context: May be called under interrupt context 11431 */ 11432 11433 static void 11434 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11435 { 11436 ASSERT(un != NULL); 11437 ASSERT(bp != NULL); 11438 ASSERT(!mutex_owned(SD_MUTEX(un))); 11439 SD_NEXT_IODONE(index, un, bp); 11440 } 11441 11442 11443 /* 11444 * Function: sd_checksum_uscsi_iostart 11445 * 11446 * Description: A stub function for a layer that's currently not used. 11447 * For now just a placeholder. 11448 * 11449 * Context: Kernel thread context 11450 */ 11451 11452 static void 11453 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11454 { 11455 ASSERT(un != NULL); 11456 ASSERT(bp != NULL); 11457 ASSERT(!mutex_owned(SD_MUTEX(un))); 11458 SD_NEXT_IOSTART(index, un, bp); 11459 } 11460 11461 11462 /* 11463 * Function: sd_checksum_uscsi_iodone 11464 * 11465 * Description: A stub function for a layer that's currently not used. 11466 * For now just a placeholder. 
11467 * 11468 * Context: May be called under interrupt context 11469 */ 11470 11471 static void 11472 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11473 { 11474 ASSERT(un != NULL); 11475 ASSERT(bp != NULL); 11476 ASSERT(!mutex_owned(SD_MUTEX(un))); 11477 SD_NEXT_IODONE(index, un, bp); 11478 } 11479 11480 11481 /* 11482 * Function: sd_pm_iostart 11483 * 11484 * Description: iostart-side routine for Power mangement. 11485 * 11486 * Context: Kernel thread context 11487 */ 11488 11489 static void 11490 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11491 { 11492 ASSERT(un != NULL); 11493 ASSERT(bp != NULL); 11494 ASSERT(!mutex_owned(SD_MUTEX(un))); 11495 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11496 11497 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11498 11499 if (sd_pm_entry(un) != DDI_SUCCESS) { 11500 /* 11501 * Set up to return the failed buf back up the 'iodone' 11502 * side of the calling chain. 11503 */ 11504 bioerror(bp, EIO); 11505 bp->b_resid = bp->b_bcount; 11506 11507 SD_BEGIN_IODONE(index, un, bp); 11508 11509 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11510 return; 11511 } 11512 11513 SD_NEXT_IOSTART(index, un, bp); 11514 11515 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11516 } 11517 11518 11519 /* 11520 * Function: sd_pm_iodone 11521 * 11522 * Description: iodone-side routine for power mangement. 11523 * 11524 * Context: may be called from interrupt context 11525 */ 11526 11527 static void 11528 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11529 { 11530 ASSERT(un != NULL); 11531 ASSERT(bp != NULL); 11532 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11533 11534 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11535 11536 /* 11537 * After attach the following flag is only read, so don't 11538 * take the penalty of acquiring a mutex for it. 11539 */ 11540 if (un->un_f_pm_is_enabled == TRUE) { 11541 sd_pm_exit(un); 11542 } 11543 11544 SD_NEXT_IODONE(index, un, bp); 11545 11546 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11547 } 11548 11549 11550 /* 11551 * Function: sd_core_iostart 11552 * 11553 * Description: Primary driver function for enqueuing buf(9S) structs from 11554 * the system and initiating IO to the target device 11555 * 11556 * Context: Kernel thread context. Can sleep. 11557 * 11558 * Assumptions: - The given xp->xb_blkno is absolute 11559 * (ie, relative to the start of the device). 11560 * - The IO is to be done using the native blocksize of 11561 * the device, as specified in un->un_tgt_blocksize. 11562 */ 11563 /* ARGSUSED */ 11564 static void 11565 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11566 { 11567 struct sd_xbuf *xp; 11568 11569 ASSERT(un != NULL); 11570 ASSERT(bp != NULL); 11571 ASSERT(!mutex_owned(SD_MUTEX(un))); 11572 ASSERT(bp->b_resid == 0); 11573 11574 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11575 11576 xp = SD_GET_XBUF(bp); 11577 ASSERT(xp != NULL); 11578 11579 mutex_enter(SD_MUTEX(un)); 11580 11581 /* 11582 * If we are currently in the failfast state, fail any new IO 11583 * that has B_FAILFAST set, then return. 11584 */ 11585 if ((bp->b_flags & B_FAILFAST) && 11586 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11587 mutex_exit(SD_MUTEX(un)); 11588 bioerror(bp, EIO); 11589 bp->b_resid = bp->b_bcount; 11590 SD_BEGIN_IODONE(index, un, bp); 11591 return; 11592 } 11593 11594 if (SD_IS_DIRECT_PRIORITY(xp)) { 11595 /* 11596 * Priority command -- transport it immediately. 
11597 * 11598 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11599 * because all direct priority commands should be associated 11600 * with error recovery actions which we don't want to retry. 11601 */ 11602 sd_start_cmds(un, bp); 11603 } else { 11604 /* 11605 * Normal command -- add it to the wait queue, then start 11606 * transporting commands from the wait queue. 11607 */ 11608 sd_add_buf_to_waitq(un, bp); 11609 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11610 sd_start_cmds(un, NULL); 11611 } 11612 11613 mutex_exit(SD_MUTEX(un)); 11614 11615 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11616 } 11617 11618 11619 /* 11620 * Function: sd_init_cdb_limits 11621 * 11622 * Description: This is to handle scsi_pkt initialization differences 11623 * between the driver platforms. 11624 * 11625 * Legacy behaviors: 11626 * 11627 * If the block number or the sector count exceeds the 11628 * capabilities of a Group 0 command, shift over to a 11629 * Group 1 command. We don't blindly use Group 1 11630 * commands because a) some drives (CDC Wren IVs) get a 11631 * bit confused, and b) there is probably a fair amount 11632 * of speed difference for a target to receive and decode 11633 * a 10 byte command instead of a 6 byte command. 11634 * 11635 * The xfer time difference of 6 vs 10 byte CDBs is 11636 * still significant so this code is still worthwhile. 11637 * 10 byte CDBs are very inefficient with the fas HBA driver 11638 * and older disks. Each CDB byte took 1 usec with some 11639 * popular disks. 11640 * 11641 * Context: Must be called at attach time 11642 */ 11643 11644 static void 11645 sd_init_cdb_limits(struct sd_lun *un) 11646 { 11647 int hba_cdb_limit; 11648 11649 /* 11650 * Use CDB_GROUP1 commands for most devices except for 11651 * parallel SCSI fixed drives in which case we get better 11652 * performance using CDB_GROUP0 commands (where applicable). 11653 */ 11654 un->un_mincdb = SD_CDB_GROUP1; 11655 #if !defined(__fibre) 11656 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11657 !un->un_f_has_removable_media) { 11658 un->un_mincdb = SD_CDB_GROUP0; 11659 } 11660 #endif 11661 11662 /* 11663 * Try to read the max-cdb-length supported by HBA. 11664 */ 11665 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11666 if (0 >= un->un_max_hba_cdb) { 11667 un->un_max_hba_cdb = CDB_GROUP4; 11668 hba_cdb_limit = SD_CDB_GROUP4; 11669 } else if (0 < un->un_max_hba_cdb && 11670 un->un_max_hba_cdb < CDB_GROUP1) { 11671 hba_cdb_limit = SD_CDB_GROUP0; 11672 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11673 un->un_max_hba_cdb < CDB_GROUP5) { 11674 hba_cdb_limit = SD_CDB_GROUP1; 11675 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11676 un->un_max_hba_cdb < CDB_GROUP4) { 11677 hba_cdb_limit = SD_CDB_GROUP5; 11678 } else { 11679 hba_cdb_limit = SD_CDB_GROUP4; 11680 } 11681 11682 /* 11683 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11684 * commands for fixed disks unless we are building for a 32 bit 11685 * kernel. 11686 */ 11687 #ifdef _LP64 11688 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11689 min(hba_cdb_limit, SD_CDB_GROUP4); 11690 #else 11691 un->un_maxcdb = (un->un_f_has_removable_media) ? 
SD_CDB_GROUP5 : 11692 min(hba_cdb_limit, SD_CDB_GROUP1); 11693 #endif 11694 11695 /* 11696 * x86 systems require the PKT_DMA_PARTIAL flag 11697 */ 11698 #if defined(__x86) 11699 un->un_pkt_flags = PKT_DMA_PARTIAL; 11700 #else 11701 un->un_pkt_flags = 0; 11702 #endif 11703 11704 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11705 ? sizeof (struct scsi_arq_status) : 1); 11706 un->un_cmd_timeout = (ushort_t)sd_io_time; 11707 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11708 } 11709 11710 11711 /* 11712 * Function: sd_initpkt_for_buf 11713 * 11714 * Description: Allocate and initialize for transport a scsi_pkt struct, 11715 * based upon the info specified in the given buf struct. 11716 * 11717 * Assumes the xb_blkno in the request is absolute (ie, 11718 * relative to the start of the device (NOT partition!). 11719 * Also assumes that the request is using the native block 11720 * size of the device (as returned by the READ CAPACITY 11721 * command). 11722 * 11723 * Return Code: SD_PKT_ALLOC_SUCCESS 11724 * SD_PKT_ALLOC_FAILURE 11725 * SD_PKT_ALLOC_FAILURE_NO_DMA 11726 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11727 * 11728 * Context: Kernel thread and may be called from software interrupt context 11729 * as part of a sdrunout callback. This function may not block or 11730 * call routines that block 11731 */ 11732 11733 static int 11734 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11735 { 11736 struct sd_xbuf *xp; 11737 struct scsi_pkt *pktp = NULL; 11738 struct sd_lun *un; 11739 size_t blockcount; 11740 daddr_t startblock; 11741 int rval; 11742 int cmd_flags; 11743 11744 ASSERT(bp != NULL); 11745 ASSERT(pktpp != NULL); 11746 xp = SD_GET_XBUF(bp); 11747 ASSERT(xp != NULL); 11748 un = SD_GET_UN(bp); 11749 ASSERT(un != NULL); 11750 ASSERT(mutex_owned(SD_MUTEX(un))); 11751 ASSERT(bp->b_resid == 0); 11752 11753 SD_TRACE(SD_LOG_IO_CORE, un, 11754 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11755 11756 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11757 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11758 /* 11759 * Already have a scsi_pkt -- just need DMA resources. 11760 * We must recompute the CDB in case the mapping returns 11761 * a nonzero pkt_resid. 11762 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11763 * that is being retried, the unmap/remap of the DMA resouces 11764 * will result in the entire transfer starting over again 11765 * from the very first block. 11766 */ 11767 ASSERT(xp->xb_pktp != NULL); 11768 pktp = xp->xb_pktp; 11769 } else { 11770 pktp = NULL; 11771 } 11772 #endif /* __i386 || __amd64 */ 11773 11774 startblock = xp->xb_blkno; /* Absolute block num. */ 11775 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11776 11777 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11778 11779 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11780 11781 #else 11782 11783 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11784 11785 #endif 11786 11787 /* 11788 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11789 * call scsi_init_pkt, and build the CDB. 11790 */ 11791 rval = sd_setup_rw_pkt(un, &pktp, bp, 11792 cmd_flags, sdrunout, (caddr_t)un, 11793 startblock, blockcount); 11794 11795 if (rval == 0) { 11796 /* 11797 * Success. 11798 * 11799 * If partial DMA is being used and required for this transfer. 11800 * set it up here. 
11801 */ 11802 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11803 (pktp->pkt_resid != 0)) { 11804 11805 /* 11806 * Save the CDB length and pkt_resid for the 11807 * next xfer 11808 */ 11809 xp->xb_dma_resid = pktp->pkt_resid; 11810 11811 /* rezero resid */ 11812 pktp->pkt_resid = 0; 11813 11814 } else { 11815 xp->xb_dma_resid = 0; 11816 } 11817 11818 pktp->pkt_flags = un->un_tagflags; 11819 pktp->pkt_time = un->un_cmd_timeout; 11820 pktp->pkt_comp = sdintr; 11821 11822 pktp->pkt_private = bp; 11823 *pktpp = pktp; 11824 11825 SD_TRACE(SD_LOG_IO_CORE, un, 11826 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11827 11828 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11829 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11830 #endif 11831 11832 return (SD_PKT_ALLOC_SUCCESS); 11833 11834 } 11835 11836 /* 11837 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11838 * from sd_setup_rw_pkt. 11839 */ 11840 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11841 11842 if (rval == SD_PKT_ALLOC_FAILURE) { 11843 *pktpp = NULL; 11844 /* 11845 * Set the driver state to RWAIT to indicate the driver 11846 * is waiting on resource allocations. The driver will not 11847 * suspend, pm_suspend, or detatch while the state is RWAIT. 11848 */ 11849 New_state(un, SD_STATE_RWAIT); 11850 11851 SD_ERROR(SD_LOG_IO_CORE, un, 11852 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11853 11854 if ((bp->b_flags & B_ERROR) != 0) { 11855 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11856 } 11857 return (SD_PKT_ALLOC_FAILURE); 11858 } else { 11859 /* 11860 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11861 * 11862 * This should never happen. Maybe someone messed with the 11863 * kernel's minphys? 11864 */ 11865 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11866 "Request rejected: too large for CDB: " 11867 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11868 SD_ERROR(SD_LOG_IO_CORE, un, 11869 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11870 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11871 11872 } 11873 } 11874 11875 11876 /* 11877 * Function: sd_destroypkt_for_buf 11878 * 11879 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11880 * 11881 * Context: Kernel thread or interrupt context 11882 */ 11883 11884 static void 11885 sd_destroypkt_for_buf(struct buf *bp) 11886 { 11887 ASSERT(bp != NULL); 11888 ASSERT(SD_GET_UN(bp) != NULL); 11889 11890 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11891 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11892 11893 ASSERT(SD_GET_PKTP(bp) != NULL); 11894 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11895 11896 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11897 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11898 } 11899 11900 /* 11901 * Function: sd_setup_rw_pkt 11902 * 11903 * Description: Determines appropriate CDB group for the requested LBA 11904 * and transfer length, calls scsi_init_pkt, and builds 11905 * the CDB. Do not use for partial DMA transfers except 11906 * for the initial transfer since the CDB size must 11907 * remain constant. 11908 * 11909 * Context: Kernel thread and may be called from software interrupt 11910 * context as part of a sdrunout callback. 
This function may not 11911 * block or call routines that block 11912 */ 11913 11914 11915 int 11916 sd_setup_rw_pkt(struct sd_lun *un, 11917 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11918 int (*callback)(caddr_t), caddr_t callback_arg, 11919 diskaddr_t lba, uint32_t blockcount) 11920 { 11921 struct scsi_pkt *return_pktp; 11922 union scsi_cdb *cdbp; 11923 struct sd_cdbinfo *cp = NULL; 11924 int i; 11925 11926 /* 11927 * See which size CDB to use, based upon the request. 11928 */ 11929 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11930 11931 /* 11932 * Check lba and block count against sd_cdbtab limits. 11933 * In the partial DMA case, we have to use the same size 11934 * CDB for all the transfers. Check lba + blockcount 11935 * against the max LBA so we know that segment of the 11936 * transfer can use the CDB we select. 11937 */ 11938 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11939 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11940 11941 /* 11942 * The command will fit into the CDB type 11943 * specified by sd_cdbtab[i]. 11944 */ 11945 cp = sd_cdbtab + i; 11946 11947 /* 11948 * Call scsi_init_pkt so we can fill in the 11949 * CDB. 11950 */ 11951 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11952 bp, cp->sc_grpcode, un->un_status_len, 0, 11953 flags, callback, callback_arg); 11954 11955 if (return_pktp != NULL) { 11956 11957 /* 11958 * Return new value of pkt 11959 */ 11960 *pktpp = return_pktp; 11961 11962 /* 11963 * To be safe, zero the CDB insuring there is 11964 * no leftover data from a previous command. 11965 */ 11966 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11967 11968 /* 11969 * Handle partial DMA mapping 11970 */ 11971 if (return_pktp->pkt_resid != 0) { 11972 11973 /* 11974 * Not going to xfer as many blocks as 11975 * originally expected 11976 */ 11977 blockcount -= 11978 SD_BYTES2TGTBLOCKS(un, 11979 return_pktp->pkt_resid); 11980 } 11981 11982 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11983 11984 /* 11985 * Set command byte based on the CDB 11986 * type we matched. 11987 */ 11988 cdbp->scc_cmd = cp->sc_grpmask | 11989 ((bp->b_flags & B_READ) ? 11990 SCMD_READ : SCMD_WRITE); 11991 11992 SD_FILL_SCSI1_LUN(un, return_pktp); 11993 11994 /* 11995 * Fill in LBA and length 11996 */ 11997 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11998 (cp->sc_grpcode == CDB_GROUP4) || 11999 (cp->sc_grpcode == CDB_GROUP0) || 12000 (cp->sc_grpcode == CDB_GROUP5)); 12001 12002 if (cp->sc_grpcode == CDB_GROUP1) { 12003 FORMG1ADDR(cdbp, lba); 12004 FORMG1COUNT(cdbp, blockcount); 12005 return (0); 12006 } else if (cp->sc_grpcode == CDB_GROUP4) { 12007 FORMG4LONGADDR(cdbp, lba); 12008 FORMG4COUNT(cdbp, blockcount); 12009 return (0); 12010 } else if (cp->sc_grpcode == CDB_GROUP0) { 12011 FORMG0ADDR(cdbp, lba); 12012 FORMG0COUNT(cdbp, blockcount); 12013 return (0); 12014 } else if (cp->sc_grpcode == CDB_GROUP5) { 12015 FORMG5ADDR(cdbp, lba); 12016 FORMG5COUNT(cdbp, blockcount); 12017 return (0); 12018 } 12019 12020 /* 12021 * It should be impossible to not match one 12022 * of the CDB types above, so we should never 12023 * reach this point. Set the CDB command byte 12024 * to test-unit-ready to avoid writing 12025 * to somewhere we don't intend. 12026 */ 12027 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12028 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12029 } else { 12030 /* 12031 * Couldn't get scsi_pkt 12032 */ 12033 return (SD_PKT_ALLOC_FAILURE); 12034 } 12035 } 12036 } 12037 12038 /* 12039 * None of the available CDB types were suitable. 
This really 12040 * should never happen: on a 64 bit system we support 12041 * READ16/WRITE16 which will hold an entire 64 bit disk address 12042 * and on a 32 bit system we will refuse to bind to a device 12043 * larger than 2TB so addresses will never be larger than 32 bits. 12044 */ 12045 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12046 } 12047 12048 #if defined(__i386) || defined(__amd64) 12049 /* 12050 * Function: sd_setup_next_rw_pkt 12051 * 12052 * Description: Setup packet for partial DMA transfers, except for the 12053 * initial transfer. sd_setup_rw_pkt should be used for 12054 * the initial transfer. 12055 * 12056 * Context: Kernel thread and may be called from interrupt context. 12057 */ 12058 12059 int 12060 sd_setup_next_rw_pkt(struct sd_lun *un, 12061 struct scsi_pkt *pktp, struct buf *bp, 12062 diskaddr_t lba, uint32_t blockcount) 12063 { 12064 uchar_t com; 12065 union scsi_cdb *cdbp; 12066 uchar_t cdb_group_id; 12067 12068 ASSERT(pktp != NULL); 12069 ASSERT(pktp->pkt_cdbp != NULL); 12070 12071 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12072 com = cdbp->scc_cmd; 12073 cdb_group_id = CDB_GROUPID(com); 12074 12075 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12076 (cdb_group_id == CDB_GROUPID_1) || 12077 (cdb_group_id == CDB_GROUPID_4) || 12078 (cdb_group_id == CDB_GROUPID_5)); 12079 12080 /* 12081 * Move pkt to the next portion of the xfer. 12082 * func is NULL_FUNC so we do not have to release 12083 * the disk mutex here. 12084 */ 12085 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12086 NULL_FUNC, NULL) == pktp) { 12087 /* Success. Handle partial DMA */ 12088 if (pktp->pkt_resid != 0) { 12089 blockcount -= 12090 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12091 } 12092 12093 cdbp->scc_cmd = com; 12094 SD_FILL_SCSI1_LUN(un, pktp); 12095 if (cdb_group_id == CDB_GROUPID_1) { 12096 FORMG1ADDR(cdbp, lba); 12097 FORMG1COUNT(cdbp, blockcount); 12098 return (0); 12099 } else if (cdb_group_id == CDB_GROUPID_4) { 12100 FORMG4LONGADDR(cdbp, lba); 12101 FORMG4COUNT(cdbp, blockcount); 12102 return (0); 12103 } else if (cdb_group_id == CDB_GROUPID_0) { 12104 FORMG0ADDR(cdbp, lba); 12105 FORMG0COUNT(cdbp, blockcount); 12106 return (0); 12107 } else if (cdb_group_id == CDB_GROUPID_5) { 12108 FORMG5ADDR(cdbp, lba); 12109 FORMG5COUNT(cdbp, blockcount); 12110 return (0); 12111 } 12112 12113 /* Unreachable */ 12114 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12115 } 12116 12117 /* 12118 * Error setting up next portion of cmd transfer. 12119 * Something is definitely very wrong and this 12120 * should not happen. 12121 */ 12122 return (SD_PKT_ALLOC_FAILURE); 12123 } 12124 #endif /* defined(__i386) || defined(__amd64) */ 12125 12126 /* 12127 * Function: sd_initpkt_for_uscsi 12128 * 12129 * Description: Allocate and initialize for transport a scsi_pkt struct, 12130 * based upon the info specified in the given uscsi_cmd struct. 12131 * 12132 * Return Code: SD_PKT_ALLOC_SUCCESS 12133 * SD_PKT_ALLOC_FAILURE 12134 * SD_PKT_ALLOC_FAILURE_NO_DMA 12135 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12136 * 12137 * Context: Kernel thread and may be called from software interrupt context 12138 * as part of a sdrunout callback. 
This function may not block or 12139 * call routines that block 12140 */ 12141 12142 static int 12143 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12144 { 12145 struct uscsi_cmd *uscmd; 12146 struct sd_xbuf *xp; 12147 struct scsi_pkt *pktp; 12148 struct sd_lun *un; 12149 uint32_t flags = 0; 12150 12151 ASSERT(bp != NULL); 12152 ASSERT(pktpp != NULL); 12153 xp = SD_GET_XBUF(bp); 12154 ASSERT(xp != NULL); 12155 un = SD_GET_UN(bp); 12156 ASSERT(un != NULL); 12157 ASSERT(mutex_owned(SD_MUTEX(un))); 12158 12159 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12160 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12161 ASSERT(uscmd != NULL); 12162 12163 SD_TRACE(SD_LOG_IO_CORE, un, 12164 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12165 12166 /* 12167 * Allocate the scsi_pkt for the command. 12168 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12169 * during scsi_init_pkt time and will continue to use the 12170 * same path as long as the same scsi_pkt is used without 12171 * intervening scsi_dma_free(). Since uscsi command does 12172 * not call scsi_dmafree() before retry failed command, it 12173 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 12174 * set such that scsi_vhci can use other available path for 12175 * retry. Besides, ucsci command does not allow DMA breakup, 12176 * so there is no need to set PKT_DMA_PARTIAL flag. 12177 */ 12178 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12179 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12180 sizeof (struct scsi_arq_status), 0, 12181 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12182 sdrunout, (caddr_t)un); 12183 12184 if (pktp == NULL) { 12185 *pktpp = NULL; 12186 /* 12187 * Set the driver state to RWAIT to indicate the driver 12188 * is waiting on resource allocations. The driver will not 12189 * suspend, pm_suspend, or detatch while the state is RWAIT. 12190 */ 12191 New_state(un, SD_STATE_RWAIT); 12192 12193 SD_ERROR(SD_LOG_IO_CORE, un, 12194 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12195 12196 if ((bp->b_flags & B_ERROR) != 0) { 12197 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12198 } 12199 return (SD_PKT_ALLOC_FAILURE); 12200 } 12201 12202 /* 12203 * We do not do DMA breakup for USCSI commands, so return failure 12204 * here if all the needed DMA resources were not allocated. 12205 */ 12206 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12207 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12208 scsi_destroy_pkt(pktp); 12209 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12210 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12211 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12212 } 12213 12214 /* Init the cdb from the given uscsi struct */ 12215 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12216 uscmd->uscsi_cdb[0], 0, 0, 0); 12217 12218 SD_FILL_SCSI1_LUN(un, pktp); 12219 12220 /* 12221 * Set up the optional USCSI flags. See the uscsi (7I) man page 12222 * for listing of the supported flags. 12223 */ 12224 12225 if (uscmd->uscsi_flags & USCSI_SILENT) { 12226 flags |= FLAG_SILENT; 12227 } 12228 12229 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12230 flags |= FLAG_DIAGNOSE; 12231 } 12232 12233 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12234 flags |= FLAG_ISOLATE; 12235 } 12236 12237 if (un->un_f_is_fibre == FALSE) { 12238 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12239 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12240 } 12241 } 12242 12243 /* 12244 * Set the pkt flags here so we save time later. 12245 * Note: These flags are NOT in the uscsi man page!!! 
12246 */ 12247 if (uscmd->uscsi_flags & USCSI_HEAD) { 12248 flags |= FLAG_HEAD; 12249 } 12250 12251 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12252 flags |= FLAG_NOINTR; 12253 } 12254 12255 /* 12256 * For tagged queueing, things get a bit complicated. 12257 * Check first for head of queue and last for ordered queue. 12258 * If neither head nor order, use the default driver tag flags. 12259 */ 12260 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12261 if (uscmd->uscsi_flags & USCSI_HTAG) { 12262 flags |= FLAG_HTAG; 12263 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12264 flags |= FLAG_OTAG; 12265 } else { 12266 flags |= un->un_tagflags & FLAG_TAGMASK; 12267 } 12268 } 12269 12270 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12271 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12272 } 12273 12274 pktp->pkt_flags = flags; 12275 12276 /* Copy the caller's CDB into the pkt... */ 12277 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12278 12279 if (uscmd->uscsi_timeout == 0) { 12280 pktp->pkt_time = un->un_uscsi_timeout; 12281 } else { 12282 pktp->pkt_time = uscmd->uscsi_timeout; 12283 } 12284 12285 /* need it later to identify USCSI request in sdintr */ 12286 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12287 12288 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12289 12290 pktp->pkt_private = bp; 12291 pktp->pkt_comp = sdintr; 12292 *pktpp = pktp; 12293 12294 SD_TRACE(SD_LOG_IO_CORE, un, 12295 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12296 12297 return (SD_PKT_ALLOC_SUCCESS); 12298 } 12299 12300 12301 /* 12302 * Function: sd_destroypkt_for_uscsi 12303 * 12304 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12305 * IOs.. Also saves relevant info into the associated uscsi_cmd 12306 * struct. 12307 * 12308 * Context: May be called under interrupt context 12309 */ 12310 12311 static void 12312 sd_destroypkt_for_uscsi(struct buf *bp) 12313 { 12314 struct uscsi_cmd *uscmd; 12315 struct sd_xbuf *xp; 12316 struct scsi_pkt *pktp; 12317 struct sd_lun *un; 12318 12319 ASSERT(bp != NULL); 12320 xp = SD_GET_XBUF(bp); 12321 ASSERT(xp != NULL); 12322 un = SD_GET_UN(bp); 12323 ASSERT(un != NULL); 12324 ASSERT(!mutex_owned(SD_MUTEX(un))); 12325 pktp = SD_GET_PKTP(bp); 12326 ASSERT(pktp != NULL); 12327 12328 SD_TRACE(SD_LOG_IO_CORE, un, 12329 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12330 12331 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12332 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12333 ASSERT(uscmd != NULL); 12334 12335 /* Save the status and the residual into the uscsi_cmd struct */ 12336 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12337 uscmd->uscsi_resid = bp->b_resid; 12338 12339 /* 12340 * If enabled, copy any saved sense data into the area specified 12341 * by the uscsi command. 
12342 */ 12343 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12344 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12345 /* 12346 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12347 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12348 */ 12349 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12350 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12351 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12352 } 12353 12354 /* We are done with the scsi_pkt; free it now */ 12355 ASSERT(SD_GET_PKTP(bp) != NULL); 12356 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12357 12358 SD_TRACE(SD_LOG_IO_CORE, un, 12359 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12360 } 12361 12362 12363 /* 12364 * Function: sd_bioclone_alloc 12365 * 12366 * Description: Allocate a buf(9S) and init it as per the given buf 12367 * and the various arguments. The associated sd_xbuf 12368 * struct is (nearly) duplicated. The struct buf *bp 12369 * argument is saved in new_xp->xb_private. 12370 * 12371 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12372 * datalen - size of data area for the shadow bp 12373 * blkno - starting LBA 12374 * func - function pointer for b_iodone in the shadow buf. (May 12375 * be NULL if none.) 12376 * 12377 * Return Code: Pointer to allocates buf(9S) struct 12378 * 12379 * Context: Can sleep. 12380 */ 12381 12382 static struct buf * 12383 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12384 daddr_t blkno, int (*func)(struct buf *)) 12385 { 12386 struct sd_lun *un; 12387 struct sd_xbuf *xp; 12388 struct sd_xbuf *new_xp; 12389 struct buf *new_bp; 12390 12391 ASSERT(bp != NULL); 12392 xp = SD_GET_XBUF(bp); 12393 ASSERT(xp != NULL); 12394 un = SD_GET_UN(bp); 12395 ASSERT(un != NULL); 12396 ASSERT(!mutex_owned(SD_MUTEX(un))); 12397 12398 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12399 NULL, KM_SLEEP); 12400 12401 new_bp->b_lblkno = blkno; 12402 12403 /* 12404 * Allocate an xbuf for the shadow bp and copy the contents of the 12405 * original xbuf into it. 12406 */ 12407 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12408 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12409 12410 /* 12411 * The given bp is automatically saved in the xb_private member 12412 * of the new xbuf. Callers are allowed to depend on this. 12413 */ 12414 new_xp->xb_private = bp; 12415 12416 new_bp->b_private = new_xp; 12417 12418 return (new_bp); 12419 } 12420 12421 /* 12422 * Function: sd_shadow_buf_alloc 12423 * 12424 * Description: Allocate a buf(9S) and init it as per the given buf 12425 * and the various arguments. The associated sd_xbuf 12426 * struct is (nearly) duplicated. The struct buf *bp 12427 * argument is saved in new_xp->xb_private. 12428 * 12429 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12430 * datalen - size of data area for the shadow bp 12431 * bflags - B_READ or B_WRITE (pseudo flag) 12432 * blkno - starting LBA 12433 * func - function pointer for b_iodone in the shadow buf. (May 12434 * be NULL if none.) 12435 * 12436 * Return Code: Pointer to allocates buf(9S) struct 12437 * 12438 * Context: Can sleep. 
12439 */ 12440 12441 static struct buf * 12442 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12443 daddr_t blkno, int (*func)(struct buf *)) 12444 { 12445 struct sd_lun *un; 12446 struct sd_xbuf *xp; 12447 struct sd_xbuf *new_xp; 12448 struct buf *new_bp; 12449 12450 ASSERT(bp != NULL); 12451 xp = SD_GET_XBUF(bp); 12452 ASSERT(xp != NULL); 12453 un = SD_GET_UN(bp); 12454 ASSERT(un != NULL); 12455 ASSERT(!mutex_owned(SD_MUTEX(un))); 12456 12457 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12458 bp_mapin(bp); 12459 } 12460 12461 bflags &= (B_READ | B_WRITE); 12462 #if defined(__i386) || defined(__amd64) 12463 new_bp = getrbuf(KM_SLEEP); 12464 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12465 new_bp->b_bcount = datalen; 12466 new_bp->b_flags = bflags | 12467 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12468 #else 12469 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12470 datalen, bflags, SLEEP_FUNC, NULL); 12471 #endif 12472 new_bp->av_forw = NULL; 12473 new_bp->av_back = NULL; 12474 new_bp->b_dev = bp->b_dev; 12475 new_bp->b_blkno = blkno; 12476 new_bp->b_iodone = func; 12477 new_bp->b_edev = bp->b_edev; 12478 new_bp->b_resid = 0; 12479 12480 /* We need to preserve the B_FAILFAST flag */ 12481 if (bp->b_flags & B_FAILFAST) { 12482 new_bp->b_flags |= B_FAILFAST; 12483 } 12484 12485 /* 12486 * Allocate an xbuf for the shadow bp and copy the contents of the 12487 * original xbuf into it. 12488 */ 12489 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12490 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12491 12492 /* Need later to copy data between the shadow buf & original buf! */ 12493 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12494 12495 /* 12496 * The given bp is automatically saved in the xb_private member 12497 * of the new xbuf. Callers are allowed to depend on this. 12498 */ 12499 new_xp->xb_private = bp; 12500 12501 new_bp->b_private = new_xp; 12502 12503 return (new_bp); 12504 } 12505 12506 /* 12507 * Function: sd_bioclone_free 12508 * 12509 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12510 * in the larger than partition operation. 12511 * 12512 * Context: May be called under interrupt context 12513 */ 12514 12515 static void 12516 sd_bioclone_free(struct buf *bp) 12517 { 12518 struct sd_xbuf *xp; 12519 12520 ASSERT(bp != NULL); 12521 xp = SD_GET_XBUF(bp); 12522 ASSERT(xp != NULL); 12523 12524 /* 12525 * Call bp_mapout() before freeing the buf, in case a lower 12526 * layer or HBA had done a bp_mapin(). we must do this here 12527 * as we are the "originator" of the shadow buf. 12528 */ 12529 bp_mapout(bp); 12530 12531 /* 12532 * Null out b_iodone before freeing the bp, to ensure that the driver 12533 * never gets confused by a stale value in this field. (Just a little 12534 * extra defensiveness here.) 12535 */ 12536 bp->b_iodone = NULL; 12537 12538 freerbuf(bp); 12539 12540 kmem_free(xp, sizeof (struct sd_xbuf)); 12541 } 12542 12543 /* 12544 * Function: sd_shadow_buf_free 12545 * 12546 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12547 * 12548 * Context: May be called under interrupt context 12549 */ 12550 12551 static void 12552 sd_shadow_buf_free(struct buf *bp) 12553 { 12554 struct sd_xbuf *xp; 12555 12556 ASSERT(bp != NULL); 12557 xp = SD_GET_XBUF(bp); 12558 ASSERT(xp != NULL); 12559 12560 #if defined(__sparc) 12561 /* 12562 * Call bp_mapout() before freeing the buf, in case a lower 12563 * layer or HBA had done a bp_mapin(). 
we must do this here 12564 * as we are the "originator" of the shadow buf. 12565 */ 12566 bp_mapout(bp); 12567 #endif 12568 12569 /* 12570 * Null out b_iodone before freeing the bp, to ensure that the driver 12571 * never gets confused by a stale value in this field. (Just a little 12572 * extra defensiveness here.) 12573 */ 12574 bp->b_iodone = NULL; 12575 12576 #if defined(__i386) || defined(__amd64) 12577 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12578 freerbuf(bp); 12579 #else 12580 scsi_free_consistent_buf(bp); 12581 #endif 12582 12583 kmem_free(xp, sizeof (struct sd_xbuf)); 12584 } 12585 12586 12587 /* 12588 * Function: sd_print_transport_rejected_message 12589 * 12590 * Description: This implements the ludicrously complex rules for printing 12591 * a "transport rejected" message. This is to address the 12592 * specific problem of having a flood of this error message 12593 * produced when a failover occurs. 12594 * 12595 * Context: Any. 12596 */ 12597 12598 static void 12599 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12600 int code) 12601 { 12602 ASSERT(un != NULL); 12603 ASSERT(mutex_owned(SD_MUTEX(un))); 12604 ASSERT(xp != NULL); 12605 12606 /* 12607 * Print the "transport rejected" message under the following 12608 * conditions: 12609 * 12610 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12611 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12612 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12613 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12614 * scsi_transport(9F) (which indicates that the target might have 12615 * gone off-line). This uses the un->un_tran_fatal_count 12616 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12617 * received, and reset to zero whenver a TRAN_ACCEPT is returned 12618 * from scsi_transport(). 12619 * 12620 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12621 * the preceeding cases in order for the message to be printed. 12622 */ 12623 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12624 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12625 (code != TRAN_FATAL_ERROR) || 12626 (un->un_tran_fatal_count == 1)) { 12627 switch (code) { 12628 case TRAN_BADPKT: 12629 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12630 "transport rejected bad packet\n"); 12631 break; 12632 case TRAN_FATAL_ERROR: 12633 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12634 "transport rejected fatal error\n"); 12635 break; 12636 default: 12637 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12638 "transport rejected (%d)\n", code); 12639 break; 12640 } 12641 } 12642 } 12643 } 12644 12645 12646 /* 12647 * Function: sd_add_buf_to_waitq 12648 * 12649 * Description: Add the given buf(9S) struct to the wait queue for the 12650 * instance. If sorting is enabled, then the buf is added 12651 * to the queue via an elevator sort algorithm (a la 12652 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12653 * If sorting is not enabled, then the buf is just added 12654 * to the end of the wait queue. 12655 * 12656 * Return Code: void 12657 * 12658 * Context: Does not sleep/block, therefore technically can be called 12659 * from any context. However if sorting is enabled then the 12660 * execution time is indeterminate, and may take long if 12661 * the wait queue grows large. 
12662 */ 12663 12664 static void 12665 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12666 { 12667 struct buf *ap; 12668 12669 ASSERT(bp != NULL); 12670 ASSERT(un != NULL); 12671 ASSERT(mutex_owned(SD_MUTEX(un))); 12672 12673 /* If the queue is empty, add the buf as the only entry & return. */ 12674 if (un->un_waitq_headp == NULL) { 12675 ASSERT(un->un_waitq_tailp == NULL); 12676 un->un_waitq_headp = un->un_waitq_tailp = bp; 12677 bp->av_forw = NULL; 12678 return; 12679 } 12680 12681 ASSERT(un->un_waitq_tailp != NULL); 12682 12683 /* 12684 * If sorting is disabled, just add the buf to the tail end of 12685 * the wait queue and return. 12686 */ 12687 if (un->un_f_disksort_disabled) { 12688 un->un_waitq_tailp->av_forw = bp; 12689 un->un_waitq_tailp = bp; 12690 bp->av_forw = NULL; 12691 return; 12692 } 12693 12694 /* 12695 * Sort thru the list of requests currently on the wait queue 12696 * and add the new buf request at the appropriate position. 12697 * 12698 * The un->un_waitq_headp is an activity chain pointer on which 12699 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12700 * first queue holds those requests which are positioned after 12701 * the current SD_GET_BLKNO() (in the first request); the second holds 12702 * requests which came in after their SD_GET_BLKNO() number was passed. 12703 * Thus we implement a one way scan, retracting after reaching 12704 * the end of the drive to the first request on the second 12705 * queue, at which time it becomes the first queue. 12706 * A one-way scan is natural because of the way UNIX read-ahead 12707 * blocks are allocated. 12708 * 12709 * If we lie after the first request, then we must locate the 12710 * second request list and add ourselves to it. 12711 */ 12712 ap = un->un_waitq_headp; 12713 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12714 while (ap->av_forw != NULL) { 12715 /* 12716 * Look for an "inversion" in the (normally 12717 * ascending) block numbers. This indicates 12718 * the start of the second request list. 12719 */ 12720 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12721 /* 12722 * Search the second request list for the 12723 * first request at a larger block number. 12724 * We go before that; however if there is 12725 * no such request, we go at the end. 12726 */ 12727 do { 12728 if (SD_GET_BLKNO(bp) < 12729 SD_GET_BLKNO(ap->av_forw)) { 12730 goto insert; 12731 } 12732 ap = ap->av_forw; 12733 } while (ap->av_forw != NULL); 12734 goto insert; /* after last */ 12735 } 12736 ap = ap->av_forw; 12737 } 12738 12739 /* 12740 * No inversions... we will go after the last, and 12741 * be the first request in the second request list. 12742 */ 12743 goto insert; 12744 } 12745 12746 /* 12747 * Request is at/after the current request... 12748 * sort in the first request list. 12749 */ 12750 while (ap->av_forw != NULL) { 12751 /* 12752 * We want to go after the current request (1) if 12753 * there is an inversion after it (i.e. it is the end 12754 * of the first request list), or (2) if the next 12755 * request is a larger block no. than our request. 12756 */ 12757 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12758 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12759 goto insert; 12760 } 12761 ap = ap->av_forw; 12762 } 12763 12764 /* 12765 * Neither a second list nor a larger request, therefore 12766 * we go at the end of the first list (which is the same 12767 * as the end of the whole schebang). 
12768 */ 12769 insert: 12770 bp->av_forw = ap->av_forw; 12771 ap->av_forw = bp; 12772 12773 /* 12774 * If we inserted onto the tail end of the waitq, make sure the 12775 * tail pointer is updated. 12776 */ 12777 if (ap == un->un_waitq_tailp) { 12778 un->un_waitq_tailp = bp; 12779 } 12780 } 12781 12782 12783 /* 12784 * Function: sd_start_cmds 12785 * 12786 * Description: Remove and transport cmds from the driver queues. 12787 * 12788 * Arguments: un - pointer to the unit (soft state) struct for the target. 12789 * 12790 * immed_bp - ptr to a buf to be transported immediately. Only 12791 * the immed_bp is transported; bufs on the waitq are not 12792 * processed and the un_retry_bp is not checked. If immed_bp is 12793 * NULL, then normal queue processing is performed. 12794 * 12795 * Context: May be called from kernel thread context, interrupt context, 12796 * or runout callback context. This function may not block or 12797 * call routines that block. 12798 */ 12799 12800 static void 12801 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12802 { 12803 struct sd_xbuf *xp; 12804 struct buf *bp; 12805 void (*statp)(kstat_io_t *); 12806 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12807 void (*saved_statp)(kstat_io_t *); 12808 #endif 12809 int rval; 12810 12811 ASSERT(un != NULL); 12812 ASSERT(mutex_owned(SD_MUTEX(un))); 12813 ASSERT(un->un_ncmds_in_transport >= 0); 12814 ASSERT(un->un_throttle >= 0); 12815 12816 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12817 12818 do { 12819 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12820 saved_statp = NULL; 12821 #endif 12822 12823 /* 12824 * If we are syncing or dumping, fail the command to 12825 * avoid recursively calling back into scsi_transport(). 12826 * The dump I/O itself uses a separate code path so this 12827 * only prevents non-dump I/O from being sent while dumping. 12828 * File system sync takes place before dumping begins. 12829 * During panic, filesystem I/O is allowed provided 12830 * un_in_callback is <= 1. This is to prevent recursion 12831 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12832 * sd_start_cmds and so on. See panic.c for more information 12833 * about the states the system can be in during panic. 12834 */ 12835 if ((un->un_state == SD_STATE_DUMPING) || 12836 (ddi_in_panic() && (un->un_in_callback > 1))) { 12837 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12838 "sd_start_cmds: panicking\n"); 12839 goto exit; 12840 } 12841 12842 if ((bp = immed_bp) != NULL) { 12843 /* 12844 * We have a bp that must be transported immediately. 12845 * It's OK to transport the immed_bp here without doing 12846 * the throttle limit check because the immed_bp is 12847 * always used in a retry/recovery case. This means 12848 * that we know we are not at the throttle limit by 12849 * virtue of the fact that to get here we must have 12850 * already gotten a command back via sdintr(). This also 12851 * relies on (1) the command on un_retry_bp preventing 12852 * further commands from the waitq from being issued; 12853 * and (2) the code in sd_retry_command checking the 12854 * throttle limit before issuing a delayed or immediate 12855 * retry. This holds even if the throttle limit is 12856 * currently ratcheted down from its maximum value. 
12857 */ 12858 statp = kstat_runq_enter; 12859 if (bp == un->un_retry_bp) { 12860 ASSERT((un->un_retry_statp == NULL) || 12861 (un->un_retry_statp == kstat_waitq_enter) || 12862 (un->un_retry_statp == 12863 kstat_runq_back_to_waitq)); 12864 /* 12865 * If the waitq kstat was incremented when 12866 * sd_set_retry_bp() queued this bp for a retry, 12867 * then we must set up statp so that the waitq 12868 * count will get decremented correctly below. 12869 * Also we must clear un->un_retry_statp to 12870 * ensure that we do not act on a stale value 12871 * in this field. 12872 */ 12873 if ((un->un_retry_statp == kstat_waitq_enter) || 12874 (un->un_retry_statp == 12875 kstat_runq_back_to_waitq)) { 12876 statp = kstat_waitq_to_runq; 12877 } 12878 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12879 saved_statp = un->un_retry_statp; 12880 #endif 12881 un->un_retry_statp = NULL; 12882 12883 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12884 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12885 "un_throttle:%d un_ncmds_in_transport:%d\n", 12886 un, un->un_retry_bp, un->un_throttle, 12887 un->un_ncmds_in_transport); 12888 } else { 12889 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12890 "processing priority bp:0x%p\n", bp); 12891 } 12892 12893 } else if ((bp = un->un_waitq_headp) != NULL) { 12894 /* 12895 * A command on the waitq is ready to go, but do not 12896 * send it if: 12897 * 12898 * (1) the throttle limit has been reached, or 12899 * (2) a retry is pending, or 12900 * (3) a START_STOP_UNIT callback is pending, or 12901 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12902 * command is pending. 12903 * 12904 * For all of these conditions, IO processing will 12905 * restart after the condition is cleared. 12906 */ 12907 if (un->un_ncmds_in_transport >= un->un_throttle) { 12908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12909 "sd_start_cmds: exiting, " 12910 "throttle limit reached!\n"); 12911 goto exit; 12912 } 12913 if (un->un_retry_bp != NULL) { 12914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12915 "sd_start_cmds: exiting, retry pending!\n"); 12916 goto exit; 12917 } 12918 if (un->un_startstop_timeid != NULL) { 12919 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12920 "sd_start_cmds: exiting, " 12921 "START_STOP pending!\n"); 12922 goto exit; 12923 } 12924 if (un->un_direct_priority_timeid != NULL) { 12925 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12926 "sd_start_cmds: exiting, " 12927 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 12928 goto exit; 12929 } 12930 12931 /* Dequeue the command */ 12932 un->un_waitq_headp = bp->av_forw; 12933 if (un->un_waitq_headp == NULL) { 12934 un->un_waitq_tailp = NULL; 12935 } 12936 bp->av_forw = NULL; 12937 statp = kstat_waitq_to_runq; 12938 SD_TRACE(SD_LOG_IO_CORE, un, 12939 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12940 12941 } else { 12942 /* No work to do so bail out now */ 12943 SD_TRACE(SD_LOG_IO_CORE, un, 12944 "sd_start_cmds: no more work, exiting!\n"); 12945 goto exit; 12946 } 12947 12948 /* 12949 * Reset the state to normal. This is the mechanism by which 12950 * the state transitions from either SD_STATE_RWAIT or 12951 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 12952 * If state is SD_STATE_PM_CHANGING then this command is 12953 * part of the device power control and the state must 12954 * not be put back to normal. Doing so would 12955 * allow new commands to proceed when they shouldn't; 12956 * the device may be going off.
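 *
 * Sketch of the transitions performed here (summarizing the text
 * above; not new behavior):
 *
 *	SD_STATE_RWAIT / SD_STATE_OFFLINE -> SD_STATE_NORMAL
 *	SD_STATE_SUSPENDED / SD_STATE_PM_CHANGING -> (left unchanged)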
12957 */ 12958 if ((un->un_state != SD_STATE_SUSPENDED) && 12959 (un->un_state != SD_STATE_PM_CHANGING)) { 12960 New_state(un, SD_STATE_NORMAL); 12961 } 12962 12963 xp = SD_GET_XBUF(bp); 12964 ASSERT(xp != NULL); 12965 12966 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12967 /* 12968 * Allocate the scsi_pkt if we need one, or attach DMA 12969 * resources if we have a scsi_pkt that needs them. The 12970 * latter should only occur for commands that are being 12971 * retried. 12972 */ 12973 if ((xp->xb_pktp == NULL) || 12974 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 12975 #else 12976 if (xp->xb_pktp == NULL) { 12977 #endif 12978 /* 12979 * There is no scsi_pkt allocated for this buf. Call 12980 * the initpkt function to allocate & init one. 12981 * 12982 * The scsi_init_pkt runout callback functionality is 12983 * implemented as follows: 12984 * 12985 * 1) The initpkt function always calls 12986 * scsi_init_pkt(9F) with sdrunout specified as the 12987 * callback routine. 12988 * 2) A successful packet allocation is initialized and 12989 * the I/O is transported. 12990 * 3) The I/O associated with an allocation resource 12991 * failure is left on its queue to be retried via 12992 * runout or the next I/O. 12993 * 4) The I/O associated with a DMA error is removed 12994 * from the queue and failed with EIO. Processing of 12995 * the transport queues is also halted to be 12996 * restarted via runout or the next I/O. 12997 * 5) The I/O associated with a CDB size or packet 12998 * size error is removed from the queue and failed 12999 * with EIO. Processing of the transport queues is 13000 * continued. 13001 * 13002 * Note: there is no interface for canceling a runout 13003 * callback. To prevent the driver from detaching or 13004 * suspending while a runout is pending, the driver 13005 * state is set to SD_STATE_RWAIT. 13006 * 13007 * Note: using the scsi_init_pkt callback facility can 13008 * result in an I/O request persisting at the head of 13009 * the list which cannot be satisfied even after 13010 * multiple retries. In the future the driver may 13011 * implement some kind of maximum runout count before 13012 * failing an I/O. 13013 * 13014 * Note: the use of funcp below may seem superfluous, 13015 * but it helps warlock figure out the correct 13016 * initpkt function calls (see [s]sd.wlcmd). 13017 */ 13018 struct scsi_pkt *pktp; 13019 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13020 13021 ASSERT(bp != un->un_rqs_bp); 13022 13023 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13024 switch ((*funcp)(bp, &pktp)) { 13025 case SD_PKT_ALLOC_SUCCESS: 13026 xp->xb_pktp = pktp; 13027 SD_TRACE(SD_LOG_IO_CORE, un, 13028 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13029 pktp); 13030 goto got_pkt; 13031 13032 case SD_PKT_ALLOC_FAILURE: 13033 /* 13034 * Temporary (hopefully) resource depletion. 13035 * Since retries and RQS commands always have a 13036 * scsi_pkt allocated, these cases should never 13037 * get here. So the only cases this needs to 13038 * handle are a bp from the waitq (which we put 13039 * back onto the waitq for sdrunout), or a bp 13040 * sent as an immed_bp (which we just fail). 13041 */ 13042 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13043 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13044 13045 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13046 13047 if (bp == immed_bp) { 13048 /* 13049 * If SD_XB_DMA_FREED is clear, then 13050 * this is a failure to allocate a 13051 * scsi_pkt, and we must fail the 13052 * command.
13053 */ 13054 if ((xp->xb_pkt_flags & 13055 SD_XB_DMA_FREED) == 0) { 13056 break; 13057 } 13058 13059 /* 13060 * If this immediate command is NOT our 13061 * un_retry_bp, then we must fail it. 13062 */ 13063 if (bp != un->un_retry_bp) { 13064 break; 13065 } 13066 13067 /* 13068 * We get here if this cmd is our 13069 * un_retry_bp that was DMAFREED, but 13070 * scsi_init_pkt() failed to reallocate 13071 * DMA resources when we attempted to 13072 * retry it. This can happen when an 13073 * mpxio failover is in progress, but 13074 * we don't want to just fail the 13075 * command in this case. 13076 * 13077 * Use timeout(9F) to restart it after 13078 * a 100ms delay. We don't want to 13079 * let sdrunout() restart it, because 13080 * sdrunout() is just supposed to start 13081 * commands that are sitting on the 13082 * wait queue. The un_retry_bp stays 13083 * set until the command completes, but 13084 * sdrunout can be called many times 13085 * before that happens. Since sdrunout 13086 * cannot tell if the un_retry_bp is 13087 * already in the transport, it could 13088 * end up calling scsi_transport() for 13089 * the un_retry_bp multiple times. 13090 * 13091 * Also: don't schedule the callback 13092 * if some other callback is already 13093 * pending. 13094 */ 13095 if (un->un_retry_statp == NULL) { 13096 /* 13097 * restore the kstat pointer to 13098 * keep kstat counts coherent 13099 * when we do retry the command. 13100 */ 13101 un->un_retry_statp = 13102 saved_statp; 13103 } 13104 13105 if ((un->un_startstop_timeid == NULL) && 13106 (un->un_retry_timeid == NULL) && 13107 (un->un_direct_priority_timeid == 13108 NULL)) { 13109 13110 un->un_retry_timeid = 13111 timeout( 13112 sd_start_retry_command, 13113 un, SD_RESTART_TIMEOUT); 13114 } 13115 goto exit; 13116 } 13117 13118 #else 13119 if (bp == immed_bp) { 13120 break; /* Just fail the command */ 13121 } 13122 #endif 13123 13124 /* Add the buf back to the head of the waitq */ 13125 bp->av_forw = un->un_waitq_headp; 13126 un->un_waitq_headp = bp; 13127 if (un->un_waitq_tailp == NULL) { 13128 un->un_waitq_tailp = bp; 13129 } 13130 goto exit; 13131 13132 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13133 /* 13134 * HBA DMA resource failure. Fail the command 13135 * and continue processing of the queues. 13136 */ 13137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13138 "sd_start_cmds: " 13139 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13140 break; 13141 13142 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13143 /* 13144 * Note:x86: Partial DMA mapping not supported 13145 * for USCSI commands, and all the needed DMA 13146 * resources were not allocated. 13147 */ 13148 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13149 "sd_start_cmds: " 13150 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13151 break; 13152 13153 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13154 /* 13155 * Note:x86: Request cannot fit into CDB based 13156 * on lba and len. 13157 */ 13158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13159 "sd_start_cmds: " 13160 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13161 break; 13162 13163 default: 13164 /* Should NEVER get here! */ 13165 panic("scsi_initpkt error"); 13166 /*NOTREACHED*/ 13167 } 13168 13169 /* 13170 * Fatal error in allocating a scsi_pkt for this buf. 13171 * Update kstats & return the buf with an error code. 13172 * We must use sd_return_failed_command_no_restart() to 13173 * avoid a recursive call back into sd_start_cmds(). 13174 * However this also means that we must keep processing 13175 * the waitq here in order to avoid stalling. 
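 *
 * The shape of the recovery below, in sketch form (this mirrors the
 * code that follows; the EIO value comes from the cases above):
 *
 *	if (statp == kstat_waitq_to_runq)
 *		SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
 *	sd_return_failed_command_no_restart(un, bp, EIO);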
13176 */ 13177 if (statp == kstat_waitq_to_runq) { 13178 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13179 } 13180 sd_return_failed_command_no_restart(un, bp, EIO); 13181 if (bp == immed_bp) { 13182 /* immed_bp is gone by now, so clear this */ 13183 immed_bp = NULL; 13184 } 13185 continue; 13186 } 13187 got_pkt: 13188 if (bp == immed_bp) { 13189 /* goto the head of the class.... */ 13190 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13191 } 13192 13193 un->un_ncmds_in_transport++; 13194 SD_UPDATE_KSTATS(un, statp, bp); 13195 13196 /* 13197 * Call scsi_transport() to send the command to the target. 13198 * According to SCSA architecture, we must drop the mutex here 13199 * before calling scsi_transport() in order to avoid deadlock. 13200 * Note that the scsi_pkt's completion routine can be executed 13201 * (from interrupt context) even before the call to 13202 * scsi_transport() returns. 13203 */ 13204 SD_TRACE(SD_LOG_IO_CORE, un, 13205 "sd_start_cmds: calling scsi_transport()\n"); 13206 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13207 13208 mutex_exit(SD_MUTEX(un)); 13209 rval = scsi_transport(xp->xb_pktp); 13210 mutex_enter(SD_MUTEX(un)); 13211 13212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13213 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13214 13215 switch (rval) { 13216 case TRAN_ACCEPT: 13217 /* Clear this with every pkt accepted by the HBA */ 13218 un->un_tran_fatal_count = 0; 13219 break; /* Success; try the next cmd (if any) */ 13220 13221 case TRAN_BUSY: 13222 un->un_ncmds_in_transport--; 13223 ASSERT(un->un_ncmds_in_transport >= 0); 13224 13225 /* 13226 * Don't retry request sense; the sense data 13227 * is lost when another request is sent. 13228 * Free up the rqs buf and retry 13229 * the original failed cmd. Update kstat. 13230 */ 13231 if (bp == un->un_rqs_bp) { 13232 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13233 bp = sd_mark_rqs_idle(un, xp); 13234 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13235 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13236 kstat_waitq_enter); 13237 goto exit; 13238 } 13239 13240 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13241 /* 13242 * Free the DMA resources for the scsi_pkt. This will 13243 * allow mpxio to select another path the next time 13244 * we call scsi_transport() with this scsi_pkt. 13245 * See sdintr() for the rationale behind this. 13246 */ 13247 if ((un->un_f_is_fibre == TRUE) && 13248 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13249 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13250 scsi_dmafree(xp->xb_pktp); 13251 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13252 } 13253 #endif 13254 13255 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13256 /* 13257 * Commands that are SD_PATH_DIRECT_PRIORITY 13258 * are for error recovery situations. These do 13259 * not use the normal command waitq, so if they 13260 * get a TRAN_BUSY we cannot put them back onto 13261 * the waitq for later retry. One possible 13262 * problem is that there could already be some 13263 * other command on un_retry_bp that is waiting 13264 * for this one to complete, so we would be 13265 * deadlocked if we put this command back onto 13266 * the waitq for later retry (since un_retry_bp 13267 * must complete before the driver gets back to 13268 * commands on the waitq). 13269 * 13270 * To avoid deadlock we must schedule a callback 13271 * that will restart this command after a set 13272 * interval.
This should keep retrying for as 13273 * long as the underlying transport keeps 13274 * returning TRAN_BUSY (just like for other 13275 * commands). Use the same timeout interval as 13276 * for the ordinary TRAN_BUSY retry. 13277 */ 13278 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13279 "sd_start_cmds: scsi_transport() returned " 13280 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13281 13282 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13283 un->un_direct_priority_timeid = 13284 timeout(sd_start_direct_priority_command, 13285 bp, SD_BSY_TIMEOUT / 500); 13286 13287 goto exit; 13288 } 13289 13290 /* 13291 * For TRAN_BUSY, we want to reduce the throttle value, 13292 * unless we are retrying a command. 13293 */ 13294 if (bp != un->un_retry_bp) { 13295 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13296 } 13297 13298 /* 13299 * Set up the bp to be tried again 10 ms later. 13300 * Note:x86: Is there a timeout value in the sd_lun 13301 * for this condition? 13302 */ 13303 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13304 kstat_runq_back_to_waitq); 13305 goto exit; 13306 13307 case TRAN_FATAL_ERROR: 13308 un->un_tran_fatal_count++; 13309 /* FALLTHRU */ 13310 13311 case TRAN_BADPKT: 13312 default: 13313 un->un_ncmds_in_transport--; 13314 ASSERT(un->un_ncmds_in_transport >= 0); 13315 13316 /* 13317 * If this is our REQUEST SENSE command with a 13318 * transport error, we must get back the pointers 13319 * to the original buf, and mark the REQUEST 13320 * SENSE command as "available". 13321 */ 13322 if (bp == un->un_rqs_bp) { 13323 bp = sd_mark_rqs_idle(un, xp); 13324 xp = SD_GET_XBUF(bp); 13325 } else { 13326 /* 13327 * Legacy behavior: do not update transport 13328 * error count for request sense commands. 13329 */ 13330 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13331 } 13332 13333 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13334 sd_print_transport_rejected_message(un, xp, rval); 13335 13336 /* 13337 * We must use sd_return_failed_command_no_restart() to 13338 * avoid a recursive call back into sd_start_cmds(). 13339 * However this also means that we must keep processing 13340 * the waitq here in order to avoid stalling. 13341 */ 13342 sd_return_failed_command_no_restart(un, bp, EIO); 13343 13344 /* 13345 * Notify any threads waiting in sd_ddi_suspend() that 13346 * a command completion has occurred. 13347 */ 13348 if (un->un_state == SD_STATE_SUSPENDED) { 13349 cv_broadcast(&un->un_disk_busy_cv); 13350 } 13351 13352 if (bp == immed_bp) { 13353 /* immed_bp is gone by now, so clear this */ 13354 immed_bp = NULL; 13355 } 13356 break; 13357 } 13358 13359 } while (immed_bp == NULL); 13360 13361 exit: 13362 ASSERT(mutex_owned(SD_MUTEX(un))); 13363 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13364 } 13365 13366 13367 /* 13368 * Function: sd_return_command 13369 * 13370 * Description: Returns a command to its originator (with or without an 13371 * error). Also starts commands waiting to be transported 13372 * to the target. 
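 *
 * Usage sketch (hypothetical caller, mirroring the completion path):
 * sdintr() hands a finished command back while holding SD_MUTEX:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_return_command(un, bp);	(iodone chain + waitq restart)
 *	mutex_exit(SD_MUTEX(un));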
13373 * 13374 * Context: May be called from interrupt, kernel, or timeout context 13375 */ 13376 13377 static void 13378 sd_return_command(struct sd_lun *un, struct buf *bp) 13379 { 13380 struct sd_xbuf *xp; 13381 #if defined(__i386) || defined(__amd64) 13382 struct scsi_pkt *pktp; 13383 #endif 13384 13385 ASSERT(bp != NULL); 13386 ASSERT(un != NULL); 13387 ASSERT(mutex_owned(SD_MUTEX(un))); 13388 ASSERT(bp != un->un_rqs_bp); 13389 xp = SD_GET_XBUF(bp); 13390 ASSERT(xp != NULL); 13391 13392 #if defined(__i386) || defined(__amd64) 13393 pktp = SD_GET_PKTP(bp); 13394 #endif 13395 13396 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13397 13398 #if defined(__i386) || defined(__amd64) 13399 /* 13400 * Note:x86: check for the "sdrestart failed" case. 13401 */ 13402 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13403 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13404 (xp->xb_pktp->pkt_resid == 0)) { 13405 13406 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13407 /* 13408 * Successfully set up next portion of cmd 13409 * transfer, try sending it 13410 */ 13411 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13412 NULL, NULL, 0, (clock_t)0, NULL); 13413 sd_start_cmds(un, NULL); 13414 return; /* Note:x86: need a return here? */ 13415 } 13416 } 13417 #endif 13418 13419 /* 13420 * If this is the failfast bp, clear it from un_failfast_bp. This 13421 * can happen if upon being re-tried the failfast bp either 13422 * succeeded or encountered another error (possibly even a different 13423 * error than the one that precipitated the failfast state, but in 13424 * that case it would have had to exhaust retries as well). Regardless, 13425 * this should not occur whenever the instance is in the active 13426 * failfast state. 13427 */ 13428 if (bp == un->un_failfast_bp) { 13429 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13430 un->un_failfast_bp = NULL; 13431 } 13432 13433 /* 13434 * Clear the failfast state upon successful completion of ANY cmd. 13435 */ 13436 if (bp->b_error == 0) { 13437 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13438 } 13439 13440 /* 13441 * This is used if the command was retried one or more times. Show that 13442 * we are done with it, and allow processing of the waitq to resume. 13443 */ 13444 if (bp == un->un_retry_bp) { 13445 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13446 "sd_return_command: un:0x%p: " 13447 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13448 un->un_retry_bp = NULL; 13449 un->un_retry_statp = NULL; 13450 } 13451 13452 SD_UPDATE_RDWR_STATS(un, bp); 13453 SD_UPDATE_PARTITION_STATS(un, bp); 13454 13455 switch (un->un_state) { 13456 case SD_STATE_SUSPENDED: 13457 /* 13458 * Notify any threads waiting in sd_ddi_suspend() that 13459 * a command completion has occurred. 13460 */ 13461 cv_broadcast(&un->un_disk_busy_cv); 13462 break; 13463 default: 13464 sd_start_cmds(un, NULL); 13465 break; 13466 } 13467 13468 /* Return this command up the iodone chain to its originator. */ 13469 mutex_exit(SD_MUTEX(un)); 13470 13471 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13472 xp->xb_pktp = NULL; 13473 13474 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13475 13476 ASSERT(!mutex_owned(SD_MUTEX(un))); 13477 mutex_enter(SD_MUTEX(un)); 13478 13479 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13480 } 13481 13482 13483 /* 13484 * Function: sd_return_failed_command 13485 * 13486 * Description: Command completion when an error occurred. 
13487 * 13488 * Context: May be called from interrupt context 13489 */ 13490 13491 static void 13492 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13493 { 13494 ASSERT(bp != NULL); 13495 ASSERT(un != NULL); 13496 ASSERT(mutex_owned(SD_MUTEX(un))); 13497 13498 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13499 "sd_return_failed_command: entry\n"); 13500 13501 /* 13502 * b_resid could already be nonzero due to a partial data 13503 * transfer, so do not change it here. 13504 */ 13505 SD_BIOERROR(bp, errcode); 13506 13507 sd_return_command(un, bp); 13508 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13509 "sd_return_failed_command: exit\n"); 13510 } 13511 13512 13513 /* 13514 * Function: sd_return_failed_command_no_restart 13515 * 13516 * Description: Same as sd_return_failed_command, but ensures that no 13517 * call back into sd_start_cmds will be issued. 13518 * 13519 * Context: May be called from interrupt context 13520 */ 13521 13522 static void 13523 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13524 int errcode) 13525 { 13526 struct sd_xbuf *xp; 13527 13528 ASSERT(bp != NULL); 13529 ASSERT(un != NULL); 13530 ASSERT(mutex_owned(SD_MUTEX(un))); 13531 xp = SD_GET_XBUF(bp); 13532 ASSERT(xp != NULL); 13533 ASSERT(errcode != 0); 13534 13535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13536 "sd_return_failed_command_no_restart: entry\n"); 13537 13538 /* 13539 * b_resid could already be nonzero due to a partial data 13540 * transfer, so do not change it here. 13541 */ 13542 SD_BIOERROR(bp, errcode); 13543 13544 /* 13545 * If this is the failfast bp, clear it. This can happen if the 13546 * failfast bp encountered a fatal error when we attempted to 13547 * re-try it (such as a scsi_transport(9F) failure). However 13548 * we should NOT be in an active failfast state if the failfast 13549 * bp is not NULL. 13550 */ 13551 if (bp == un->un_failfast_bp) { 13552 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13553 un->un_failfast_bp = NULL; 13554 } 13555 13556 if (bp == un->un_retry_bp) { 13557 /* 13558 * This command was retried one or more times. Show that we are 13559 * done with it, and allow processing of the waitq to resume. 13560 */ 13561 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13562 "sd_return_failed_command_no_restart: " 13563 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13564 un->un_retry_bp = NULL; 13565 un->un_retry_statp = NULL; 13566 } 13567 13568 SD_UPDATE_RDWR_STATS(un, bp); 13569 SD_UPDATE_PARTITION_STATS(un, bp); 13570 13571 mutex_exit(SD_MUTEX(un)); 13572 13573 if (xp->xb_pktp != NULL) { 13574 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13575 xp->xb_pktp = NULL; 13576 } 13577 13578 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13579 13580 mutex_enter(SD_MUTEX(un)); 13581 13582 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13583 "sd_return_failed_command_no_restart: exit\n"); 13584 } 13585 13586 13587 /* 13588 * Function: sd_retry_command 13589 * 13590 * Description: Queue up a command for retry, or (optionally) fail it 13591 * if retry counts are exhausted. 13592 * 13593 * Arguments: un - Pointer to the sd_lun struct for the target. 13594 * 13595 * bp - Pointer to the buf for the command to be retried. 13596 * 13597 * retry_check_flag - Flag to see which (if any) of the retry 13598 * counts should be decremented/checked. If the indicated 13599 * retry count is exhausted, then the command will not be 13600 * retried; it will be failed instead.
This should use a 13601 * value equal to one of the following: 13602 * 13603 * SD_RETRIES_NOCHECK 13604 * SD_RETRIES_STANDARD 13605 * SD_RETRIES_VICTIM 13606 * 13607 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13608 * if the check should be made to see if FLAG_ISOLATE is set 13609 * in the pkt. If FLAG_ISOLATE is set, then the command is 13610 * not retried; it is simply failed. 13611 * 13612 * user_funcp - Ptr to function to call before dispatching the 13613 * command. May be NULL if no action needs to be performed. 13614 * (Primarily intended for printing messages.) 13615 * 13616 * user_arg - Optional argument to be passed along to 13617 * the user_funcp call. 13618 * 13619 * failure_code - errno return code to set in the bp if the 13620 * command is going to be failed. 13621 * 13622 * retry_delay - Retry delay interval in (clock_t) units. May 13623 * be zero which indicates that the retry should be retried 13624 * immediately (ie, without an intervening delay). 13625 * 13626 * statp - Ptr to kstat function to be updated if the command 13627 * is queued for a delayed retry. May be NULL if no kstat 13628 * update is desired. 13629 * 13630 * Context: May be called from interrupt context. 13631 */ 13632 13633 static void 13634 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13635 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13636 code), void *user_arg, int failure_code, clock_t retry_delay, 13637 void (*statp)(kstat_io_t *)) 13638 { 13639 struct sd_xbuf *xp; 13640 struct scsi_pkt *pktp; 13641 13642 ASSERT(un != NULL); 13643 ASSERT(mutex_owned(SD_MUTEX(un))); 13644 ASSERT(bp != NULL); 13645 xp = SD_GET_XBUF(bp); 13646 ASSERT(xp != NULL); 13647 pktp = SD_GET_PKTP(bp); 13648 ASSERT(pktp != NULL); 13649 13650 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13651 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13652 13653 /* 13654 * If we are syncing or dumping, fail the command to avoid 13655 * recursively calling back into scsi_transport(). 13656 */ 13657 if (ddi_in_panic()) { 13658 goto fail_command_no_log; 13659 } 13660 13661 /* 13662 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13663 * log an error and fail the command. 13664 */ 13665 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13666 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13667 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13668 sd_dump_memory(un, SD_LOG_IO, "CDB", 13669 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13670 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13671 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13672 goto fail_command; 13673 } 13674 13675 /* 13676 * If we are suspended, then put the command onto the head of the 13677 * wait queue since we don't want to start more commands, and 13678 * clear the un_retry_bp. The next time we are resumed, we will 13679 * handle the command in the wait queue.
13680 */ 13681 switch (un->un_state) { 13682 case SD_STATE_SUSPENDED: 13683 case SD_STATE_DUMPING: 13684 bp->av_forw = un->un_waitq_headp; 13685 un->un_waitq_headp = bp; 13686 if (un->un_waitq_tailp == NULL) { 13687 un->un_waitq_tailp = bp; 13688 } 13689 if (bp == un->un_retry_bp) { 13690 un->un_retry_bp = NULL; 13691 un->un_retry_statp = NULL; 13692 } 13693 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13694 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13695 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13696 return; 13697 default: 13698 break; 13699 } 13700 13701 /* 13702 * If the caller wants us to check FLAG_ISOLATE, then see if that 13703 * is set; if it is then we do not want to retry the command. 13704 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13705 */ 13706 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13707 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13708 goto fail_command; 13709 } 13710 } 13711 13712 13713 /* 13714 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13715 * command timeout or a selection timeout has occurred. This means 13716 * that we were unable to establish any kind of communication with 13717 * the target, and subsequent retries and/or commands are likely 13718 * to encounter similar results and take a long time to complete. 13719 * 13720 * If this is a failfast error condition, we need to update the 13721 * failfast state, even if this bp does not have B_FAILFAST set. 13722 */ 13723 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13724 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13725 ASSERT(un->un_failfast_bp == NULL); 13726 /* 13727 * If we are already in the active failfast state, and 13728 * another failfast error condition has been detected, 13729 * then fail this command if it has B_FAILFAST set. 13730 * If B_FAILFAST is clear, then maintain the legacy 13731 * behavior of retrying heroically, even though this will 13732 * take a lot more time to fail the command. 13733 */ 13734 if (bp->b_flags & B_FAILFAST) { 13735 goto fail_command; 13736 } 13737 } else { 13738 /* 13739 * We're not in the active failfast state, but we 13740 * have a failfast error condition, so we must begin 13741 * transition to the next state. We do this regardless 13742 * of whether or not this bp has B_FAILFAST set. 13743 */ 13744 if (un->un_failfast_bp == NULL) { 13745 /* 13746 * This is the first bp to meet a failfast 13747 * condition so save it on un_failfast_bp & 13748 * do normal retry processing. Do not enter 13749 * active failfast state yet. This marks 13750 * entry into the "failfast pending" state. 13751 */ 13752 un->un_failfast_bp = bp; 13753 13754 } else if (un->un_failfast_bp == bp) { 13755 /* 13756 * This is the second time *this* bp has 13757 * encountered a failfast error condition, 13758 * so enter active failfast state & flush 13759 * queues as appropriate. 13760 */ 13761 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13762 un->un_failfast_bp = NULL; 13763 sd_failfast_flushq(un); 13764 13765 /* 13766 * Fail this bp now if B_FAILFAST set; 13767 * otherwise continue with retries. (It would 13768 * be pretty ironic if this bp succeeded on a 13769 * subsequent retry after we just flushed all 13770 * the queues).
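 *
 * Summary sketch of the failfast transitions described above:
 *
 *	INACTIVE --(1st failfast error: save bp)--> "pending"
 *	   ("pending" == un_failfast_bp != NULL, state still INACTIVE)
 *	"pending" --(same bp fails again)--> ACTIVE + sd_failfast_flushq()
 *	ACTIVE --(successful completion or non-failfast retry)--> INACTIVE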
13771 */ 13772 if (bp->b_flags & B_FAILFAST) { 13773 goto fail_command; 13774 } 13775 13776 #if !defined(lint) && !defined(__lint) 13777 } else { 13778 /* 13779 * If neither of the preceding conditionals 13780 * was true, it means that there is some 13781 * *other* bp that has met an initial failfast 13782 * condition and is currently either being 13783 * retried or is waiting to be retried. In 13784 * that case we should perform normal retry 13785 * processing on *this* bp, since there is a 13786 * chance that the current failfast condition 13787 * is transient and recoverable. If that does 13788 * not turn out to be the case, then retries 13789 * will be cleared when the wait queue is 13790 * flushed anyway. 13791 */ 13792 #endif 13793 } 13794 } 13795 } else { 13796 /* 13797 * SD_RETRIES_FAILFAST is clear, which indicates that we 13798 * likely were able to at least establish some level of 13799 * communication with the target and subsequent commands 13800 * and/or retries are likely to get through to the target. 13801 * In this case we want to be aggressive about clearing 13802 * the failfast state. Note that this does not affect 13803 * the "failfast pending" condition. 13804 */ 13805 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13806 } 13807 13808 13809 /* 13810 * Check the specified retry count to see if we can still do 13811 * any retries with this pkt before we should fail it. 13812 */ 13813 switch (retry_check_flag & SD_RETRIES_MASK) { 13814 case SD_RETRIES_VICTIM: 13815 /* 13816 * Check the victim retry count. If exhausted, then fall 13817 * thru & check against the standard retry count. 13818 */ 13819 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13820 /* Increment count & proceed with the retry */ 13821 xp->xb_victim_retry_count++; 13822 break; 13823 } 13824 /* Victim retries exhausted, fall back to std. retries... */ 13825 /* FALLTHRU */ 13826 13827 case SD_RETRIES_STANDARD: 13828 if (xp->xb_retry_count >= un->un_retry_count) { 13829 /* Retries exhausted, fail the command */ 13830 SD_TRACE(SD_LOG_IO_CORE, un, 13831 "sd_retry_command: retries exhausted!\n"); 13832 /* 13833 * Update b_resid for failed SCMD_READ & SCMD_WRITE 13834 * commands with nonzero pkt_resid. 13835 */ 13836 if ((pktp->pkt_reason == CMD_CMPLT) && 13837 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13838 (pktp->pkt_resid != 0)) { 13839 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13840 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13841 SD_UPDATE_B_RESID(bp, pktp); 13842 } 13843 } 13844 goto fail_command; 13845 } 13846 xp->xb_retry_count++; 13847 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13848 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13849 break; 13850 13851 case SD_RETRIES_UA: 13852 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13853 /* Retries exhausted, fail the command */ 13854 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13855 "Unit Attention retries exhausted. 
" 13856 "Check the target.\n"); 13857 goto fail_command; 13858 } 13859 xp->xb_ua_retry_count++; 13860 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13861 "sd_retry_command: retry count:%d\n", 13862 xp->xb_ua_retry_count); 13863 break; 13864 13865 case SD_RETRIES_BUSY: 13866 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13867 /* Retries exhausted, fail the command */ 13868 SD_TRACE(SD_LOG_IO_CORE, un, 13869 "sd_retry_command: retries exhausted!\n"); 13870 goto fail_command; 13871 } 13872 xp->xb_retry_count++; 13873 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13874 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13875 break; 13876 13877 case SD_RETRIES_NOCHECK: 13878 default: 13879 /* No retry count to check. Just proceed with the retry */ 13880 break; 13881 } 13882 13883 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13884 13885 /* 13886 * If we were given a zero timeout, we must attempt to retry the 13887 * command immediately (ie, without a delay). 13888 */ 13889 if (retry_delay == 0) { 13890 /* 13891 * Check some limiting conditions to see if we can actually 13892 * do the immediate retry. If we cannot, then we must 13893 * fall back to queueing up a delayed retry. 13894 */ 13895 if (un->un_ncmds_in_transport >= un->un_throttle) { 13896 /* 13897 * We are at the throttle limit for the target, 13898 * fall back to delayed retry. 13899 */ 13900 retry_delay = SD_BSY_TIMEOUT; 13901 statp = kstat_waitq_enter; 13902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13903 "sd_retry_command: immed. retry hit " 13904 "throttle!\n"); 13905 } else { 13906 /* 13907 * We're clear to proceed with the immediate retry. 13908 * First call the user-provided function (if any) 13909 */ 13910 if (user_funcp != NULL) { 13911 (*user_funcp)(un, bp, user_arg, 13912 SD_IMMEDIATE_RETRY_ISSUED); 13913 #ifdef __lock_lint 13914 sd_print_incomplete_msg(un, bp, user_arg, 13915 SD_IMMEDIATE_RETRY_ISSUED); 13916 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13917 SD_IMMEDIATE_RETRY_ISSUED); 13918 sd_print_sense_failed_msg(un, bp, user_arg, 13919 SD_IMMEDIATE_RETRY_ISSUED); 13920 #endif 13921 } 13922 13923 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13924 "sd_retry_command: issuing immediate retry\n"); 13925 13926 /* 13927 * Call sd_start_cmds() to transport the command to 13928 * the target. 13929 */ 13930 sd_start_cmds(un, bp); 13931 13932 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13933 "sd_retry_command exit\n"); 13934 return; 13935 } 13936 } 13937 13938 /* 13939 * Set up to retry the command after a delay. 13940 * First call the user-provided function (if any) 13941 */ 13942 if (user_funcp != NULL) { 13943 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13944 } 13945 13946 sd_set_retry_bp(un, bp, retry_delay, statp); 13947 13948 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13949 return; 13950 13951 fail_command: 13952 13953 if (user_funcp != NULL) { 13954 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13955 } 13956 13957 fail_command_no_log: 13958 13959 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13960 "sd_retry_command: returning failed command\n"); 13961 13962 sd_return_failed_command(un, bp, failure_code); 13963 13964 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13965 } 13966 13967 13968 /* 13969 * Function: sd_set_retry_bp 13970 * 13971 * Description: Set up the given bp for retry. 
13972 * 13973 * Arguments: un - ptr to associated softstate 13974 * bp - ptr to buf(9S) for the command 13975 * retry_delay - time interval before issuing retry (may be 0) 13976 * statp - optional pointer to kstat function 13977 * 13978 * Context: May be called under interrupt context 13979 */ 13980 13981 static void 13982 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13983 void (*statp)(kstat_io_t *)) 13984 { 13985 ASSERT(un != NULL); 13986 ASSERT(mutex_owned(SD_MUTEX(un))); 13987 ASSERT(bp != NULL); 13988 13989 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13990 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13991 13992 /* 13993 * Indicate that the command is being retried. This will not allow any 13994 * other commands on the wait queue to be transported to the target 13995 * until this command has been completed (success or failure). The 13996 * "retry command" is not transported to the target until the given 13997 * time delay expires, unless the user specified a 0 retry_delay. 13998 * 13999 * Note: the timeout(9F) callback routine is what actually calls 14000 * sd_start_cmds() to transport the command, with the exception of a 14001 * zero retry_delay. The only current implementor of a zero retry delay 14002 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14003 */ 14004 if (un->un_retry_bp == NULL) { 14005 ASSERT(un->un_retry_statp == NULL); 14006 un->un_retry_bp = bp; 14007 14008 /* 14009 * If the user has not specified a delay the command should 14010 * be queued and no timeout should be scheduled. 14011 */ 14012 if (retry_delay == 0) { 14013 /* 14014 * Save the kstat pointer that will be used in the 14015 * call to SD_UPDATE_KSTATS() below, so that 14016 * sd_start_cmds() can correctly decrement the waitq 14017 * count when it is time to transport this command. 14018 */ 14019 un->un_retry_statp = statp; 14020 goto done; 14021 } 14022 } 14023 14024 if (un->un_retry_bp == bp) { 14025 /* 14026 * Save the kstat pointer that will be used in the call to 14027 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14028 * correctly decrement the waitq count when it is time to 14029 * transport this command. 14030 */ 14031 un->un_retry_statp = statp; 14032 14033 /* 14034 * Schedule a timeout if: 14035 * 1) The user has specified a delay. 14036 * 2) There is not a START_STOP_UNIT callback pending. 14037 * 14038 * If no delay has been specified, then it is up to the caller 14039 * to ensure that IO processing continues without stalling. 14040 * Effectively, this means that the caller will issue the 14041 * required call to sd_start_cmds(). The START_STOP_UNIT 14042 * callback does this after the START STOP UNIT command has 14043 * completed. In either of these cases we should not schedule 14044 * a timeout callback here. Also don't schedule the timeout if 14045 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14046 */ 14047 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14048 (un->un_direct_priority_timeid == NULL)) { 14049 un->un_retry_timeid = 14050 timeout(sd_start_retry_command, un, retry_delay); 14051 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14052 "sd_set_retry_bp: setting timeout: un: 0x%p" 14053 " bp:0x%p un_retry_timeid:0x%p\n", 14054 un, bp, un->un_retry_timeid); 14055 } 14056 } else { 14057 /* 14058 * We only get in here if there is already another command 14059 * waiting to be retried. 
In this case, we just put the 14060 * given command onto the wait queue, so it can be transported 14061 * after the current retry command has completed. 14062 * 14063 * Also we have to make sure that if the command at the head 14064 * of the wait queue is the un_failfast_bp, that we do not 14065 * put ahead of it any other commands that are to be retried. 14066 */ 14067 if ((un->un_failfast_bp != NULL) && 14068 (un->un_failfast_bp == un->un_waitq_headp)) { 14069 /* 14070 * Enqueue this command AFTER the first command on 14071 * the wait queue (which is also un_failfast_bp). 14072 */ 14073 bp->av_forw = un->un_waitq_headp->av_forw; 14074 un->un_waitq_headp->av_forw = bp; 14075 if (un->un_waitq_headp == un->un_waitq_tailp) { 14076 un->un_waitq_tailp = bp; 14077 } 14078 } else { 14079 /* Enqueue this command at the head of the waitq. */ 14080 bp->av_forw = un->un_waitq_headp; 14081 un->un_waitq_headp = bp; 14082 if (un->un_waitq_tailp == NULL) { 14083 un->un_waitq_tailp = bp; 14084 } 14085 } 14086 14087 if (statp == NULL) { 14088 statp = kstat_waitq_enter; 14089 } 14090 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14091 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14092 } 14093 14094 done: 14095 if (statp != NULL) { 14096 SD_UPDATE_KSTATS(un, statp, bp); 14097 } 14098 14099 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14100 "sd_set_retry_bp: exit un:0x%p\n", un); 14101 } 14102 14103 14104 /* 14105 * Function: sd_start_retry_command 14106 * 14107 * Description: Start the command that has been waiting on the target's 14108 * retry queue. Called from timeout(9F) context after the 14109 * retry delay interval has expired. 14110 * 14111 * Arguments: arg - pointer to associated softstate for the device. 14112 * 14113 * Context: timeout(9F) thread context. May not sleep. 14114 */ 14115 14116 static void 14117 sd_start_retry_command(void *arg) 14118 { 14119 struct sd_lun *un = arg; 14120 14121 ASSERT(un != NULL); 14122 ASSERT(!mutex_owned(SD_MUTEX(un))); 14123 14124 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14125 "sd_start_retry_command: entry\n"); 14126 14127 mutex_enter(SD_MUTEX(un)); 14128 14129 un->un_retry_timeid = NULL; 14130 14131 if (un->un_retry_bp != NULL) { 14132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14133 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14134 un, un->un_retry_bp); 14135 sd_start_cmds(un, un->un_retry_bp); 14136 } 14137 14138 mutex_exit(SD_MUTEX(un)); 14139 14140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14141 "sd_start_retry_command: exit\n"); 14142 } 14143 14144 14145 /* 14146 * Function: sd_start_direct_priority_command 14147 * 14148 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14149 * received TRAN_BUSY when we called scsi_transport() to send it 14150 * to the underlying HBA. This function is called from timeout(9F) 14151 * context after the delay interval has expired. 14152 * 14153 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14154 * 14155 * Context: timeout(9F) thread context. May not sleep. 
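 *
 * This callback is armed from sd_start_cmds() when a DIRECT_PRIORITY
 * command receives TRAN_BUSY (sketch of the arming call from above):
 *
 *	un->un_direct_priority_timeid =
 *	    timeout(sd_start_direct_priority_command, bp,
 *	    SD_BSY_TIMEOUT / 500);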
14156 */ 14157 14158 static void 14159 sd_start_direct_priority_command(void *arg) 14160 { 14161 struct buf *priority_bp = arg; 14162 struct sd_lun *un; 14163 14164 ASSERT(priority_bp != NULL); 14165 un = SD_GET_UN(priority_bp); 14166 ASSERT(un != NULL); 14167 ASSERT(!mutex_owned(SD_MUTEX(un))); 14168 14169 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14170 "sd_start_direct_priority_command: entry\n"); 14171 14172 mutex_enter(SD_MUTEX(un)); 14173 un->un_direct_priority_timeid = NULL; 14174 sd_start_cmds(un, priority_bp); 14175 mutex_exit(SD_MUTEX(un)); 14176 14177 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14178 "sd_start_direct_priority_command: exit\n"); 14179 } 14180 14181 14182 /* 14183 * Function: sd_send_request_sense_command 14184 * 14185 * Description: Sends a REQUEST SENSE command to the target 14186 * 14187 * Context: May be called from interrupt context. 14188 */ 14189 14190 static void 14191 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14192 struct scsi_pkt *pktp) 14193 { 14194 ASSERT(bp != NULL); 14195 ASSERT(un != NULL); 14196 ASSERT(mutex_owned(SD_MUTEX(un))); 14197 14198 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14199 "entry: buf:0x%p\n", bp); 14200 14201 /* 14202 * If we are syncing or dumping, then fail the command to avoid a 14203 * recursive callback into scsi_transport(). Also fail the command 14204 * if we are suspended (legacy behavior). 14205 */ 14206 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14207 (un->un_state == SD_STATE_DUMPING)) { 14208 sd_return_failed_command(un, bp, EIO); 14209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14210 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14211 return; 14212 } 14213 14214 /* 14215 * Retry the failed command and don't issue the request sense if: 14216 * 1) the sense buf is busy 14217 * 2) we have 1 or more outstanding commands on the target 14218 * (the sense data will be cleared or invalidated anyway) 14219 * 14220 * Note: There could be an issue with not checking a retry limit here; 14221 * the problem is determining which retry limit to check. 14222 */ 14223 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14224 /* Don't retry if the command is flagged as non-retryable */ 14225 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14226 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14227 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14228 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14229 "sd_send_request_sense_command: " 14230 "at full throttle, retrying exit\n"); 14231 } else { 14232 sd_return_failed_command(un, bp, EIO); 14233 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14234 "sd_send_request_sense_command: " 14235 "at full throttle, non-retryable exit\n"); 14236 } 14237 return; 14238 } 14239 14240 sd_mark_rqs_busy(un, bp); 14241 sd_start_cmds(un, un->un_rqs_bp); 14242 14243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14244 "sd_send_request_sense_command: exit\n"); 14245 } 14246 14247 14248 /* 14249 * Function: sd_mark_rqs_busy 14250 * 14251 * Description: Indicate that the request sense bp for this instance is 14252 * in use.
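 *
 * The request-sense handshake, in sketch form (all names are from
 * this file): sd_send_request_sense_command() marks the rqs
 * resources busy and transports un_rqs_bp; sdintr() later retrieves
 * the original bp and releases them:
 *
 *	sd_mark_rqs_busy(un, bp);
 *	sd_start_cmds(un, un->un_rqs_bp);
 *	...
 *	bp = sd_mark_rqs_idle(un, sense_xp);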
14253 * 14254 * Context: May be called under interrupt context 14255 */ 14256 14257 static void 14258 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14259 { 14260 struct sd_xbuf *sense_xp; 14261 14262 ASSERT(un != NULL); 14263 ASSERT(bp != NULL); 14264 ASSERT(mutex_owned(SD_MUTEX(un))); 14265 ASSERT(un->un_sense_isbusy == 0); 14266 14267 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14268 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14269 14270 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14271 ASSERT(sense_xp != NULL); 14272 14273 SD_INFO(SD_LOG_IO, un, 14274 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14275 14276 ASSERT(sense_xp->xb_pktp != NULL); 14277 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14278 == (FLAG_SENSING | FLAG_HEAD)); 14279 14280 un->un_sense_isbusy = 1; 14281 un->un_rqs_bp->b_resid = 0; 14282 sense_xp->xb_pktp->pkt_resid = 0; 14283 sense_xp->xb_pktp->pkt_reason = 0; 14284 14285 /* So we can get back the bp at interrupt time! */ 14286 sense_xp->xb_sense_bp = bp; 14287 14288 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14289 14290 /* 14291 * Mark this buf as awaiting sense data. (This is already set in 14292 * the pkt_flags for the RQS packet.) 14293 */ 14294 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14295 14296 sense_xp->xb_retry_count = 0; 14297 sense_xp->xb_victim_retry_count = 0; 14298 sense_xp->xb_ua_retry_count = 0; 14299 sense_xp->xb_nr_retry_count = 0; 14300 sense_xp->xb_dma_resid = 0; 14301 14302 /* Clean up the fields for auto-request sense */ 14303 sense_xp->xb_sense_status = 0; 14304 sense_xp->xb_sense_state = 0; 14305 sense_xp->xb_sense_resid = 0; 14306 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14307 14308 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14309 } 14310 14311 14312 /* 14313 * Function: sd_mark_rqs_idle 14314 * 14315 * Description: SD_MUTEX must be held continuously through this routine 14316 * to prevent reuse of the rqs struct before the caller can 14317 * complete its processing. 14318 * 14319 * Return Code: Pointer to the RQS buf 14320 * 14321 * Context: May be called under interrupt context 14322 */ 14323 14324 static struct buf * 14325 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14326 { 14327 struct buf *bp; 14328 ASSERT(un != NULL); 14329 ASSERT(sense_xp != NULL); 14330 ASSERT(mutex_owned(SD_MUTEX(un))); 14331 ASSERT(un->un_sense_isbusy != 0); 14332 14333 un->un_sense_isbusy = 0; 14334 bp = sense_xp->xb_sense_bp; 14335 sense_xp->xb_sense_bp = NULL; 14336 14337 /* This pkt is no longer interested in getting sense data */ 14338 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14339 14340 return (bp); 14341 } 14342 14343 14344 14345 /* 14346 * Function: sd_alloc_rqs 14347 * 14348 * Description: Set up the unit to receive auto request sense data 14349 * 14350 * Return Code: DDI_SUCCESS or DDI_FAILURE 14351 * 14352 * Context: Called under attach(9E) context 14353 */ 14354 14355 static int 14356 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14357 { 14358 struct sd_xbuf *xp; 14359 14360 ASSERT(un != NULL); 14361 ASSERT(!mutex_owned(SD_MUTEX(un))); 14362 ASSERT(un->un_rqs_bp == NULL); 14363 ASSERT(un->un_rqs_pktp == NULL); 14364 14365 /* 14366 * First allocate the required buf and scsi_pkt structs, then set up 14367 * the CDB in the scsi_pkt for a REQUEST SENSE command.
14368 */ 14369 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14370 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14371 if (un->un_rqs_bp == NULL) { 14372 return (DDI_FAILURE); 14373 } 14374 14375 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14376 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14377 14378 if (un->un_rqs_pktp == NULL) { 14379 sd_free_rqs(un); 14380 return (DDI_FAILURE); 14381 } 14382 14383 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14384 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14385 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 14386 14387 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14388 14389 /* Set up the other needed members in the ARQ scsi_pkt. */ 14390 un->un_rqs_pktp->pkt_comp = sdintr; 14391 un->un_rqs_pktp->pkt_time = sd_io_time; 14392 un->un_rqs_pktp->pkt_flags |= 14393 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14394 14395 /* 14396 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14397 * provide any initpkt, destroypkt routines as we take care of 14398 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14399 */ 14400 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14401 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14402 xp->xb_pktp = un->un_rqs_pktp; 14403 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14404 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14405 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14406 14407 /* 14408 * Save the pointer to the request sense private bp so it can 14409 * be retrieved in sdintr. 14410 */ 14411 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14412 ASSERT(un->un_rqs_bp->b_private == xp); 14413 14414 /* 14415 * See if the HBA supports auto-request sense for the specified 14416 * target/lun. If it does, then try to enable it (if not already 14417 * enabled). 14418 * 14419 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14420 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14421 * return success. However, in both of these cases ARQ is always 14422 * enabled and scsi_ifgetcap will always return true. The best approach 14423 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14424 * 14425 * The third case is an HBA (adp) that always returns enabled on 14426 * scsi_ifgetcap even when it is not enabled; the best approach 14427 * is to issue a scsi_ifsetcap() followed by a scsi_ifgetcap(). 14428 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14429 */ 14430 14431 if (un->un_f_is_fibre == TRUE) { 14432 un->un_f_arq_enabled = TRUE; 14433 } else { 14434 #if defined(__i386) || defined(__amd64) 14435 /* 14436 * Circumvent the Adaptec bug; remove this code when 14437 * the bug is fixed 14438 */ 14439 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14440 #endif 14441 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14442 case 0: 14443 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14444 "sd_alloc_rqs: HBA supports ARQ\n"); 14445 /* 14446 * ARQ is supported by this HBA but currently is not 14447 * enabled. Attempt to enable it and if successful then 14448 * mark this instance as ARQ enabled.
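 *
 * (Sketch of the enable call that follows: scsi_ifsetcap() with
 * "auto-rqsense" returns 1 on success, and un_f_arq_enabled is set
 * accordingly.)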
14449 */ 14450 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14451 == 1) { 14452 /* Successfully enabled ARQ in the HBA */ 14453 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14454 "sd_alloc_rqs: ARQ enabled\n"); 14455 un->un_f_arq_enabled = TRUE; 14456 } else { 14457 /* Could not enable ARQ in the HBA */ 14458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14459 "sd_alloc_rqs: failed ARQ enable\n"); 14460 un->un_f_arq_enabled = FALSE; 14461 } 14462 break; 14463 case 1: 14464 /* 14465 * ARQ is supported by this HBA and is already enabled. 14466 * Just mark ARQ as enabled for this instance. 14467 */ 14468 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14469 "sd_alloc_rqs: ARQ already enabled\n"); 14470 un->un_f_arq_enabled = TRUE; 14471 break; 14472 default: 14473 /* 14474 * ARQ is not supported by this HBA; disable it for this 14475 * instance. 14476 */ 14477 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14478 "sd_alloc_rqs: HBA does not support ARQ\n"); 14479 un->un_f_arq_enabled = FALSE; 14480 break; 14481 } 14482 } 14483 14484 return (DDI_SUCCESS); 14485 } 14486 14487 14488 /* 14489 * Function: sd_free_rqs 14490 * 14491 * Description: Cleanup for the per-instance RQS command. 14492 * 14493 * Context: Kernel thread context 14494 */ 14495 14496 static void 14497 sd_free_rqs(struct sd_lun *un) 14498 { 14499 ASSERT(un != NULL); 14500 14501 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14502 14503 /* 14504 * If consistent memory is bound to a scsi_pkt, the pkt 14505 * has to be destroyed *before* freeing the consistent memory. 14506 * Don't change the sequence of these operations. 14507 * scsi_destroy_pkt() might otherwise access memory 14508 * after it has been freed by scsi_free_consistent_buf(). 14509 */ 14510 if (un->un_rqs_pktp != NULL) { 14511 scsi_destroy_pkt(un->un_rqs_pktp); 14512 un->un_rqs_pktp = NULL; 14513 } 14514 14515 if (un->un_rqs_bp != NULL) { 14516 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 14517 scsi_free_consistent_buf(un->un_rqs_bp); 14518 un->un_rqs_bp = NULL; 14519 } 14520 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14521 } 14522 14523 14524 14525 /* 14526 * Function: sd_reduce_throttle 14527 * 14528 * Description: Reduces the maximum # of outstanding commands on a 14529 * target to the current number of outstanding commands. 14530 * Queues a timeout(9F) callback to restore the limit 14531 * after a specified interval has elapsed. 14532 * Typically used when we get a TRAN_BUSY return code 14533 * back from scsi_transport().
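 *
 * Worked example (hypothetical numbers, adaptive throttling on):
 * with un_throttle == 16 and 5 commands in transport,
 * - on TRAN_BUSY, un_busy_throttle saves 16 and un_throttle drops
 *   to 5; sd_restore_throttle() later reverts straight to 16;
 * - on QFULL, un_throttle drops to 5 and is then raised by
 *   max(un_throttle / 10, 1) per interval (5 -> 6 -> 7 -> ...)
 *   until un_saved_throttle is reached.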
14534 *
14535 * Arguments: un - ptr to the sd_lun softstate struct
14536 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
14537 *
14538 * Context: May be called from interrupt context
14539 */
14540
14541 static void
14542 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
14543 {
14544 ASSERT(un != NULL);
14545 ASSERT(mutex_owned(SD_MUTEX(un)));
14546 ASSERT(un->un_ncmds_in_transport >= 0);
14547
14548 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14549 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
14550 un, un->un_throttle, un->un_ncmds_in_transport);
14551
14552 if (un->un_throttle > 1) {
14553 if (un->un_f_use_adaptive_throttle == TRUE) {
14554 switch (throttle_type) {
14555 case SD_THROTTLE_TRAN_BUSY:
14556 if (un->un_busy_throttle == 0) {
14557 un->un_busy_throttle = un->un_throttle;
14558 }
14559 break;
14560 case SD_THROTTLE_QFULL:
14561 un->un_busy_throttle = 0;
14562 break;
14563 default:
14564 ASSERT(FALSE);
14565 }
14566
14567 if (un->un_ncmds_in_transport > 0) {
14568 un->un_throttle = un->un_ncmds_in_transport;
14569 }
14570
14571 } else {
14572 if (un->un_ncmds_in_transport == 0) {
14573 un->un_throttle = 1;
14574 } else {
14575 un->un_throttle = un->un_ncmds_in_transport;
14576 }
14577 }
14578 }
14579
14580 /* Reschedule the timeout if none is currently active */
14581 if (un->un_reset_throttle_timeid == NULL) {
14582 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
14583 un, SD_THROTTLE_RESET_INTERVAL);
14584 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14585 "sd_reduce_throttle: timeout scheduled!\n");
14586 }
14587
14588 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14589 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14590 }
14591
14592
14593
14594 /*
14595 * Function: sd_restore_throttle
14596 *
14597 * Description: Callback function for timeout(9F). Resets the current
14598 * value of un->un_throttle to its default.
14599 *
14600 * Arguments: arg - pointer to associated softstate for the device.
14601 *
14602 * Context: May be called from interrupt context
14603 */
14604
14605 static void
14606 sd_restore_throttle(void *arg)
14607 {
14608 struct sd_lun *un = arg;
14609
14610 ASSERT(un != NULL);
14611 ASSERT(!mutex_owned(SD_MUTEX(un)));
14612
14613 mutex_enter(SD_MUTEX(un));
14614
14615 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14616 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14617
14618 un->un_reset_throttle_timeid = NULL;
14619
14620 if (un->un_f_use_adaptive_throttle == TRUE) {
14621 /*
14622 * If un_busy_throttle is nonzero, then it contains the
14623 * value that un_throttle had when we got a TRAN_BUSY back
14624 * from scsi_transport(). We want to revert to this
14625 * value.
14626 *
14627 * In the QFULL case, the throttle limit will incrementally
14628 * increase until it reaches max throttle.
14629 */
14630 if (un->un_busy_throttle > 0) {
14631 un->un_throttle = un->un_busy_throttle;
14632 un->un_busy_throttle = 0;
14633 } else {
14634 /*
14635 * Increase the throttle by 10% (opening the gate
14636 * slowly), and schedule another restore if the saved
14637 * throttle has not yet been reached.
14638 */
14639 short throttle;
14640 if (sd_qfull_throttle_enable) {
14641 throttle = un->un_throttle +
14642 max((un->un_throttle / 10), 1);
14643 un->un_throttle =
14644 (throttle < un->un_saved_throttle) ?
14645 throttle : un->un_saved_throttle;
14646 if (un->un_throttle < un->un_saved_throttle) {
14647 un->un_reset_throttle_timeid =
14648 timeout(sd_restore_throttle,
14649 un,
14650 SD_QFULL_THROTTLE_RESET_INTERVAL);
14651 }
14652 }
14653 }
14654
14655 /*
14656 * If un_throttle has fallen below the low-water mark, we
14657 * restore the maximum value here (and allow it to ratchet
14658 * down again if necessary).
14659 */
14660 if (un->un_throttle < un->un_min_throttle) {
14661 un->un_throttle = un->un_saved_throttle;
14662 }
14663 } else {
14664 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14665 "restoring limit from 0x%x to 0x%x\n",
14666 un->un_throttle, un->un_saved_throttle);
14667 un->un_throttle = un->un_saved_throttle;
14668 }
14669
14670 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14671 "sd_restore_throttle: calling sd_start_cmds!\n");
14672
14673 sd_start_cmds(un, NULL);
14674
14675 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14676 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
14677 un, un->un_throttle);
14678
14679 mutex_exit(SD_MUTEX(un));
14680
14681 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
14682 }
14683
14684 /*
14685 * Function: sdrunout
14686 *
14687 * Description: Callback routine for scsi_init_pkt when a resource allocation
14688 * fails.
14689 *
14690 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
14691 * soft state instance.
14692 *
14693 * Return Code: The scsi_init_pkt routine allows for the callback function to
14694 * return a 0 indicating the callback should be rescheduled or a 1
14695 * indicating not to reschedule. This routine always returns 1
14696 * because the driver always provides a callback function to
14697 * scsi_init_pkt. This results in a callback always being scheduled
14698 * (via the scsi_init_pkt callback implementation) if a resource
14699 * failure occurs.
14700 *
14701 * Context: This callback function may not block or call routines that block
14702 *
14703 * Note: Using the scsi_init_pkt callback facility can result in an I/O
14704 * request persisting at the head of the list which cannot be
14705 * satisfied even after multiple retries. In the future the driver
14706 * may implement some type of maximum runout count before failing
14707 * an I/O.
14708 */
14709
14710 static int
14711 sdrunout(caddr_t arg)
14712 {
14713 struct sd_lun *un = (struct sd_lun *)arg;
14714
14715 ASSERT(un != NULL);
14716 ASSERT(!mutex_owned(SD_MUTEX(un)));
14717
14718 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
14719
14720 mutex_enter(SD_MUTEX(un));
14721 sd_start_cmds(un, NULL);
14722 mutex_exit(SD_MUTEX(un));
14723 /*
14724 * This callback routine always returns 1 (i.e. do not reschedule)
14725 * because we always specify sdrunout as the callback handler for
14726 * scsi_init_pkt inside the call to sd_start_cmds.
14727 */
14728 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
14729 return (1);
14730 }
14731
14732
14733 /*
14734 * Function: sdintr
14735 *
14736 * Description: Completion callback routine for scsi_pkt(9S) structs
14737 * sent to the HBA driver via scsi_transport(9F).
14738 * 14739 * Context: Interrupt context 14740 */ 14741 14742 static void 14743 sdintr(struct scsi_pkt *pktp) 14744 { 14745 struct buf *bp; 14746 struct sd_xbuf *xp; 14747 struct sd_lun *un; 14748 14749 ASSERT(pktp != NULL); 14750 bp = (struct buf *)pktp->pkt_private; 14751 ASSERT(bp != NULL); 14752 xp = SD_GET_XBUF(bp); 14753 ASSERT(xp != NULL); 14754 ASSERT(xp->xb_pktp != NULL); 14755 un = SD_GET_UN(bp); 14756 ASSERT(un != NULL); 14757 ASSERT(!mutex_owned(SD_MUTEX(un))); 14758 14759 #ifdef SD_FAULT_INJECTION 14760 14761 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14762 /* SD FaultInjection */ 14763 sd_faultinjection(pktp); 14764 14765 #endif /* SD_FAULT_INJECTION */ 14766 14767 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14768 " xp:0x%p, un:0x%p\n", bp, xp, un); 14769 14770 mutex_enter(SD_MUTEX(un)); 14771 14772 /* Reduce the count of the #commands currently in transport */ 14773 un->un_ncmds_in_transport--; 14774 ASSERT(un->un_ncmds_in_transport >= 0); 14775 14776 /* Increment counter to indicate that the callback routine is active */ 14777 un->un_in_callback++; 14778 14779 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14780 14781 #ifdef SDDEBUG 14782 if (bp == un->un_retry_bp) { 14783 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14784 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14785 un, un->un_retry_bp, un->un_ncmds_in_transport); 14786 } 14787 #endif 14788 14789 /* 14790 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14791 * state if needed. 14792 */ 14793 if (pktp->pkt_reason == CMD_DEV_GONE) { 14794 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14795 "Device is gone\n"); 14796 if (un->un_mediastate != DKIO_DEV_GONE) { 14797 un->un_mediastate = DKIO_DEV_GONE; 14798 cv_broadcast(&un->un_state_cv); 14799 } 14800 sd_return_failed_command(un, bp, EIO); 14801 goto exit; 14802 } 14803 14804 /* 14805 * First see if the pkt has auto-request sense data with it.... 14806 * Look at the packet state first so we don't take a performance 14807 * hit looking at the arq enabled flag unless absolutely necessary. 14808 */ 14809 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14810 (un->un_f_arq_enabled == TRUE)) { 14811 /* 14812 * The HBA did an auto request sense for this command so check 14813 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14814 * driver command that should not be retried. 14815 */ 14816 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14817 /* 14818 * Save the relevant sense info into the xp for the 14819 * original cmd. 14820 */ 14821 struct scsi_arq_status *asp; 14822 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14823 xp->xb_sense_status = 14824 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14825 xp->xb_sense_state = asp->sts_rqpkt_state; 14826 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14827 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14828 min(sizeof (struct scsi_extended_sense), 14829 SENSE_LENGTH)); 14830 14831 /* fail the command */ 14832 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14833 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14834 sd_return_failed_command(un, bp, EIO); 14835 goto exit; 14836 } 14837 14838 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14839 /* 14840 * We want to either retry or fail this command, so free 14841 * the DMA resources here. If we retry the command then 14842 * the DMA resources will be reallocated in sd_start_cmds(). 
14843 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14844 * causes the *entire* transfer to start over again from the 14845 * beginning of the request, even for PARTIAL chunks that 14846 * have already transferred successfully. 14847 */ 14848 if ((un->un_f_is_fibre == TRUE) && 14849 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14850 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14851 scsi_dmafree(pktp); 14852 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14853 } 14854 #endif 14855 14856 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14857 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14858 14859 sd_handle_auto_request_sense(un, bp, xp, pktp); 14860 goto exit; 14861 } 14862 14863 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14864 if (pktp->pkt_flags & FLAG_SENSING) { 14865 /* This pktp is from the unit's REQUEST_SENSE command */ 14866 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14867 "sdintr: sd_handle_request_sense\n"); 14868 sd_handle_request_sense(un, bp, xp, pktp); 14869 goto exit; 14870 } 14871 14872 /* 14873 * Check to see if the command successfully completed as requested; 14874 * this is the most common case (and also the hot performance path). 14875 * 14876 * Requirements for successful completion are: 14877 * pkt_reason is CMD_CMPLT and packet status is status good. 14878 * In addition: 14879 * - A residual of zero indicates successful completion no matter what 14880 * the command is. 14881 * - If the residual is not zero and the command is not a read or 14882 * write, then it's still defined as successful completion. In other 14883 * words, if the command is a read or write the residual must be 14884 * zero for successful completion. 14885 * - If the residual is not zero and the command is a read or 14886 * write, and it's a USCSICMD, then it's still defined as 14887 * successful completion. 14888 */ 14889 if ((pktp->pkt_reason == CMD_CMPLT) && 14890 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14891 14892 /* 14893 * Since this command is returned with a good status, we 14894 * can reset the count for Sonoma failover. 14895 */ 14896 un->un_sonoma_failure_count = 0; 14897 14898 /* 14899 * Return all USCSI commands on good status 14900 */ 14901 if (pktp->pkt_resid == 0) { 14902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14903 "sdintr: returning command for resid == 0\n"); 14904 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14905 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14906 SD_UPDATE_B_RESID(bp, pktp); 14907 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14908 "sdintr: returning command for resid != 0\n"); 14909 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14910 SD_UPDATE_B_RESID(bp, pktp); 14911 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14912 "sdintr: returning uscsi command\n"); 14913 } else { 14914 goto not_successful; 14915 } 14916 sd_return_command(un, bp); 14917 14918 /* 14919 * Decrement counter to indicate that the callback routine 14920 * is done. 14921 */ 14922 un->un_in_callback--; 14923 ASSERT(un->un_in_callback >= 0); 14924 mutex_exit(SD_MUTEX(un)); 14925 14926 return; 14927 } 14928 14929 not_successful: 14930 14931 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14932 /* 14933 * The following is based upon knowledge of the underlying transport 14934 * and its use of DMA resources. This code should be removed when 14935 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14936 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14937 * and sd_start_cmds(). 
14938 * 14939 * Free any DMA resources associated with this command if there 14940 * is a chance it could be retried or enqueued for later retry. 14941 * If we keep the DMA binding then mpxio cannot reissue the 14942 * command on another path whenever a path failure occurs. 14943 * 14944 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14945 * causes the *entire* transfer to start over again from the 14946 * beginning of the request, even for PARTIAL chunks that 14947 * have already transferred successfully. 14948 * 14949 * This is only done for non-uscsi commands (and also skipped for the 14950 * driver's internal RQS command). Also just do this for Fibre Channel 14951 * devices as these are the only ones that support mpxio. 14952 */ 14953 if ((un->un_f_is_fibre == TRUE) && 14954 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14955 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14956 scsi_dmafree(pktp); 14957 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14958 } 14959 #endif 14960 14961 /* 14962 * The command did not successfully complete as requested so check 14963 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14964 * driver command that should not be retried so just return. If 14965 * FLAG_DIAGNOSE is not set the error will be processed below. 14966 */ 14967 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14969 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14970 /* 14971 * Issue a request sense if a check condition caused the error 14972 * (we handle the auto request sense case above), otherwise 14973 * just fail the command. 14974 */ 14975 if ((pktp->pkt_reason == CMD_CMPLT) && 14976 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14977 sd_send_request_sense_command(un, bp, pktp); 14978 } else { 14979 sd_return_failed_command(un, bp, EIO); 14980 } 14981 goto exit; 14982 } 14983 14984 /* 14985 * The command did not successfully complete as requested so process 14986 * the error, retry, and/or attempt recovery. 
14987 */ 14988 switch (pktp->pkt_reason) { 14989 case CMD_CMPLT: 14990 switch (SD_GET_PKT_STATUS(pktp)) { 14991 case STATUS_GOOD: 14992 /* 14993 * The command completed successfully with a non-zero 14994 * residual 14995 */ 14996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14997 "sdintr: STATUS_GOOD \n"); 14998 sd_pkt_status_good(un, bp, xp, pktp); 14999 break; 15000 15001 case STATUS_CHECK: 15002 case STATUS_TERMINATED: 15003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15004 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15005 sd_pkt_status_check_condition(un, bp, xp, pktp); 15006 break; 15007 15008 case STATUS_BUSY: 15009 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15010 "sdintr: STATUS_BUSY\n"); 15011 sd_pkt_status_busy(un, bp, xp, pktp); 15012 break; 15013 15014 case STATUS_RESERVATION_CONFLICT: 15015 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15016 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15017 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15018 break; 15019 15020 case STATUS_QFULL: 15021 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15022 "sdintr: STATUS_QFULL\n"); 15023 sd_pkt_status_qfull(un, bp, xp, pktp); 15024 break; 15025 15026 case STATUS_MET: 15027 case STATUS_INTERMEDIATE: 15028 case STATUS_SCSI2: 15029 case STATUS_INTERMEDIATE_MET: 15030 case STATUS_ACA_ACTIVE: 15031 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15032 "Unexpected SCSI status received: 0x%x\n", 15033 SD_GET_PKT_STATUS(pktp)); 15034 sd_return_failed_command(un, bp, EIO); 15035 break; 15036 15037 default: 15038 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15039 "Invalid SCSI status received: 0x%x\n", 15040 SD_GET_PKT_STATUS(pktp)); 15041 sd_return_failed_command(un, bp, EIO); 15042 break; 15043 15044 } 15045 break; 15046 15047 case CMD_INCOMPLETE: 15048 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15049 "sdintr: CMD_INCOMPLETE\n"); 15050 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15051 break; 15052 case CMD_TRAN_ERR: 15053 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15054 "sdintr: CMD_TRAN_ERR\n"); 15055 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15056 break; 15057 case CMD_RESET: 15058 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15059 "sdintr: CMD_RESET \n"); 15060 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15061 break; 15062 case CMD_ABORTED: 15063 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15064 "sdintr: CMD_ABORTED \n"); 15065 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15066 break; 15067 case CMD_TIMEOUT: 15068 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15069 "sdintr: CMD_TIMEOUT\n"); 15070 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15071 break; 15072 case CMD_UNX_BUS_FREE: 15073 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15074 "sdintr: CMD_UNX_BUS_FREE \n"); 15075 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15076 break; 15077 case CMD_TAG_REJECT: 15078 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15079 "sdintr: CMD_TAG_REJECT\n"); 15080 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15081 break; 15082 default: 15083 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15084 "sdintr: default\n"); 15085 sd_pkt_reason_default(un, bp, xp, pktp); 15086 break; 15087 } 15088 15089 exit: 15090 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15091 15092 /* Decrement counter to indicate that the callback routine is done. */ 15093 un->un_in_callback--; 15094 ASSERT(un->un_in_callback >= 0); 15095 15096 /* 15097 * At this point, the pkt has been dispatched, ie, it is either 15098 * being re-tried or has been returned to its caller and should 15099 * not be referenced. 
15100 */
15101
15102 mutex_exit(SD_MUTEX(un));
15103 }
15104
15105
15106 /*
15107 * Function: sd_print_incomplete_msg
15108 *
15109 * Description: Prints the error message for a CMD_INCOMPLETE error.
15110 *
15111 * Arguments: un - ptr to associated softstate for the device.
15112 * bp - ptr to the buf(9S) for the command.
15113 * arg - message string ptr
15114 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
15115 * or SD_NO_RETRY_ISSUED.
15116 *
15117 * Context: May be called under interrupt context
15118 */
15119
15120 static void
15121 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
15122 {
15123 struct scsi_pkt *pktp;
15124 char *msgp;
15125 char *cmdp = arg;
15126
15127 ASSERT(un != NULL);
15128 ASSERT(mutex_owned(SD_MUTEX(un)));
15129 ASSERT(bp != NULL);
15130 ASSERT(arg != NULL);
15131 pktp = SD_GET_PKTP(bp);
15132 ASSERT(pktp != NULL);
15133
15134 switch (code) {
15135 case SD_DELAYED_RETRY_ISSUED:
15136 case SD_IMMEDIATE_RETRY_ISSUED:
15137 msgp = "retrying";
15138 break;
15139 case SD_NO_RETRY_ISSUED:
15140 default:
15141 msgp = "giving up";
15142 break;
15143 }
15144
15145 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
15146 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15147 "incomplete %s- %s\n", cmdp, msgp);
15148 }
15149 }
15150
15151
15152
15153 /*
15154 * Function: sd_pkt_status_good
15155 *
15156 * Description: Processing for a STATUS_GOOD code in pkt_status.
15157 *
15158 * Context: May be called under interrupt context
15159 */
15160
15161 static void
15162 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
15163 struct sd_xbuf *xp, struct scsi_pkt *pktp)
15164 {
15165 char *cmdp;
15166
15167 ASSERT(un != NULL);
15168 ASSERT(mutex_owned(SD_MUTEX(un)));
15169 ASSERT(bp != NULL);
15170 ASSERT(xp != NULL);
15171 ASSERT(pktp != NULL);
15172 ASSERT(pktp->pkt_reason == CMD_CMPLT);
15173 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
15174 ASSERT(pktp->pkt_resid != 0);
15175
15176 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
15177
15178 SD_UPDATE_ERRSTATS(un, sd_harderrs);
15179 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
15180 case SCMD_READ:
15181 cmdp = "read";
15182 break;
15183 case SCMD_WRITE:
15184 cmdp = "write";
15185 break;
15186 default:
15187 SD_UPDATE_B_RESID(bp, pktp);
15188 sd_return_command(un, bp);
15189 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
15190 return;
15191 }
15192
15193 /*
15194 * See if we can retry the read/write, preferably immediately.
15195 * If retries are exhausted, then sd_retry_command() will update
15196 * the b_resid count.
15197 */
15198 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
15199 cmdp, EIO, (clock_t)0, NULL);
15200
15201 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
15202 }
15203
15204
15205
15206
15207
15208 /*
15209 * Function: sd_handle_request_sense
15210 *
15211 * Description: Processing for non-auto Request Sense command.
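 *
 * The RQS resources are cross-linked with the original command;
 * an illustrative view of the pointers used below:
 *
 *     un_rqs_bp --(SD_GET_XBUF)--> sense_xp
 *     sense_xp->xb_sense_bp -----> cmd_bp (the original command)
 *     cmd_bp --(SD_GET_XBUF)-----> cmd_xp
 *     cmd_bp --(SD_GET_PKTP)-----> cmd_pktp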
15212 * 15213 * Arguments: un - ptr to associated softstate 15214 * sense_bp - ptr to buf(9S) for the RQS command 15215 * sense_xp - ptr to the sd_xbuf for the RQS command 15216 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15217 * 15218 * Context: May be called under interrupt context 15219 */ 15220 15221 static void 15222 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15223 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15224 { 15225 struct buf *cmd_bp; /* buf for the original command */ 15226 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15227 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15228 15229 ASSERT(un != NULL); 15230 ASSERT(mutex_owned(SD_MUTEX(un))); 15231 ASSERT(sense_bp != NULL); 15232 ASSERT(sense_xp != NULL); 15233 ASSERT(sense_pktp != NULL); 15234 15235 /* 15236 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15237 * RQS command and not the original command. 15238 */ 15239 ASSERT(sense_pktp == un->un_rqs_pktp); 15240 ASSERT(sense_bp == un->un_rqs_bp); 15241 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15242 (FLAG_SENSING | FLAG_HEAD)); 15243 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15244 FLAG_SENSING) == FLAG_SENSING); 15245 15246 /* These are the bp, xp, and pktp for the original command */ 15247 cmd_bp = sense_xp->xb_sense_bp; 15248 cmd_xp = SD_GET_XBUF(cmd_bp); 15249 cmd_pktp = SD_GET_PKTP(cmd_bp); 15250 15251 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15252 /* 15253 * The REQUEST SENSE command failed. Release the REQUEST 15254 * SENSE command for re-use, get back the bp for the original 15255 * command, and attempt to re-try the original command if 15256 * FLAG_DIAGNOSE is not set in the original packet. 15257 */ 15258 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15259 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15260 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15261 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15262 NULL, NULL, EIO, (clock_t)0, NULL); 15263 return; 15264 } 15265 } 15266 15267 /* 15268 * Save the relevant sense info into the xp for the original cmd. 15269 * 15270 * Note: if the request sense failed the state info will be zero 15271 * as set in sd_mark_rqs_busy() 15272 */ 15273 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15274 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15275 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15276 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15277 15278 /* 15279 * Free up the RQS command.... 15280 * NOTE: 15281 * Must do this BEFORE calling sd_validate_sense_data! 15282 * sd_validate_sense_data may return the original command in 15283 * which case the pkt will be freed and the flags can no 15284 * longer be touched. 15285 * SD_MUTEX is held through this process until the command 15286 * is dispatched based upon the sense data, so there are 15287 * no race conditions. 15288 */ 15289 (void) sd_mark_rqs_idle(un, sense_xp); 15290 15291 /* 15292 * For a retryable command see if we have valid sense data, if so then 15293 * turn it over to sd_decode_sense() to figure out the right course of 15294 * action. Just fail a non-retryable command. 
15295 */ 15296 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15297 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15298 SD_SENSE_DATA_IS_VALID) { 15299 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15300 } 15301 } else { 15302 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15303 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15304 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15305 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15306 sd_return_failed_command(un, cmd_bp, EIO); 15307 } 15308 } 15309 15310 15311 15312 15313 /* 15314 * Function: sd_handle_auto_request_sense 15315 * 15316 * Description: Processing for auto-request sense information. 15317 * 15318 * Arguments: un - ptr to associated softstate 15319 * bp - ptr to buf(9S) for the command 15320 * xp - ptr to the sd_xbuf for the command 15321 * pktp - ptr to the scsi_pkt(9S) for the command 15322 * 15323 * Context: May be called under interrupt context 15324 */ 15325 15326 static void 15327 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15328 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15329 { 15330 struct scsi_arq_status *asp; 15331 15332 ASSERT(un != NULL); 15333 ASSERT(mutex_owned(SD_MUTEX(un))); 15334 ASSERT(bp != NULL); 15335 ASSERT(xp != NULL); 15336 ASSERT(pktp != NULL); 15337 ASSERT(pktp != un->un_rqs_pktp); 15338 ASSERT(bp != un->un_rqs_bp); 15339 15340 /* 15341 * For auto-request sense, we get a scsi_arq_status back from 15342 * the HBA, with the sense data in the sts_sensedata member. 15343 * The pkt_scbp of the packet points to this scsi_arq_status. 15344 */ 15345 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15346 15347 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15348 /* 15349 * The auto REQUEST SENSE failed; see if we can re-try 15350 * the original command. 15351 */ 15352 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15353 "auto request sense failed (reason=%s)\n", 15354 scsi_rname(asp->sts_rqpkt_reason)); 15355 15356 sd_reset_target(un, pktp); 15357 15358 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15359 NULL, NULL, EIO, (clock_t)0, NULL); 15360 return; 15361 } 15362 15363 /* Save the relevant sense info into the xp for the original cmd. */ 15364 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15365 xp->xb_sense_state = asp->sts_rqpkt_state; 15366 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15367 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15368 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15369 15370 /* 15371 * See if we have valid sense data, if so then turn it over to 15372 * sd_decode_sense() to figure out the right course of action. 15373 */ 15374 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15375 sd_decode_sense(un, bp, xp, pktp); 15376 } 15377 } 15378 15379 15380 /* 15381 * Function: sd_print_sense_failed_msg 15382 * 15383 * Description: Print log message when RQS has failed. 
15384 * 15385 * Arguments: un - ptr to associated softstate 15386 * bp - ptr to buf(9S) for the command 15387 * arg - generic message string ptr 15388 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15389 * or SD_NO_RETRY_ISSUED 15390 * 15391 * Context: May be called from interrupt context 15392 */ 15393 15394 static void 15395 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15396 int code) 15397 { 15398 char *msgp = arg; 15399 15400 ASSERT(un != NULL); 15401 ASSERT(mutex_owned(SD_MUTEX(un))); 15402 ASSERT(bp != NULL); 15403 15404 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15406 } 15407 } 15408 15409 15410 /* 15411 * Function: sd_validate_sense_data 15412 * 15413 * Description: Check the given sense data for validity. 15414 * If the sense data is not valid, the command will 15415 * be either failed or retried! 15416 * 15417 * Return Code: SD_SENSE_DATA_IS_INVALID 15418 * SD_SENSE_DATA_IS_VALID 15419 * 15420 * Context: May be called from interrupt context 15421 */ 15422 15423 static int 15424 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15425 { 15426 struct scsi_extended_sense *esp; 15427 struct scsi_pkt *pktp; 15428 size_t actual_len; 15429 char *msgp = NULL; 15430 15431 ASSERT(un != NULL); 15432 ASSERT(mutex_owned(SD_MUTEX(un))); 15433 ASSERT(bp != NULL); 15434 ASSERT(bp != un->un_rqs_bp); 15435 ASSERT(xp != NULL); 15436 15437 pktp = SD_GET_PKTP(bp); 15438 ASSERT(pktp != NULL); 15439 15440 /* 15441 * Check the status of the RQS command (auto or manual). 15442 */ 15443 switch (xp->xb_sense_status & STATUS_MASK) { 15444 case STATUS_GOOD: 15445 break; 15446 15447 case STATUS_RESERVATION_CONFLICT: 15448 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15449 return (SD_SENSE_DATA_IS_INVALID); 15450 15451 case STATUS_BUSY: 15452 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15453 "Busy Status on REQUEST SENSE\n"); 15454 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15455 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15456 return (SD_SENSE_DATA_IS_INVALID); 15457 15458 case STATUS_QFULL: 15459 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15460 "QFULL Status on REQUEST SENSE\n"); 15461 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15462 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15463 return (SD_SENSE_DATA_IS_INVALID); 15464 15465 case STATUS_CHECK: 15466 case STATUS_TERMINATED: 15467 msgp = "Check Condition on REQUEST SENSE\n"; 15468 goto sense_failed; 15469 15470 default: 15471 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15472 goto sense_failed; 15473 } 15474 15475 /* 15476 * See if we got the minimum required amount of sense data. 15477 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15478 * or less. 
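 *
 * A worked example with illustrative numbers: if SENSE_LENGTH were
 * 20 bytes and the target reported a sense resid of 6, actual_len
 * below would be 14, which passes the minimum-length checks; a
 * resid equal to SENSE_LENGTH would yield 0, and the REQUEST SENSE
 * is treated as having transferred no sense data at all.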
15479 */
15480 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid);
15481 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
15482 (actual_len == 0)) {
15483 msgp = "Request Sense couldn't get sense data\n";
15484 goto sense_failed;
15485 }
15486
15487 if (actual_len < SUN_MIN_SENSE_LENGTH) {
15488 msgp = "Not enough sense information\n";
15489 goto sense_failed;
15490 }
15491
15492 /*
15493 * We require the extended sense data
15494 */
15495 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
15496 if (esp->es_class != CLASS_EXTENDED_SENSE) {
15497 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
15498 static char tmp[8];
15499 static char buf[148];
15500 char *p = (char *)(xp->xb_sense_data);
15501 int i;
15502
15503 mutex_enter(&sd_sense_mutex);
15504 (void) strcpy(buf, "undecodable sense information:");
15505 for (i = 0; i < actual_len; i++) {
15506 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
15507 (void) strcpy(&buf[strlen(buf)], tmp);
15508 }
15509 i = strlen(buf);
15510 (void) strcpy(&buf[i], "-(assumed fatal)\n");
15511 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
15512 mutex_exit(&sd_sense_mutex);
15513 }
15514 /* Note: Legacy behavior, fail the command with no retry */
15515 sd_return_failed_command(un, bp, EIO);
15516 return (SD_SENSE_DATA_IS_INVALID);
15517 }
15518
15519 /*
15520 * Check that es_code is valid (es_class concatenated with es_code
15521 * makes up the "response code" field; es_class will always be 7).
15522 * Make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the
15523 * sense data format.
15524 */
15525 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
15526 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
15527 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
15528 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
15529 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
15530 goto sense_failed;
15531 }
15532
15533 return (SD_SENSE_DATA_IS_VALID);
15534
15535 sense_failed:
15536 /*
15537 * If the request sense failed (for whatever reason), attempt
15538 * to retry the original command.
15539 */
15540 #if defined(__i386) || defined(__amd64)
15541 /*
15542 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
15543 * sddef.h on the SPARC platform, while x86 uses one binary
15544 * for both SCSI and FC.
15545 * The delay value below needs to be adjusted whenever
15546 * SD_RETRY_DELAY changes in sddef.h.
15547 */
15548 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15549 sd_print_sense_failed_msg, msgp, EIO,
15550 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
15551 #else
15552 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15553 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
15554 #endif
15555
15556 return (SD_SENSE_DATA_IS_INVALID);
15557 }
15558
15559
15560
15561 /*
15562 * Function: sd_decode_sense
15563 *
15564 * Description: Take recovery action(s) when SCSI Sense Data is received.
15565 *
15566 * Context: Interrupt context.
15567 */
15568
15569 static void
15570 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
15571 struct scsi_pkt *pktp)
15572 {
15573 uint8_t sense_key;
15574
15575 ASSERT(un != NULL);
15576 ASSERT(mutex_owned(SD_MUTEX(un)));
15577 ASSERT(bp != NULL);
15578 ASSERT(bp != un->un_rqs_bp);
15579 ASSERT(xp != NULL);
15580 ASSERT(pktp != NULL);
15581
15582 sense_key = scsi_sense_key(xp->xb_sense_data);
15583
15584 switch (sense_key) {
15585 case KEY_NO_SENSE:
15586 sd_sense_key_no_sense(un, bp, xp, pktp);
15587 break;
15588 case KEY_RECOVERABLE_ERROR:
15589 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
15590 bp, xp, pktp);
15591 break;
15592 case KEY_NOT_READY:
15593 sd_sense_key_not_ready(un, xp->xb_sense_data,
15594 bp, xp, pktp);
15595 break;
15596 case KEY_MEDIUM_ERROR:
15597 case KEY_HARDWARE_ERROR:
15598 sd_sense_key_medium_or_hardware_error(un,
15599 xp->xb_sense_data, bp, xp, pktp);
15600 break;
15601 case KEY_ILLEGAL_REQUEST:
15602 sd_sense_key_illegal_request(un, bp, xp, pktp);
15603 break;
15604 case KEY_UNIT_ATTENTION:
15605 sd_sense_key_unit_attention(un, xp->xb_sense_data,
15606 bp, xp, pktp);
15607 break;
15608 case KEY_WRITE_PROTECT:
15609 case KEY_VOLUME_OVERFLOW:
15610 case KEY_MISCOMPARE:
15611 sd_sense_key_fail_command(un, bp, xp, pktp);
15612 break;
15613 case KEY_BLANK_CHECK:
15614 sd_sense_key_blank_check(un, bp, xp, pktp);
15615 break;
15616 case KEY_ABORTED_COMMAND:
15617 sd_sense_key_aborted_command(un, bp, xp, pktp);
15618 break;
15619 case KEY_VENDOR_UNIQUE:
15620 case KEY_COPY_ABORTED:
15621 case KEY_EQUAL:
15622 case KEY_RESERVED:
15623 default:
15624 sd_sense_key_default(un, xp->xb_sense_data,
15625 bp, xp, pktp);
15626 break;
15627 }
15628 }
15629
15630
15631 /*
15632 * Function: sd_dump_memory
15633 *
15634 * Description: Debug logging routine to print the contents of a user provided
15635 * buffer. The output of the buffer is broken up into 256 byte
15636 * segments due to a size constraint of the scsi_log
15637 * implementation.
15638 *
15639 * Arguments: un - ptr to softstate
15640 * comp - component mask
15641 * title - "title" string to precede data when printed
15642 * data - ptr to data block to be printed
15643 * len - size of data block to be printed
15644 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
15645 *
15646 * Context: May be called from interrupt context
15647 */
15648
15649 #define SD_DUMP_MEMORY_BUF_SIZE 256
15650
15651 static char *sd_dump_format_string[] = {
15652 " 0x%02x",
15653 " %c"
15654 };
15655
15656 static void
15657 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
15658 int len, int fmt)
15659 {
15660 int i, j;
15661 int avail_count;
15662 int start_offset;
15663 int end_offset;
15664 size_t entry_len;
15665 char *bufp;
15666 char *local_buf;
15667 char *format_string;
15668
15669 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
15670
15671 /*
15672 * In the debug version of the driver, this function is called from a
15673 * number of places which are NOPs in the release driver.
15674 * The debug driver therefore has additional methods of filtering
15675 * debug output.
15676 */
15677 #ifdef SDDEBUG
15678 /*
15679 * In the debug version of the driver we can reduce the amount of debug
15680 * messages by setting sd_error_level to something other than
15681 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
15682 * sd_component_mask.
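 *
 * For example (an illustrative configuration, e.g. set via
 * /etc/system or a kernel debugger): leaving sd_error_level at
 * SCSI_ERR_ALL, setting SD_LOGMASK_DUMP_MEM in sd_level_mask, and
 * setting only SD_LOG_IO in sd_component_mask restricts these
 * dumps to the I/O component alone.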
15683 */
15684 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
15685 (sd_error_level != SCSI_ERR_ALL)) {
15686 return;
15687 }
15688 if (((sd_component_mask & comp) == 0) ||
15689 (sd_error_level != SCSI_ERR_ALL)) {
15690 return;
15691 }
15692 #else
15693 if (sd_error_level != SCSI_ERR_ALL) {
15694 return;
15695 }
15696 #endif
15697
15698 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
15699 bufp = local_buf;
15700 /*
15701 * Available length is the length of local_buf[], minus the
15702 * length of the title string, minus one for the ":", minus
15703 * one for the newline, minus one for the NULL terminator.
15704 * This gives the #bytes available for holding the printed
15705 * values from the given data buffer.
15706 */
15707 if (fmt == SD_LOG_HEX) {
15708 format_string = sd_dump_format_string[0];
15709 } else /* SD_LOG_CHAR */ {
15710 format_string = sd_dump_format_string[1];
15711 }
15712 /*
15713 * Available count is the number of elements from the given
15714 * data buffer that we can fit into the available length.
15715 * This is based upon the size of the format string used.
15716 * Make one entry and find its size.
15717 */
15718 (void) sprintf(bufp, format_string, data[0]);
15719 entry_len = strlen(bufp);
15720 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
15721
15722 j = 0;
15723 while (j < len) {
15724 bufp = local_buf;
15725 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
15726 start_offset = j;
15727
15728 end_offset = start_offset + avail_count;
15729
15730 (void) sprintf(bufp, "%s:", title);
15731 bufp += strlen(bufp);
15732 for (i = start_offset; ((i < end_offset) && (j < len));
15733 i++, j++) {
15734 (void) sprintf(bufp, format_string, data[i]);
15735 bufp += entry_len;
15736 }
15737 (void) sprintf(bufp, "\n");
15738
15739 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
15740 }
15741 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
15742 }
15743
15744 /*
15745 * Function: sd_print_sense_msg
15746 *
15747 * Description: Log a message based upon the given sense data.
15748 *
15749 * Arguments: un - ptr to associated softstate
15750 * bp - ptr to buf(9S) for the command
15751 * arg - ptr to the associated sd_sense_info struct
15752 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
15753 * or SD_NO_RETRY_ISSUED
15754 *
15755 * Context: May be called from interrupt context
15756 */
15757
15758 static void
15759 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
15760 {
15761 struct sd_xbuf *xp;
15762 struct scsi_pkt *pktp;
15763 uint8_t *sensep;
15764 daddr_t request_blkno;
15765 diskaddr_t err_blkno;
15766 int severity;
15767 int pfa_flag;
15768 extern struct scsi_key_strings scsi_cmds[];
15769
15770 ASSERT(un != NULL);
15771 ASSERT(mutex_owned(SD_MUTEX(un)));
15772 ASSERT(bp != NULL);
15773 xp = SD_GET_XBUF(bp);
15774 ASSERT(xp != NULL);
15775 pktp = SD_GET_PKTP(bp);
15776 ASSERT(pktp != NULL);
15777 ASSERT(arg != NULL);
15778
15779 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
15780 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
15781
15782 if ((code == SD_DELAYED_RETRY_ISSUED) ||
15783 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
15784 severity = SCSI_ERR_RETRYABLE;
15785 }
15786
15787 /* Use absolute block number for the request block number */
15788 request_blkno = xp->xb_blkno;
15789
15790 /*
15791 * Now try to get the error block number from the sense data
15792 */
15793 sensep = xp->xb_sense_data;
15794
15795 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
15796 (uint64_t *)&err_blkno)) {
15797 /*
15798 * We retrieved the error block number from the information
15799 * portion of the sense data.
15800 *
15801 * For USCSI commands we are better off using the error
15802 * block no. as the requested block no. (This is the best
15803 * we can estimate.)
15804 */
15805 if ((SD_IS_BUFIO(xp) == FALSE) &&
15806 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
15807 request_blkno = err_blkno;
15808 }
15809 } else {
15810 /*
15811 * Without the es_valid bit set (for fixed format) or an
15812 * information descriptor (for descriptor format) we cannot
15813 * be certain of the error blkno, so just use the
15814 * request_blkno.
15815 */
15816 err_blkno = (diskaddr_t)request_blkno;
15817 }
15818
15819 /*
15820 * The following will log the buffer contents for the release driver
15821 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
15822 * level is set to verbose.
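 *
 * An example of the resulting log output (illustrative bytes for
 * a ten-byte READ(10) CDB):
 *
 *     Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x10 0x00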
15823 */ 15824 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15825 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15826 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15827 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15828 15829 if (pfa_flag == FALSE) { 15830 /* This is normally only set for USCSI */ 15831 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15832 return; 15833 } 15834 15835 if ((SD_IS_BUFIO(xp) == TRUE) && 15836 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15837 (severity < sd_error_level))) { 15838 return; 15839 } 15840 } 15841 15842 /* 15843 * Check for Sonoma Failover and keep a count of how many failed I/O's 15844 */ 15845 if ((SD_IS_LSI(un)) && 15846 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15847 (scsi_sense_asc(sensep) == 0x94) && 15848 (scsi_sense_ascq(sensep) == 0x01)) { 15849 un->un_sonoma_failure_count++; 15850 if (un->un_sonoma_failure_count > 1) { 15851 return; 15852 } 15853 } 15854 15855 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15856 request_blkno, err_blkno, scsi_cmds, 15857 (struct scsi_extended_sense *)sensep, 15858 un->un_additional_codes, NULL); 15859 } 15860 15861 /* 15862 * Function: sd_sense_key_no_sense 15863 * 15864 * Description: Recovery action when sense data was not received. 15865 * 15866 * Context: May be called from interrupt context 15867 */ 15868 15869 static void 15870 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15871 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15872 { 15873 struct sd_sense_info si; 15874 15875 ASSERT(un != NULL); 15876 ASSERT(mutex_owned(SD_MUTEX(un))); 15877 ASSERT(bp != NULL); 15878 ASSERT(xp != NULL); 15879 ASSERT(pktp != NULL); 15880 15881 si.ssi_severity = SCSI_ERR_FATAL; 15882 si.ssi_pfa_flag = FALSE; 15883 15884 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15885 15886 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15887 &si, EIO, (clock_t)0, NULL); 15888 } 15889 15890 15891 /* 15892 * Function: sd_sense_key_recoverable_error 15893 * 15894 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15895 * 15896 * Context: May be called from interrupt context 15897 */ 15898 15899 static void 15900 sd_sense_key_recoverable_error(struct sd_lun *un, 15901 uint8_t *sense_datap, 15902 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15903 { 15904 struct sd_sense_info si; 15905 uint8_t asc = scsi_sense_asc(sense_datap); 15906 15907 ASSERT(un != NULL); 15908 ASSERT(mutex_owned(SD_MUTEX(un))); 15909 ASSERT(bp != NULL); 15910 ASSERT(xp != NULL); 15911 ASSERT(pktp != NULL); 15912 15913 /* 15914 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15915 */ 15916 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15917 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15918 si.ssi_severity = SCSI_ERR_INFO; 15919 si.ssi_pfa_flag = TRUE; 15920 } else { 15921 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15922 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15923 si.ssi_severity = SCSI_ERR_RECOVERED; 15924 si.ssi_pfa_flag = FALSE; 15925 } 15926 15927 if (pktp->pkt_resid == 0) { 15928 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15929 sd_return_command(un, bp); 15930 return; 15931 } 15932 15933 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15934 &si, EIO, (clock_t)0, NULL); 15935 } 15936 15937 15938 15939 15940 /* 15941 * Function: sd_sense_key_not_ready 15942 * 15943 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
15944 *
15945 * Context: May be called from interrupt context
15946 */
15947
15948 static void
15949 sd_sense_key_not_ready(struct sd_lun *un,
15950 uint8_t *sense_datap,
15951 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
15952 {
15953 struct sd_sense_info si;
15954 uint8_t asc = scsi_sense_asc(sense_datap);
15955 uint8_t ascq = scsi_sense_ascq(sense_datap);
15956
15957 ASSERT(un != NULL);
15958 ASSERT(mutex_owned(SD_MUTEX(un)));
15959 ASSERT(bp != NULL);
15960 ASSERT(xp != NULL);
15961 ASSERT(pktp != NULL);
15962
15963 si.ssi_severity = SCSI_ERR_FATAL;
15964 si.ssi_pfa_flag = FALSE;
15965
15966 /*
15967 * Update error stats after first NOT READY error. Disks may have
15968 * been powered down and may need to be restarted. For CDROMs,
15969 * report NOT READY errors only if media is present.
15970 */
15971 if ((ISCD(un) && (asc == 0x3A)) ||
15972 (xp->xb_nr_retry_count > 0)) {
15973 SD_UPDATE_ERRSTATS(un, sd_harderrs);
15974 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
15975 }
15976
15977 /*
15978 * Just fail if the "not ready" retry limit has been reached.
15979 */
15980 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
15981 /* Special check for error message printing for removables. */
15982 if (un->un_f_has_removable_media && (asc == 0x04) &&
15983 (ascq >= 0x04)) {
15984 si.ssi_severity = SCSI_ERR_ALL;
15985 }
15986 goto fail_command;
15987 }
15988
15989 /*
15990 * Check the ASC and ASCQ in the sense data as needed, to determine
15991 * what to do.
15992 */
15993 switch (asc) {
15994 case 0x04: /* LOGICAL UNIT NOT READY */
15995 /*
15996 * Disk drives that don't spin up result in a very long delay
15997 * in format without warning messages. We will log a message
15998 * if the error level is set to verbose.
15999 */
16000 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16001 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16002 "logical unit not ready, resetting disk\n");
16003 }
16004
16005 /*
16006 * There are different requirements for CDROMs and disks for
16007 * the number of retries. If a CD-ROM is reporting this, it is
16008 * probably reading TOC and is in the process of getting
16009 * ready, so we should keep on trying for a long time to make
16010 * sure that all types of media are taken into account (for
16011 * some media the drive takes a long time to read TOC). For
16012 * disks we do not want to retry this too many times as this
16013 * can cause a long hang in format when the drive refuses to
16014 * spin up (a very common failure).
16015 */
16016 switch (ascq) {
16017 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
16018 /*
16019 * Disk drives frequently refuse to spin up which
16020 * results in a very long hang in format without
16021 * warning messages.
16022 *
16023 * Note: This code preserves the legacy behavior of
16024 * comparing xb_nr_retry_count against zero for fibre
16025 * channel targets instead of comparing against the
16026 * un_reset_retry_count value. The reason for this
16027 * discrepancy has been so utterly lost beneath the
16028 * Sands of Time that even Indiana Jones could not
16029 * find it.
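 *
 * In practice, as the code below reads: fibre targets reset
 * the disk after the first 0x04/0x00 retry, while parallel
 * targets wait until xb_nr_retry_count exceeds
 * un_reset_retry_count.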
16030 */
16031 if (un->un_f_is_fibre == TRUE) {
16032 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
16033 (xp->xb_nr_retry_count > 0)) &&
16034 (un->un_startstop_timeid == NULL)) {
16035 scsi_log(SD_DEVINFO(un), sd_label,
16036 CE_WARN, "logical unit not ready, "
16037 "resetting disk\n");
16038 sd_reset_target(un, pktp);
16039 }
16040 } else {
16041 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
16042 (xp->xb_nr_retry_count >
16043 un->un_reset_retry_count)) &&
16044 (un->un_startstop_timeid == NULL)) {
16045 scsi_log(SD_DEVINFO(un), sd_label,
16046 CE_WARN, "logical unit not ready, "
16047 "resetting disk\n");
16048 sd_reset_target(un, pktp);
16049 }
16050 }
16051 break;
16052
16053 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
16054 /*
16055 * If the target is in the process of becoming
16056 * ready, just proceed with the retry. This can
16057 * happen with CD-ROMs that take a long time to
16058 * read TOC after a power cycle or reset.
16059 */
16060 goto do_retry;
16061
16062 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */
16063 break;
16064
16065 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
16066 /*
16067 * Retries cannot help here so just fail right away.
16068 */
16069 goto fail_command;
16070
16071 case 0x88:
16072 /*
16073 * Vendor-unique code for T3/T4: it indicates a
16074 * path problem in a multipathed config, but as far as
16075 * the target driver is concerned it equates to a fatal
16076 * error, so we should just fail the command right away
16077 * (without printing anything to the console). If this
16078 * is not a T3/T4, fall thru to the default recovery
16079 * action.
16080 * T3/T4 is FC only, so there is no need to check is_fibre.
16081 */
16082 if (SD_IS_T3(un) || SD_IS_T4(un)) {
16083 sd_return_failed_command(un, bp, EIO);
16084 return;
16085 }
16086 /* FALLTHRU */
16087
16088 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
16089 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
16090 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
16091 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
16092 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
16093 default: /* Possible future codes in SCSI spec? */
16094 /*
16095 * For removable-media devices, do not retry if
16096 * ASCQ > 2 as these result mostly from USCSI commands
16097 * on MMC devices issued to check status of an
16098 * operation initiated in immediate mode. Also for
16099 * ASCQ >= 4 do not print console messages as these
16100 * mainly represent a user-initiated operation
16101 * instead of a system failure.
16102 */
16103 if (un->un_f_has_removable_media) {
16104 si.ssi_severity = SCSI_ERR_ALL;
16105 goto fail_command;
16106 }
16107 break;
16108 }
16109
16110 /*
16111 * As part of our recovery attempt for the NOT READY
16112 * condition, we issue a START STOP UNIT command. However
16113 * we want to wait for a short delay before attempting this
16114 * as there may still be more commands coming back from the
16115 * target with the check condition. To do this we use
16116 * timeout(9F) to call sd_start_stop_unit_callback() after
16117 * the delay interval expires. (sd_start_stop_unit_callback()
16118 * dispatches sd_start_stop_unit_task(), which will issue
16119 * the actual START STOP UNIT command.) The delay interval
16120 * is one-half of the delay that we will use to retry the
16121 * command that generated the NOT READY condition.
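 *
 * For example, assuming SD_BSY_TIMEOUT corresponds to roughly
 * three seconds (an illustrative value only), the START STOP
 * UNIT would be dispatched after about 1.5 seconds, and the
 * retry of the original command is then serialized behind its
 * completion in sd_start_stop_unit_task().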
16122 *
16123 * Note that we could just dispatch sd_start_stop_unit_task()
16124 * from here and allow it to sleep for the delay interval,
16125 * but then we would be tying up the taskq thread
16126 * unnecessarily for the duration of the delay.
16127 *
16128 * Do not issue the START STOP UNIT if the current command
16129 * is already a START STOP UNIT.
16130 */
16131 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
16132 break;
16133 }
16134
16135 /*
16136 * Do not schedule the timeout if one is already pending.
16137 */
16138 if (un->un_startstop_timeid != NULL) {
16139 SD_INFO(SD_LOG_ERROR, un,
16140 "sd_sense_key_not_ready: restart already issued to"
16141 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
16142 ddi_get_instance(SD_DEVINFO(un)));
16143 break;
16144 }
16145
16146 /*
16147 * Schedule the START STOP UNIT command, then queue the command
16148 * for a retry.
16149 *
16150 * Note: A timeout is not scheduled for this retry because we
16151 * want the retry to be serial with the START_STOP_UNIT. The
16152 * retry will be started when the START_STOP_UNIT is completed
16153 * in sd_start_stop_unit_task.
16154 */
16155 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
16156 un, SD_BSY_TIMEOUT / 2);
16157 xp->xb_nr_retry_count++;
16158 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
16159 return;
16160
16161 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
16162 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16163 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16164 "unit does not respond to selection\n");
16165 }
16166 break;
16167
16168 case 0x3A: /* MEDIUM NOT PRESENT */
16169 if (sd_error_level >= SCSI_ERR_FATAL) {
16170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16171 "Caddy not inserted in drive\n");
16172 }
16173
16174 sr_ejected(un);
16175 un->un_mediastate = DKIO_EJECTED;
16176 /* The state has changed, inform the media watch routines */
16177 cv_broadcast(&un->un_state_cv);
16178 /* Just fail if no media is present in the drive. */
16179 goto fail_command;
16180
16181 default:
16182 if (sd_error_level < SCSI_ERR_RETRYABLE) {
16183 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
16184 "Unit not Ready. Additional sense code 0x%x\n",
16185 asc);
16186 }
16187 break;
16188 }
16189
16190 do_retry:
16191
16192 /*
16193 * Retry the command, as some targets may report NOT READY for
16194 * several seconds after being reset.
16195 */
16196 xp->xb_nr_retry_count++;
16197 si.ssi_severity = SCSI_ERR_RETRYABLE;
16198 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
16199 &si, EIO, SD_BSY_TIMEOUT, NULL);
16200
16201 return;
16202
16203 fail_command:
16204 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16205 sd_return_failed_command(un, bp, EIO);
16206 }
16207
16208
16209
16210 /*
16211 * Function: sd_sense_key_medium_or_hardware_error
16212 *
16213 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
16214 * sense key.
16215 * 16216 * Context: May be called from interrupt context 16217 */ 16218 16219 static void 16220 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16221 uint8_t *sense_datap, 16222 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16223 { 16224 struct sd_sense_info si; 16225 uint8_t sense_key = scsi_sense_key(sense_datap); 16226 uint8_t asc = scsi_sense_asc(sense_datap); 16227 16228 ASSERT(un != NULL); 16229 ASSERT(mutex_owned(SD_MUTEX(un))); 16230 ASSERT(bp != NULL); 16231 ASSERT(xp != NULL); 16232 ASSERT(pktp != NULL); 16233 16234 si.ssi_severity = SCSI_ERR_FATAL; 16235 si.ssi_pfa_flag = FALSE; 16236 16237 if (sense_key == KEY_MEDIUM_ERROR) { 16238 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16239 } 16240 16241 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16242 16243 if ((un->un_reset_retry_count != 0) && 16244 (xp->xb_retry_count == un->un_reset_retry_count)) { 16245 mutex_exit(SD_MUTEX(un)); 16246 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16247 if (un->un_f_allow_bus_device_reset == TRUE) { 16248 16249 boolean_t try_resetting_target = B_TRUE; 16250 16251 /* 16252 * We need to be able to handle specific ASC when we are 16253 * handling a KEY_HARDWARE_ERROR. In particular 16254 * taking the default action of resetting the target may 16255 * not be the appropriate way to attempt recovery. 16256 * Resetting a target because of a single LUN failure 16257 * victimizes all LUNs on that target. 16258 * 16259 * This is true for the LSI arrays, if an LSI 16260 * array controller returns an ASC of 0x84 (LUN Dead) we 16261 * should trust it. 16262 */ 16263 16264 if (sense_key == KEY_HARDWARE_ERROR) { 16265 switch (asc) { 16266 case 0x84: 16267 if (SD_IS_LSI(un)) { 16268 try_resetting_target = B_FALSE; 16269 } 16270 break; 16271 default: 16272 break; 16273 } 16274 } 16275 16276 if (try_resetting_target == B_TRUE) { 16277 int reset_retval = 0; 16278 if (un->un_f_lun_reset_enabled == TRUE) { 16279 SD_TRACE(SD_LOG_IO_CORE, un, 16280 "sd_sense_key_medium_or_hardware_" 16281 "error: issuing RESET_LUN\n"); 16282 reset_retval = 16283 scsi_reset(SD_ADDRESS(un), 16284 RESET_LUN); 16285 } 16286 if (reset_retval == 0) { 16287 SD_TRACE(SD_LOG_IO_CORE, un, 16288 "sd_sense_key_medium_or_hardware_" 16289 "error: issuing RESET_TARGET\n"); 16290 (void) scsi_reset(SD_ADDRESS(un), 16291 RESET_TARGET); 16292 } 16293 } 16294 } 16295 mutex_enter(SD_MUTEX(un)); 16296 } 16297 16298 /* 16299 * This really ought to be a fatal error, but we will retry anyway 16300 * as some drives report this as a spurious error. 16301 */ 16302 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16303 &si, EIO, (clock_t)0, NULL); 16304 } 16305 16306 16307 16308 /* 16309 * Function: sd_sense_key_illegal_request 16310 * 16311 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16312 * 16313 * Context: May be called from interrupt context 16314 */ 16315 16316 static void 16317 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16318 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16319 { 16320 struct sd_sense_info si; 16321 16322 ASSERT(un != NULL); 16323 ASSERT(mutex_owned(SD_MUTEX(un))); 16324 ASSERT(bp != NULL); 16325 ASSERT(xp != NULL); 16326 ASSERT(pktp != NULL); 16327 16328 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16329 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16330 16331 si.ssi_severity = SCSI_ERR_INFO; 16332 si.ssi_pfa_flag = FALSE; 16333 16334 /* Pointless to retry if the target thinks it's an illegal request */ 16335 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16336 sd_return_failed_command(un, bp, EIO); 16337 } 16338 16339 16340 16341 16342 /* 16343 * Function: sd_sense_key_unit_attention 16344 * 16345 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16346 * 16347 * Context: May be called from interrupt context 16348 */ 16349 16350 static void 16351 sd_sense_key_unit_attention(struct sd_lun *un, 16352 uint8_t *sense_datap, 16353 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16354 { 16355 /* 16356 * For UNIT ATTENTION we allow retries for one minute. Devices 16357 * like Sonoma can return UNIT ATTENTION close to a minute 16358 * under certain conditions. 16359 */ 16360 int retry_check_flag = SD_RETRIES_UA; 16361 boolean_t kstat_updated = B_FALSE; 16362 struct sd_sense_info si; 16363 uint8_t asc = scsi_sense_asc(sense_datap); 16364 16365 ASSERT(un != NULL); 16366 ASSERT(mutex_owned(SD_MUTEX(un))); 16367 ASSERT(bp != NULL); 16368 ASSERT(xp != NULL); 16369 ASSERT(pktp != NULL); 16370 16371 si.ssi_severity = SCSI_ERR_INFO; 16372 si.ssi_pfa_flag = FALSE; 16373 16374 16375 switch (asc) { 16376 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16377 if (sd_report_pfa != 0) { 16378 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16379 si.ssi_pfa_flag = TRUE; 16380 retry_check_flag = SD_RETRIES_STANDARD; 16381 goto do_retry; 16382 } 16383 16384 break; 16385 16386 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16387 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16388 un->un_resvd_status |= 16389 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16390 } 16391 #ifdef _LP64 16392 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16393 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16394 un, KM_NOSLEEP) == 0) { 16395 /* 16396 * If we can't dispatch the task we'll just 16397 * live without descriptor sense. We can 16398 * try again on the next "unit attention" 16399 */ 16400 SD_ERROR(SD_LOG_ERROR, un, 16401 "sd_sense_key_unit_attention: " 16402 "Could not dispatch " 16403 "sd_reenable_dsense_task\n"); 16404 } 16405 } 16406 #endif /* _LP64 */ 16407 /* FALLTHRU */ 16408 16409 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16410 if (!un->un_f_has_removable_media) { 16411 break; 16412 } 16413 16414 /* 16415 * When we get a unit attention from a removable-media device, 16416 * it may be in a state that will take a long time to recover 16417 * (e.g., from a reset). Since we are executing in interrupt 16418 * context here, we cannot wait around for the device to come 16419 * back. So hand this command off to sd_media_change_task() 16420 * for deferred processing under taskq thread context. (Note 16421 * that the command still may be failed if a problem is 16422 * encountered at a later time.) 
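 *
 * (The dispatch below uses taskq_dispatch(9F) with KM_NOSLEEP because
 * this code may run in interrupt context, where it must not block for
 * memory or for device recovery; the dispatch can therefore fail, and
 * that failure path has to fail the original command itself.)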
16423 */ 16424 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16425 KM_NOSLEEP) == 0) { 16426 /* 16427 * Cannot dispatch the request so fail the command. 16428 */ 16429 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16430 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16431 si.ssi_severity = SCSI_ERR_FATAL; 16432 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16433 sd_return_failed_command(un, bp, EIO); 16434 } 16435 16436 /* 16437 * If we failed to dispatch sd_media_change_task(), we have already 16438 * updated the kstat. If the dispatch succeeded, the kstat will be 16439 * updated later if that task encounters an error. Either way, 16440 * set the kstat_updated flag here. 16441 */ 16442 kstat_updated = B_TRUE; 16443 16444 /* 16445 * Either the command has been successfully dispatched to a 16446 * task Q for retrying, or the dispatch failed. In either case 16447 * do NOT retry again by calling sd_retry_command. This sets up 16448 * two retries of the same command and when one completes and 16449 * frees the resources the other will access freed memory, 16450 * a bad thing. 16451 */ 16452 return; 16453 16454 default: 16455 break; 16456 } 16457 16458 /* 16459 * Update kstat if we haven't done that. 16460 */ 16461 if (!kstat_updated) { 16462 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16463 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16464 } 16465 16466 do_retry: 16467 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16468 EIO, SD_UA_RETRY_DELAY, NULL); 16469 } 16470 16471 16472 16473 /* 16474 * Function: sd_sense_key_fail_command 16475 * 16476 * Description: Used to fail a command when we don't like the sense key that 16477 * was returned. 16478 * 16479 * Context: May be called from interrupt context 16480 */ 16481 16482 static void 16483 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16484 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16485 { 16486 struct sd_sense_info si; 16487 16488 ASSERT(un != NULL); 16489 ASSERT(mutex_owned(SD_MUTEX(un))); 16490 ASSERT(bp != NULL); 16491 ASSERT(xp != NULL); 16492 ASSERT(pktp != NULL); 16493 16494 si.ssi_severity = SCSI_ERR_FATAL; 16495 si.ssi_pfa_flag = FALSE; 16496 16497 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16498 sd_return_failed_command(un, bp, EIO); 16499 } 16500 16501 16502 16503 /* 16504 * Function: sd_sense_key_blank_check 16505 * 16506 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16507 * Has no monetary connotation. 16508 * 16509 * Context: May be called from interrupt context 16510 */ 16511 16512 static void 16513 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16514 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16515 { 16516 struct sd_sense_info si; 16517 16518 ASSERT(un != NULL); 16519 ASSERT(mutex_owned(SD_MUTEX(un))); 16520 ASSERT(bp != NULL); 16521 ASSERT(xp != NULL); 16522 ASSERT(pktp != NULL); 16523 16524 /* 16525 * Blank check is not fatal for removable devices; therefore 16526 * it does not require a console message. 16527 */ 16528 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16529 SCSI_ERR_FATAL; 16530 si.ssi_pfa_flag = FALSE; 16531 16532 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16533 sd_return_failed_command(un, bp, EIO); 16534 } 16535 16536 16537 16538 16539 /* 16540 * Function: sd_sense_key_aborted_command 16541 * 16542 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16543 * 16544 * Context: May be called from interrupt context 16545 */ 16546 16547 static void 16548 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16549 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16550 { 16551 struct sd_sense_info si; 16552 16553 ASSERT(un != NULL); 16554 ASSERT(mutex_owned(SD_MUTEX(un))); 16555 ASSERT(bp != NULL); 16556 ASSERT(xp != NULL); 16557 ASSERT(pktp != NULL); 16558 16559 si.ssi_severity = SCSI_ERR_FATAL; 16560 si.ssi_pfa_flag = FALSE; 16561 16562 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16563 16564 /* 16565 * This really ought to be a fatal error, but we will retry anyway 16566 * as some drives report this as a spurious error. 16567 */ 16568 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16569 &si, EIO, drv_usectohz(100000), NULL); 16570 } 16571 16572 16573 16574 /* 16575 * Function: sd_sense_key_default 16576 * 16577 * Description: Default recovery action for several SCSI sense keys (basically 16578 * attempts a retry). 16579 * 16580 * Context: May be called from interrupt context 16581 */ 16582 16583 static void 16584 sd_sense_key_default(struct sd_lun *un, 16585 uint8_t *sense_datap, 16586 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16587 { 16588 struct sd_sense_info si; 16589 uint8_t sense_key = scsi_sense_key(sense_datap); 16590 16591 ASSERT(un != NULL); 16592 ASSERT(mutex_owned(SD_MUTEX(un))); 16593 ASSERT(bp != NULL); 16594 ASSERT(xp != NULL); 16595 ASSERT(pktp != NULL); 16596 16597 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16598 16599 /* 16600 * Undecoded sense key. Attempt retries and hope that will fix 16601 * the problem. Otherwise, we're dead. 16602 */ 16603 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16605 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16606 } 16607 16608 si.ssi_severity = SCSI_ERR_FATAL; 16609 si.ssi_pfa_flag = FALSE; 16610 16611 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16612 &si, EIO, (clock_t)0, NULL); 16613 } 16614 16615 16616 16617 /* 16618 * Function: sd_print_retry_msg 16619 * 16620 * Description: Print a message indicating the retry action being taken. 16621 * 16622 * Arguments: un - ptr to associated softstate 16623 * bp - ptr to buf(9S) for the command 16624 * arg - not used. 16625 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16626 * or SD_NO_RETRY_ISSUED 16627 * 16628 * Context: May be called from interrupt context 16629 */ 16630 /* ARGSUSED */ 16631 static void 16632 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16633 { 16634 struct sd_xbuf *xp; 16635 struct scsi_pkt *pktp; 16636 char *reasonp; 16637 char *msgp; 16638 16639 ASSERT(un != NULL); 16640 ASSERT(mutex_owned(SD_MUTEX(un))); 16641 ASSERT(bp != NULL); 16642 pktp = SD_GET_PKTP(bp); 16643 ASSERT(pktp != NULL); 16644 xp = SD_GET_XBUF(bp); 16645 ASSERT(xp != NULL); 16646 16647 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16648 mutex_enter(&un->un_pm_mutex); 16649 if ((un->un_state == SD_STATE_SUSPENDED) || 16650 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16651 (pktp->pkt_flags & FLAG_SILENT)) { 16652 mutex_exit(&un->un_pm_mutex); 16653 goto update_pkt_reason; 16654 } 16655 mutex_exit(&un->un_pm_mutex); 16656 16657 /* 16658 * Suppress messages if they are all the same pkt_reason; with 16659 * TQ, many (up to 256) are returned with the same pkt_reason. 16660 * If we are in panic, then suppress the retry messages. 
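 *
 * Illustrative case: when a tagged queue of commands is flushed after
 * a reset, every returned buf(9S) carries the same pkt_reason. Only
 * the first logs a "retrying command" message; the rest are suppressed
 * by the un_last_pkt_reason comparison below, unless sd_error_level is
 * set to SCSI_ERR_ALL.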
16661 */ 16662 switch (flag) { 16663 case SD_NO_RETRY_ISSUED: 16664 msgp = "giving up"; 16665 break; 16666 case SD_IMMEDIATE_RETRY_ISSUED: 16667 case SD_DELAYED_RETRY_ISSUED: 16668 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16669 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16670 (sd_error_level != SCSI_ERR_ALL))) { 16671 return; 16672 } 16673 msgp = "retrying command"; 16674 break; 16675 default: 16676 goto update_pkt_reason; 16677 } 16678 16679 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16680 scsi_rname(pktp->pkt_reason)); 16681 16682 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16683 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16684 16685 update_pkt_reason: 16686 /* 16687 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16688 * This is to prevent multiple console messages for the same failure 16689 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16690 * when the command is retried successfully because there still may be 16691 * more commands coming back with the same value of pktp->pkt_reason. 16692 */ 16693 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16694 un->un_last_pkt_reason = pktp->pkt_reason; 16695 } 16696 } 16697 16698 16699 /* 16700 * Function: sd_print_cmd_incomplete_msg 16701 * 16702 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16703 * 16704 * Arguments: un - ptr to associated softstate 16705 * bp - ptr to buf(9S) for the command 16706 * arg - passed to sd_print_retry_msg() 16707 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16708 * or SD_NO_RETRY_ISSUED 16709 * 16710 * Context: May be called from interrupt context 16711 */ 16712 16713 static void 16714 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16715 int code) 16716 { 16717 dev_info_t *dip; 16718 16719 ASSERT(un != NULL); 16720 ASSERT(mutex_owned(SD_MUTEX(un))); 16721 ASSERT(bp != NULL); 16722 16723 switch (code) { 16724 case SD_NO_RETRY_ISSUED: 16725 /* Command was failed. Someone turned off this target? */ 16726 if (un->un_state != SD_STATE_OFFLINE) { 16727 /* 16728 * Suppress message if we are detaching and 16729 * device has been disconnected 16730 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16731 * private interface and not part of the DDI 16732 */ 16733 dip = un->un_sd->sd_dev; 16734 if (!(DEVI_IS_DETACHING(dip) && 16735 DEVI_IS_DEVICE_REMOVED(dip))) { 16736 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16737 "disk not responding to selection\n"); 16738 } 16739 New_state(un, SD_STATE_OFFLINE); 16740 } 16741 break; 16742 16743 case SD_DELAYED_RETRY_ISSUED: 16744 case SD_IMMEDIATE_RETRY_ISSUED: 16745 default: 16746 /* Command was successfully queued for retry */ 16747 sd_print_retry_msg(un, bp, arg, code); 16748 break; 16749 } 16750 } 16751 16752 16753 /* 16754 * Function: sd_pkt_reason_cmd_incomplete 16755 * 16756 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
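 *		Sketch of the policy in the body below: the target is
 *		reset unless pkt_state shows that only the bus was ever
 *		obtained (selection never completed), and
 *		SD_RETRIES_FAILFAST is added to the retry flags whenever
 *		STATE_GOT_TARGET was never reached, since the device is
 *		probably unreachable.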
16757 * 16758 * Context: May be called from interrupt context 16759 */ 16760 16761 static void 16762 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16763 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16764 { 16765 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16766 16767 ASSERT(un != NULL); 16768 ASSERT(mutex_owned(SD_MUTEX(un))); 16769 ASSERT(bp != NULL); 16770 ASSERT(xp != NULL); 16771 ASSERT(pktp != NULL); 16772 16773 /* Do not do a reset if selection did not complete */ 16774 /* Note: Should this not just check the bit? */ 16775 if (pktp->pkt_state != STATE_GOT_BUS) { 16776 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16777 sd_reset_target(un, pktp); 16778 } 16779 16780 /* 16781 * If the target was not successfully selected, then set 16782 * SD_RETRIES_FAILFAST to indicate that we lost communication 16783 * with the target, and further retries and/or commands are 16784 * likely to take a long time. 16785 */ 16786 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16787 flag |= SD_RETRIES_FAILFAST; 16788 } 16789 16790 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16791 16792 sd_retry_command(un, bp, flag, 16793 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16794 } 16795 16796 16797 16798 /* 16799 * Function: sd_pkt_reason_cmd_tran_err 16800 * 16801 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16802 * 16803 * Context: May be called from interrupt context 16804 */ 16805 16806 static void 16807 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16808 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16809 { 16810 ASSERT(un != NULL); 16811 ASSERT(mutex_owned(SD_MUTEX(un))); 16812 ASSERT(bp != NULL); 16813 ASSERT(xp != NULL); 16814 ASSERT(pktp != NULL); 16815 16816 /* 16817 * Do not reset if we got a parity error, or if 16818 * selection did not complete. 16819 */ 16820 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16821 /* Note: Should this not just check the bit for pkt_state? */ 16822 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16823 (pktp->pkt_state != STATE_GOT_BUS)) { 16824 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16825 sd_reset_target(un, pktp); 16826 } 16827 16828 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16829 16830 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16831 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16832 } 16833 16834 16835 16836 /* 16837 * Function: sd_pkt_reason_cmd_reset 16838 * 16839 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16840 * 16841 * Context: May be called from interrupt context 16842 */ 16843 16844 static void 16845 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16846 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16847 { 16848 ASSERT(un != NULL); 16849 ASSERT(mutex_owned(SD_MUTEX(un))); 16850 ASSERT(bp != NULL); 16851 ASSERT(xp != NULL); 16852 ASSERT(pktp != NULL); 16853 16854 /* The target may still be running the command, so try to reset. */ 16855 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16856 sd_reset_target(un, pktp); 16857 16858 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16859 16860 /* 16861 * If pkt_reason is CMD_RESET chances are that this pkt got 16862 * reset because another target on this bus caused it. The target 16863 * that caused it should get CMD_TIMEOUT with pkt_statistics 16864 * of STAT_TIMEOUT/STAT_DEV_RESET. 
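 *
 * (Illustrative scenario: target A hangs and times out, the HBA resets
 * the bus to recover, and an in-flight command to target B comes back
 * with CMD_RESET. B is a victim of A's recovery, hence the
 * SD_RETRIES_VICTIM retry class used below instead of the standard
 * one.)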
16865 */ 16866 16867 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16868 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16869 } 16870 16871 16872 16873 16874 /* 16875 * Function: sd_pkt_reason_cmd_aborted 16876 * 16877 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16878 * 16879 * Context: May be called from interrupt context 16880 */ 16881 16882 static void 16883 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16884 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16885 { 16886 ASSERT(un != NULL); 16887 ASSERT(mutex_owned(SD_MUTEX(un))); 16888 ASSERT(bp != NULL); 16889 ASSERT(xp != NULL); 16890 ASSERT(pktp != NULL); 16891 16892 /* The target may still be running the command, so try to reset. */ 16893 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16894 sd_reset_target(un, pktp); 16895 16896 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16897 16898 /* 16899 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16900 * aborted because another target on this bus caused it. The target 16901 * that caused it should get CMD_TIMEOUT with pkt_statistics 16902 * of STAT_TIMEOUT/STAT_DEV_RESET. 16903 */ 16904 16905 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16906 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16907 } 16908 16909 16910 16911 /* 16912 * Function: sd_pkt_reason_cmd_timeout 16913 * 16914 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16915 * 16916 * Context: May be called from interrupt context 16917 */ 16918 16919 static void 16920 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16921 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16922 { 16923 ASSERT(un != NULL); 16924 ASSERT(mutex_owned(SD_MUTEX(un))); 16925 ASSERT(bp != NULL); 16926 ASSERT(xp != NULL); 16927 ASSERT(pktp != NULL); 16928 16929 16930 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16931 sd_reset_target(un, pktp); 16932 16933 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16934 16935 /* 16936 * A command timeout indicates that we could not establish 16937 * communication with the target, so set SD_RETRIES_FAILFAST 16938 * as further retries/commands are likely to take a long time. 16939 */ 16940 sd_retry_command(un, bp, 16941 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16942 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16943 } 16944 16945 16946 16947 /* 16948 * Function: sd_pkt_reason_cmd_unx_bus_free 16949 * 16950 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16951 * 16952 * Context: May be called from interrupt context 16953 */ 16954 16955 static void 16956 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16957 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16958 { 16959 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16960 16961 ASSERT(un != NULL); 16962 ASSERT(mutex_owned(SD_MUTEX(un))); 16963 ASSERT(bp != NULL); 16964 ASSERT(xp != NULL); 16965 ASSERT(pktp != NULL); 16966 16967 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16968 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16969 16970 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16971 sd_print_retry_msg : NULL; 16972 16973 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16974 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16975 } 16976 16977 16978 /* 16979 * Function: sd_pkt_reason_cmd_tag_reject 16980 * 16981 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
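 *		Sketch of the fallback performed below: the device
 *		rejected a tagged command, so tagged queueing is turned
 *		off via scsi_ifsetcap(9F) ("tagged-qing" set to 0) and
 *		the throttle is lowered, to min(un_throttle, 3) or to 1
 *		depending on un_f_opt_queueing, before the command is
 *		retried.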
16982 * 16983 * Context: May be called from interrupt context 16984 */ 16985 16986 static void 16987 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16988 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16989 { 16990 ASSERT(un != NULL); 16991 ASSERT(mutex_owned(SD_MUTEX(un))); 16992 ASSERT(bp != NULL); 16993 ASSERT(xp != NULL); 16994 ASSERT(pktp != NULL); 16995 16996 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16997 pktp->pkt_flags = 0; 16998 un->un_tagflags = 0; 16999 if (un->un_f_opt_queueing == TRUE) { 17000 un->un_throttle = min(un->un_throttle, 3); 17001 } else { 17002 un->un_throttle = 1; 17003 } 17004 mutex_exit(SD_MUTEX(un)); 17005 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17006 mutex_enter(SD_MUTEX(un)); 17007 17008 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17009 17010 /* Legacy behavior not to check retry counts here. */ 17011 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17012 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17013 } 17014 17015 17016 /* 17017 * Function: sd_pkt_reason_default 17018 * 17019 * Description: Default recovery actions for SCSA pkt_reason values that 17020 * do not have more explicit recovery actions. 17021 * 17022 * Context: May be called from interrupt context 17023 */ 17024 17025 static void 17026 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17027 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17028 { 17029 ASSERT(un != NULL); 17030 ASSERT(mutex_owned(SD_MUTEX(un))); 17031 ASSERT(bp != NULL); 17032 ASSERT(xp != NULL); 17033 ASSERT(pktp != NULL); 17034 17035 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17036 sd_reset_target(un, pktp); 17037 17038 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17039 17040 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17041 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17042 } 17043 17044 17045 17046 /* 17047 * Function: sd_pkt_status_check_condition 17048 * 17049 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17050 * 17051 * Context: May be called from interrupt context 17052 */ 17053 17054 static void 17055 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17056 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17057 { 17058 ASSERT(un != NULL); 17059 ASSERT(mutex_owned(SD_MUTEX(un))); 17060 ASSERT(bp != NULL); 17061 ASSERT(xp != NULL); 17062 ASSERT(pktp != NULL); 17063 17064 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17065 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17066 17067 /* 17068 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17069 * command will be retried after the request sense). Otherwise, retry 17070 * the command. Note: we are issuing the request sense even though the 17071 * retry limit may have been reached for the failed command. 
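 *
 * Rough shape of the two paths (annotation only, not driver code):
 *
 *	no ARQ:	sd_send_request_sense_command() builds and transports a
 *		REQUEST SENSE; the original command is retried once the
 *		sense data arrives.
 *	ARQ:	the HBA already fetched the sense data along with the
 *		original command, so simply sd_retry_command().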
17072 */ 17073 if (un->un_f_arq_enabled == FALSE) { 17074 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17075 "no ARQ, sending request sense command\n"); 17076 sd_send_request_sense_command(un, bp, pktp); 17077 } else { 17078 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17079 "ARQ, retrying request sense command\n"); 17080 #if defined(__i386) || defined(__amd64) 17081 /* 17082 * The SD_RETRY_DELAY value needs to be adjusted here 17083 * if SD_RETRY_DELAY changes in sddef.h 17084 */ 17085 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17086 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 17087 NULL); 17088 #else 17089 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17090 EIO, SD_RETRY_DELAY, NULL); 17091 #endif 17092 } 17093 17094 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17095 } 17096 17097 17098 /* 17099 * Function: sd_pkt_status_busy 17100 * 17101 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17102 * 17103 * Context: May be called from interrupt context 17104 */ 17105 17106 static void 17107 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17108 struct scsi_pkt *pktp) 17109 { 17110 ASSERT(un != NULL); 17111 ASSERT(mutex_owned(SD_MUTEX(un))); 17112 ASSERT(bp != NULL); 17113 ASSERT(xp != NULL); 17114 ASSERT(pktp != NULL); 17115 17116 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17117 "sd_pkt_status_busy: entry\n"); 17118 17119 /* If retries are exhausted, just fail the command. */ 17120 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17121 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17122 "device busy too long\n"); 17123 sd_return_failed_command(un, bp, EIO); 17124 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17125 "sd_pkt_status_busy: exit\n"); 17126 return; 17127 } 17128 xp->xb_retry_count++; 17129 17130 /* 17131 * Try to reset the target. However, we do not want to perform 17132 * more than one reset if the device continues to fail. The reset 17133 * will be performed when the retry count reaches the reset 17134 * threshold. This threshold should be set such that at least 17135 * one retry is issued before the reset is performed. 17136 */ 17137 if (xp->xb_retry_count == 17138 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17139 int rval = 0; 17140 mutex_exit(SD_MUTEX(un)); 17141 if (un->un_f_allow_bus_device_reset == TRUE) { 17142 /* 17143 * First try to reset the LUN; if we cannot then 17144 * try to reset the target. 17145 */ 17146 if (un->un_f_lun_reset_enabled == TRUE) { 17147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17148 "sd_pkt_status_busy: RESET_LUN\n"); 17149 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17150 } 17151 if (rval == 0) { 17152 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17153 "sd_pkt_status_busy: RESET_TARGET\n"); 17154 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17155 } 17156 } 17157 if (rval == 0) { 17158 /* 17159 * If the RESET_LUN and/or RESET_TARGET failed, 17160 * try RESET_ALL 17161 */ 17162 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17163 "sd_pkt_status_busy: RESET_ALL\n"); 17164 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17165 } 17166 mutex_enter(SD_MUTEX(un)); 17167 if (rval == 0) { 17168 /* 17169 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17170 * At this point we give up & fail the command.
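 *
 * (The escalation attempted above is, in order: RESET_LUN when
 * un_f_lun_reset_enabled is set, then RESET_TARGET, then RESET_ALL;
 * each step runs only if the preceding one returned 0.)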
17171 */ 17172 sd_return_failed_command(un, bp, EIO); 17173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17174 "sd_pkt_status_busy: exit (failed cmd)\n"); 17175 return; 17176 } 17177 } 17178 17179 /* 17180 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17181 * we have already checked the retry counts above. 17182 */ 17183 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17184 EIO, SD_BSY_TIMEOUT, NULL); 17185 17186 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17187 "sd_pkt_status_busy: exit\n"); 17188 } 17189 17190 17191 /* 17192 * Function: sd_pkt_status_reservation_conflict 17193 * 17194 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17195 * command status. 17196 * 17197 * Context: May be called from interrupt context 17198 */ 17199 17200 static void 17201 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17202 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17203 { 17204 ASSERT(un != NULL); 17205 ASSERT(mutex_owned(SD_MUTEX(un))); 17206 ASSERT(bp != NULL); 17207 ASSERT(xp != NULL); 17208 ASSERT(pktp != NULL); 17209 17210 /* 17211 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17212 * conflict could be due to various reasons like incorrect keys, not 17213 * registered or not reserved etc. So, we return EACCES to the caller. 17214 */ 17215 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17216 int cmd = SD_GET_PKT_OPCODE(pktp); 17217 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17218 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17219 sd_return_failed_command(un, bp, EACCES); 17220 return; 17221 } 17222 } 17223 17224 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17225 17226 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17227 if (sd_failfast_enable != 0) { 17228 /* By definition, we must panic here.... */ 17229 sd_panic_for_res_conflict(un); 17230 /*NOTREACHED*/ 17231 } 17232 SD_ERROR(SD_LOG_IO, un, 17233 "sd_handle_resv_conflict: Disk Reserved\n"); 17234 sd_return_failed_command(un, bp, EACCES); 17235 return; 17236 } 17237 17238 /* 17239 * 1147670: retry only if sd_retry_on_reservation_conflict 17240 * property is set (default is 1). Retries will not succeed 17241 * on a disk reserved by another initiator. HA systems 17242 * may reset this via sd.conf to avoid these retries. 17243 * 17244 * Note: The legacy return code for this failure is EIO, however EACCES 17245 * seems more appropriate for a reservation conflict. 17246 */ 17247 if (sd_retry_on_reservation_conflict == 0) { 17248 SD_ERROR(SD_LOG_IO, un, 17249 "sd_handle_resv_conflict: Device Reserved\n"); 17250 sd_return_failed_command(un, bp, EIO); 17251 return; 17252 } 17253 17254 /* 17255 * Retry the command if we can. 17256 * 17257 * Note: The legacy return code for this failure is EIO, however EACCES 17258 * seems more appropriate for a reservation conflict. 17259 */ 17260 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17261 (clock_t)2, NULL); 17262 } 17263 17264 17265 17266 /* 17267 * Function: sd_pkt_status_qfull 17268 * 17269 * Description: Handle a QUEUE FULL condition from the target. This can 17270 * occur if the HBA does not handle the queue full condition. 17271 * (Basically this means third-party HBAs as Sun HBAs will 17272 * handle the queue full condition.) Note that if there are 17273 * some commands already in the transport, then the queue full 17274 * has occurred because the queue for this nexus is actually 17275 * full. 
If there are no commands in the transport, then the 17276 * queue full is resulting from some other initiator or lun 17277 * consuming all the resources at the target. 17278 * 17279 * Context: May be called from interrupt context 17280 */ 17281 17282 static void 17283 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17284 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17285 { 17286 ASSERT(un != NULL); 17287 ASSERT(mutex_owned(SD_MUTEX(un))); 17288 ASSERT(bp != NULL); 17289 ASSERT(xp != NULL); 17290 ASSERT(pktp != NULL); 17291 17292 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17293 "sd_pkt_status_qfull: entry\n"); 17294 17295 /* 17296 * Just lower the QFULL throttle and retry the command. Note that 17297 * we do not limit the number of retries here. 17298 */ 17299 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17300 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17301 SD_RESTART_TIMEOUT, NULL); 17302 17303 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17304 "sd_pkt_status_qfull: exit\n"); 17305 } 17306 17307 17308 /* 17309 * Function: sd_reset_target 17310 * 17311 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17312 * RESET_TARGET, or RESET_ALL. 17313 * 17314 * Context: May be called under interrupt context. 17315 */ 17316 17317 static void 17318 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17319 { 17320 int rval = 0; 17321 17322 ASSERT(un != NULL); 17323 ASSERT(mutex_owned(SD_MUTEX(un))); 17324 ASSERT(pktp != NULL); 17325 17326 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17327 17328 /* 17329 * No need to reset if the transport layer has already done so. 17330 */ 17331 if ((pktp->pkt_statistics & 17332 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17333 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17334 "sd_reset_target: no reset\n"); 17335 return; 17336 } 17337 17338 mutex_exit(SD_MUTEX(un)); 17339 17340 if (un->un_f_allow_bus_device_reset == TRUE) { 17341 if (un->un_f_lun_reset_enabled == TRUE) { 17342 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17343 "sd_reset_target: RESET_LUN\n"); 17344 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17345 } 17346 if (rval == 0) { 17347 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17348 "sd_reset_target: RESET_TARGET\n"); 17349 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17350 } 17351 } 17352 17353 if (rval == 0) { 17354 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17355 "sd_reset_target: RESET_ALL\n"); 17356 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17357 } 17358 17359 mutex_enter(SD_MUTEX(un)); 17360 17361 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17362 } 17363 17364 17365 /* 17366 * Function: sd_media_change_task 17367 * 17368 * Description: Recovery action for CDROM to become available. 
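 *		Sketch of the retry loop in the body below; EAGAIN from
 *		sd_handle_mchange() means the drive is still becoming
 *		ready, so the limit is extended:
 *
 *			retry_limit = SD_UNIT_ATTENTION_RETRY / 10;
 *			while (retry_count++ < retry_limit) {
 *				if ((err = sd_handle_mchange(un)) == 0)
 *					break;
 *				if (err == EAGAIN)
 *					retry_limit =
 *					    SD_UNIT_ATTENTION_RETRY;
 *				delay(drv_usectohz(500000));
 *			}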
17369 * 17370 * Context: Executes in a taskq() thread context 17371 */ 17372 17373 static void 17374 sd_media_change_task(void *arg) 17375 { 17376 struct scsi_pkt *pktp = arg; 17377 struct sd_lun *un; 17378 struct buf *bp; 17379 struct sd_xbuf *xp; 17380 int err = 0; 17381 int retry_count = 0; 17382 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17383 struct sd_sense_info si; 17384 17385 ASSERT(pktp != NULL); 17386 bp = (struct buf *)pktp->pkt_private; 17387 ASSERT(bp != NULL); 17388 xp = SD_GET_XBUF(bp); 17389 ASSERT(xp != NULL); 17390 un = SD_GET_UN(bp); 17391 ASSERT(un != NULL); 17392 ASSERT(!mutex_owned(SD_MUTEX(un))); 17393 ASSERT(un->un_f_monitor_media_state); 17394 17395 si.ssi_severity = SCSI_ERR_INFO; 17396 si.ssi_pfa_flag = FALSE; 17397 17398 /* 17399 * When a reset is issued on a CDROM, it takes a long time to 17400 * recover. The first few attempts to read capacity and other things 17401 * related to handling unit attention fail (with an ASC of 0x4 and 17402 * an ASCQ of 0x1). In that case we want to do enough retries, while 17403 * limiting the retries in other cases of genuine failure, such as 17404 * no media in the drive. 17405 */ 17406 while (retry_count++ < retry_limit) { 17407 if ((err = sd_handle_mchange(un)) == 0) { 17408 break; 17409 } 17410 if (err == EAGAIN) { 17411 retry_limit = SD_UNIT_ATTENTION_RETRY; 17412 } 17413 /* Sleep for 0.5 sec. & try again */ 17414 delay(drv_usectohz(500000)); 17415 } 17416 17417 /* 17418 * Dispatch (retry or fail) the original command here, 17419 * along with appropriate console messages.... 17420 * 17421 * Must grab the mutex before calling sd_retry_command, 17422 * sd_print_sense_msg and sd_return_failed_command. 17423 */ 17424 mutex_enter(SD_MUTEX(un)); 17425 if (err != SD_CMD_SUCCESS) { 17426 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17427 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17428 si.ssi_severity = SCSI_ERR_FATAL; 17429 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17430 sd_return_failed_command(un, bp, EIO); 17431 } else { 17432 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17433 &si, EIO, (clock_t)0, NULL); 17434 } 17435 mutex_exit(SD_MUTEX(un)); 17436 } 17437 17438 17439 17440 /* 17441 * Function: sd_handle_mchange 17442 * 17443 * Description: Perform geometry validation & other recovery when CDROM 17444 * has been removed from drive.
17445 * 17446 * Return Code: 0 for success 17447 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17448 * sd_send_scsi_READ_CAPACITY() 17449 * 17450 * Context: Executes in a taskq() thread context 17451 */ 17452 17453 static int 17454 sd_handle_mchange(struct sd_lun *un) 17455 { 17456 uint64_t capacity; 17457 uint32_t lbasize; 17458 int rval; 17459 17460 ASSERT(!mutex_owned(SD_MUTEX(un))); 17461 ASSERT(un->un_f_monitor_media_state); 17462 17463 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17464 SD_PATH_DIRECT_PRIORITY)) != 0) { 17465 return (rval); 17466 } 17467 17468 mutex_enter(SD_MUTEX(un)); 17469 sd_update_block_info(un, lbasize, capacity); 17470 17471 if (un->un_errstats != NULL) { 17472 struct sd_errstats *stp = 17473 (struct sd_errstats *)un->un_errstats->ks_data; 17474 stp->sd_capacity.value.ui64 = (uint64_t) 17475 ((uint64_t)un->un_blockcount * 17476 (uint64_t)un->un_tgt_blocksize); 17477 } 17478 17479 17480 /* 17481 * Check if the media in the device is writable or not 17482 */ 17483 if (ISCD(un)) 17484 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17485 17486 /* 17487 * Note: Maybe let the strategy/partitioning chain worry about getting 17488 * valid geometry. 17489 */ 17490 mutex_exit(SD_MUTEX(un)); 17491 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17492 17493 17494 if (cmlb_validate(un->un_cmlbhandle, 0, 17495 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17496 return (EIO); 17497 } else { 17498 if (un->un_f_pkstats_enabled) { 17499 sd_set_pstats(un); 17500 SD_TRACE(SD_LOG_IO_PARTITION, un, 17501 "sd_handle_mchange: un:0x%p pstats created and " 17502 "set\n", un); 17503 } 17504 } 17505 17506 17507 /* 17508 * Try to lock the door 17509 */ 17510 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17511 SD_PATH_DIRECT_PRIORITY)); 17512 } 17513 17514 17515 /* 17516 * Function: sd_send_scsi_DOORLOCK 17517 * 17518 * Description: Issue the scsi DOOR LOCK command 17519 * 17520 * Arguments: un - pointer to driver soft state (unit) structure for 17521 * this target. 17522 * flag - SD_REMOVAL_ALLOW 17523 * SD_REMOVAL_PREVENT 17524 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17525 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17526 * to use the USCSI "direct" chain and bypass the normal 17527 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17528 * command is issued as part of an error recovery action. 17529 * 17530 * Return Code: 0 - Success 17531 * errno return code from sd_send_scsi_cmd() 17532 * 17533 * Context: Can sleep. 17534 */ 17535 17536 static int 17537 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17538 { 17539 union scsi_cdb cdb; 17540 struct uscsi_cmd ucmd_buf; 17541 struct scsi_extended_sense sense_buf; 17542 int status; 17543 17544 ASSERT(un != NULL); 17545 ASSERT(!mutex_owned(SD_MUTEX(un))); 17546 17547 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17548 17549 /* already determined doorlock is not supported, fake success */ 17550 if (un->un_f_doorlock_supported == FALSE) { 17551 return (0); 17552 } 17553 17554 /* 17555 * If we are ejecting and see an SD_REMOVAL_PREVENT 17556 * ignore the command so we can complete the eject 17557 * operation. 
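 *
 * (For reference: the PREVENT ALLOW MEDIUM REMOVAL CDB built below is
 * a 6-byte group 0 command, and byte 4 carries the Prevent field, so
 * SD_REMOVAL_PREVENT or SD_REMOVAL_ALLOW is dropped directly into
 * cdb_opaque[4].)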
17558 */ 17559 if (flag == SD_REMOVAL_PREVENT) { 17560 mutex_enter(SD_MUTEX(un)); 17561 if (un->un_f_ejecting == TRUE) { 17562 mutex_exit(SD_MUTEX(un)); 17563 return (EAGAIN); 17564 } 17565 mutex_exit(SD_MUTEX(un)); 17566 } 17567 17568 bzero(&cdb, sizeof (cdb)); 17569 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17570 17571 cdb.scc_cmd = SCMD_DOORLOCK; 17572 cdb.cdb_opaque[4] = (uchar_t)flag; 17573 17574 ucmd_buf.uscsi_cdb = (char *)&cdb; 17575 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17576 ucmd_buf.uscsi_bufaddr = NULL; 17577 ucmd_buf.uscsi_buflen = 0; 17578 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17579 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17580 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17581 ucmd_buf.uscsi_timeout = 15; 17582 17583 SD_TRACE(SD_LOG_IO, un, 17584 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17585 17586 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17587 UIO_SYSSPACE, path_flag); 17588 17589 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17590 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17591 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17592 /* fake success and skip subsequent doorlock commands */ 17593 un->un_f_doorlock_supported = FALSE; 17594 return (0); 17595 } 17596 17597 return (status); 17598 } 17599 17600 /* 17601 * Function: sd_send_scsi_READ_CAPACITY 17602 * 17603 * Description: This routine uses the scsi READ CAPACITY command to determine 17604 * the device capacity in number of blocks and the device native 17605 * block size. If this function returns a failure, then the 17606 * values in *capp and *lbap are undefined. If the capacity 17607 * returned is 0xffffffff then the lun is too large for a 17608 * normal READ CAPACITY command and the results of a 17609 * READ CAPACITY 16 will be used instead. 17610 * 17611 * Arguments: un - ptr to soft state struct for the target 17612 * capp - ptr to unsigned 64-bit variable to receive the 17613 * capacity value from the command. 17614 * lbap - ptr to unsigned 32-bit variable to receive the 17615 * block size value from the command 17616 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17617 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17618 * to use the USCSI "direct" chain and bypass the normal 17619 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17620 * command is issued as part of an error recovery action. 17621 * 17622 * Return Code: 0 - Success 17623 * EIO - IO error 17624 * EACCES - Reservation conflict detected 17625 * EAGAIN - Device is becoming ready 17626 * errno return code from sd_send_scsi_cmd() 17627 * 17628 * Context: Can sleep. Blocks until command completes. 17629 */ 17630 17631 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17632 17633 static int 17634 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17635 int path_flag) 17636 { 17637 struct scsi_extended_sense sense_buf; 17638 struct uscsi_cmd ucmd_buf; 17639 union scsi_cdb cdb; 17640 uint32_t *capacity_buf; 17641 uint64_t capacity; 17642 uint32_t lbasize; 17643 int status; 17644 17645 ASSERT(un != NULL); 17646 ASSERT(!mutex_owned(SD_MUTEX(un))); 17647 ASSERT(capp != NULL); 17648 ASSERT(lbap != NULL); 17649 17650 SD_TRACE(SD_LOG_IO, un, 17651 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17652 17653 /* 17654 * First send a READ_CAPACITY command to the target. 17655 * (This command is mandatory under SCSI-2.) 17656 * 17657 * Set up the CDB for the READ_CAPACITY command.
The Partial 17658 * Medium Indicator bit is cleared. The address field must be 17659 * zero if the PMI bit is zero. 17660 */ 17661 bzero(&cdb, sizeof (cdb)); 17662 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17663 17664 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17665 17666 cdb.scc_cmd = SCMD_READ_CAPACITY; 17667 17668 ucmd_buf.uscsi_cdb = (char *)&cdb; 17669 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17670 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17671 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17672 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17673 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17674 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17675 ucmd_buf.uscsi_timeout = 60; 17676 17677 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17678 UIO_SYSSPACE, path_flag); 17679 17680 switch (status) { 17681 case 0: 17682 /* Return failure if we did not get valid capacity data. */ 17683 if (ucmd_buf.uscsi_resid != 0) { 17684 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17685 return (EIO); 17686 } 17687 17688 /* 17689 * Read capacity and block size from the READ CAPACITY 10 data. 17690 * This data may be adjusted later due to device specific 17691 * issues. 17692 * 17693 * According to the SCSI spec, the READ CAPACITY 10 17694 * command returns the following: 17695 * 17696 * bytes 0-3: Maximum logical block address available. 17697 * (MSB in byte:0 & LSB in byte:3) 17698 * 17699 * bytes 4-7: Block length in bytes 17700 * (MSB in byte:4 & LSB in byte:7) 17701 * 17702 */ 17703 capacity = BE_32(capacity_buf[0]); 17704 lbasize = BE_32(capacity_buf[1]); 17705 17706 /* 17707 * Done with capacity_buf 17708 */ 17709 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17710 17711 /* 17712 * if the reported capacity is set to all 0xf's, then 17713 * this disk is too large and requires SBC-2 commands. 17714 * Reissue the request using READ CAPACITY 16. 17715 */ 17716 if (capacity == 0xffffffff) { 17717 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17718 &lbasize, path_flag); 17719 if (status != 0) { 17720 return (status); 17721 } 17722 } 17723 break; /* Success! */ 17724 case EIO: 17725 switch (ucmd_buf.uscsi_status) { 17726 case STATUS_RESERVATION_CONFLICT: 17727 status = EACCES; 17728 break; 17729 case STATUS_CHECK: 17730 /* 17731 * Check condition; look for ASC/ASCQ of 0x04/0x01 17732 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17733 */ 17734 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17735 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17736 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17737 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17738 return (EAGAIN); 17739 } 17740 break; 17741 default: 17742 break; 17743 } 17744 /* FALLTHRU */ 17745 default: 17746 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17747 return (status); 17748 } 17749 17750 /* 17751 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17752 * (2352 and 0 are common) so for these devices always force the value 17753 * to 2048 as required by the ATAPI specs. 17754 */ 17755 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17756 lbasize = 2048; 17757 } 17758 17759 /* 17760 * Get the maximum LBA value from the READ CAPACITY data. 17761 * Here we assume that the Partial Medium Indicator (PMI) bit 17762 * was cleared when issuing the command. This means that the LBA 17763 * returned from the device is the LBA of the last logical block 17764 * on the logical unit. The actual logical block count will be 17765 * this value plus one. 
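 *
 * Worked example with illustrative numbers: a CD-ROM that returns a
 * maximum LBA of 0x3FFFF with an lbasize of 2048 holds 0x40000 native
 * blocks; assuming the usual un_sys_blocksize of 512, the scaling
 * below yields
 *
 *	capacity = (0x3FFFF + 1) * (2048 / 512) = 0x100000
 *
 * 512-byte blocks.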
17766 * 17767 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17768 * so scale the capacity value to reflect this. 17769 */ 17770 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17771 17772 /* 17773 * Copy the values from the READ CAPACITY command into the space 17774 * provided by the caller. 17775 */ 17776 *capp = capacity; 17777 *lbap = lbasize; 17778 17779 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17780 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17781 17782 /* 17783 * Both the lbasize and capacity from the device must be nonzero, 17784 * otherwise we assume that the values are not valid and return 17785 * failure to the caller. (4203735) 17786 */ 17787 if ((capacity == 0) || (lbasize == 0)) { 17788 return (EIO); 17789 } 17790 17791 return (0); 17792 } 17793 17794 /* 17795 * Function: sd_send_scsi_READ_CAPACITY_16 17796 * 17797 * Description: This routine uses the scsi READ CAPACITY 16 command to 17798 * determine the device capacity in number of blocks and the 17799 * device native block size. If this function returns a failure, 17800 * then the values in *capp and *lbap are undefined. 17801 * This routine should always be called by 17802 * sd_send_scsi_READ_CAPACITY which will apply any device 17803 * specific adjustments to capacity and lbasize. 17804 * 17805 * Arguments: un - ptr to soft state struct for the target 17806 * capp - ptr to unsigned 64-bit variable to receive the 17807 * capacity value from the command. 17808 * lbap - ptr to unsigned 32-bit variable to receive the 17809 * block size value from the command 17810 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17811 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17812 * to use the USCSI "direct" chain and bypass the normal 17813 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17814 * this command is issued as part of an error recovery 17815 * action. 17816 * 17817 * Return Code: 0 - Success 17818 * EIO - IO error 17819 * EACCES - Reservation conflict detected 17820 * EAGAIN - Device is becoming ready 17821 * errno return code from sd_send_scsi_cmd() 17822 * 17823 * Context: Can sleep. Blocks until command completes. 17824 */ 17825 17826 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17827 17828 static int 17829 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17830 uint32_t *lbap, int path_flag) 17831 { 17832 struct scsi_extended_sense sense_buf; 17833 struct uscsi_cmd ucmd_buf; 17834 union scsi_cdb cdb; 17835 uint64_t *capacity16_buf; 17836 uint64_t capacity; 17837 uint32_t lbasize; 17838 int status; 17839 17840 ASSERT(un != NULL); 17841 ASSERT(!mutex_owned(SD_MUTEX(un))); 17842 ASSERT(capp != NULL); 17843 ASSERT(lbap != NULL); 17844 17845 SD_TRACE(SD_LOG_IO, un, 17846 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 17847 17848 /* 17849 * First send a READ_CAPACITY_16 command to the target. 17850 * 17851 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17852 * Medium Indicator bit is cleared. The address field must be 17853 * zero if the PMI bit is zero.
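 *
 * (CDB layout for reference; the code below fills in only the fields
 * it needs and leaves the rest zeroed by the bzero():
 *
 *	byte 0:		0x9E (SCMD_SVC_ACTION_IN_G4)
 *	byte 1:		0x10 (SSVC_ACTION_READ_CAPACITY_G4)
 *	bytes 2-9:	LOGICAL BLOCK ADDRESS, zero since PMI is zero
 *	bytes 10-13:	ALLOCATION LENGTH, set via FORMG4COUNT()
 *	byte 14:	PMI bit, left zero)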
17854 */ 17855 bzero(&cdb, sizeof (cdb)); 17856 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17857 17858 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17859 17860 ucmd_buf.uscsi_cdb = (char *)&cdb; 17861 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17862 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17863 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17864 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17865 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17866 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17867 ucmd_buf.uscsi_timeout = 60; 17868 17869 /* 17870 * Read Capacity (16) is a Service Action In command. One 17871 * command byte (0x9E) is overloaded for multiple operations, 17872 * with the second CDB byte specifying the desired operation. 17873 */ 17874 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17875 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17876 17877 /* 17878 * Fill in allocation length field 17879 */ 17880 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17881 17882 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17883 UIO_SYSSPACE, path_flag); 17884 17885 switch (status) { 17886 case 0: 17887 /* Return failure if we did not get valid capacity data. */ 17888 if (ucmd_buf.uscsi_resid > 20) { 17889 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17890 return (EIO); 17891 } 17892 17893 /* 17894 * Read capacity and block size from the READ CAPACITY 16 data. 17895 * This data may be adjusted later due to device specific 17896 * issues. 17897 * 17898 * According to the SCSI spec, the READ CAPACITY 16 17899 * command returns the following: 17900 * 17901 * bytes 0-7: Maximum logical block address available. 17902 * (MSB in byte:0 & LSB in byte:7) 17903 * 17904 * bytes 8-11: Block length in bytes 17905 * (MSB in byte:8 & LSB in byte:11) 17906 * 17907 */ 17908 capacity = BE_64(capacity16_buf[0]); 17909 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 17910 17911 /* 17912 * Done with capacity16_buf 17913 */ 17914 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17915 17916 /* 17917 * if the reported capacity is set to all 0xf's, then 17918 * this disk is too large. This could only happen with 17919 * a device that supports LBAs larger than 64 bits which 17920 * are not defined by any current T10 standards. 17921 */ 17922 if (capacity == 0xffffffffffffffff) { 17923 return (EIO); 17924 } 17925 break; /* Success! */ 17926 case EIO: 17927 switch (ucmd_buf.uscsi_status) { 17928 case STATUS_RESERVATION_CONFLICT: 17929 status = EACCES; 17930 break; 17931 case STATUS_CHECK: 17932 /* 17933 * Check condition; look for ASC/ASCQ of 0x04/0x01 17934 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17935 */ 17936 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17937 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17938 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17939 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17940 return (EAGAIN); 17941 } 17942 break; 17943 default: 17944 break; 17945 } 17946 /* FALLTHRU */ 17947 default: 17948 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17949 return (status); 17950 } 17951 17952 *capp = capacity; 17953 *lbap = lbasize; 17954 17955 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 17956 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17957 17958 return (0); 17959 } 17960 17961 17962 /* 17963 * Function: sd_send_scsi_START_STOP_UNIT 17964 * 17965 * Description: Issue a scsi START STOP UNIT command to the target.
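 *		(Byte 4 of the CDB carries the operation; the flag
 *		argument is written there directly, so the SD_TARGET_*
 *		values are assumed to encode the standard START and LOEJ
 *		bits of the START STOP UNIT command.)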
17966 * 17967 * Arguments: un - pointer to driver soft state (unit) structure for 17968 * this target. 17969 * flag - SD_TARGET_START 17970 * SD_TARGET_STOP 17971 * SD_TARGET_EJECT 17972 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17973 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17974 * to use the USCSI "direct" chain and bypass the normal 17975 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17976 * command is issued as part of an error recovery action. 17977 * 17978 * Return Code: 0 - Success 17979 * EIO - IO error 17980 * EACCES - Reservation conflict detected 17981 * ENXIO - Not Ready, medium not present 17982 * errno return code from sd_send_scsi_cmd() 17983 * 17984 * Context: Can sleep. 17985 */ 17986 17987 static int 17988 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17989 { 17990 struct scsi_extended_sense sense_buf; 17991 union scsi_cdb cdb; 17992 struct uscsi_cmd ucmd_buf; 17993 int status; 17994 17995 ASSERT(un != NULL); 17996 ASSERT(!mutex_owned(SD_MUTEX(un))); 17997 17998 SD_TRACE(SD_LOG_IO, un, 17999 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18000 18001 if (un->un_f_check_start_stop && 18002 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18003 (un->un_f_start_stop_supported != TRUE)) { 18004 return (0); 18005 } 18006 18007 /* 18008 * If we are performing an eject operation and 18009 * we receive any command other than SD_TARGET_EJECT 18010 * we should immediately return. 18011 */ 18012 if (flag != SD_TARGET_EJECT) { 18013 mutex_enter(SD_MUTEX(un)); 18014 if (un->un_f_ejecting == TRUE) { 18015 mutex_exit(SD_MUTEX(un)); 18016 return (EAGAIN); 18017 } 18018 mutex_exit(SD_MUTEX(un)); 18019 } 18020 18021 bzero(&cdb, sizeof (cdb)); 18022 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18023 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18024 18025 cdb.scc_cmd = SCMD_START_STOP; 18026 cdb.cdb_opaque[4] = (uchar_t)flag; 18027 18028 ucmd_buf.uscsi_cdb = (char *)&cdb; 18029 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18030 ucmd_buf.uscsi_bufaddr = NULL; 18031 ucmd_buf.uscsi_buflen = 0; 18032 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18033 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18034 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18035 ucmd_buf.uscsi_timeout = 200; 18036 18037 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18038 UIO_SYSSPACE, path_flag); 18039 18040 switch (status) { 18041 case 0: 18042 break; /* Success! */ 18043 case EIO: 18044 switch (ucmd_buf.uscsi_status) { 18045 case STATUS_RESERVATION_CONFLICT: 18046 status = EACCES; 18047 break; 18048 case STATUS_CHECK: 18049 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18050 switch (scsi_sense_key( 18051 (uint8_t *)&sense_buf)) { 18052 case KEY_ILLEGAL_REQUEST: 18053 status = ENOTSUP; 18054 break; 18055 case KEY_NOT_READY: 18056 if (scsi_sense_asc( 18057 (uint8_t *)&sense_buf) 18058 == 0x3A) { 18059 status = ENXIO; 18060 } 18061 break; 18062 default: 18063 break; 18064 } 18065 } 18066 break; 18067 default: 18068 break; 18069 } 18070 break; 18071 default: 18072 break; 18073 } 18074 18075 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18076 18077 return (status); 18078 } 18079 18080 18081 /* 18082 * Function: sd_start_stop_unit_callback 18083 * 18084 * Description: timeout(9F) callback to begin recovery process for a 18085 * device that has spun down. 18086 * 18087 * Arguments: arg - pointer to associated softstate struct. 
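 *		(A timeout(9F) callback must not block, which is why the
 *		callback below only taskq_dispatch()es
 *		sd_start_stop_unit_task() with KM_NOSLEEP and leaves the
 *		blocking START STOP UNIT work to the taskq thread.)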
18088 * 18089 * Context: Executes in a timeout(9F) thread context 18090 */ 18091 18092 static void 18093 sd_start_stop_unit_callback(void *arg) 18094 { 18095 struct sd_lun *un = arg; 18096 ASSERT(un != NULL); 18097 ASSERT(!mutex_owned(SD_MUTEX(un))); 18098 18099 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18100 18101 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18102 } 18103 18104 18105 /* 18106 * Function: sd_start_stop_unit_task 18107 * 18108 * Description: Recovery procedure when a drive is spun down. 18109 * 18110 * Arguments: arg - pointer to associated softstate struct. 18111 * 18112 * Context: Executes in a taskq() thread context 18113 */ 18114 18115 static void 18116 sd_start_stop_unit_task(void *arg) 18117 { 18118 struct sd_lun *un = arg; 18119 18120 ASSERT(un != NULL); 18121 ASSERT(!mutex_owned(SD_MUTEX(un))); 18122 18123 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18124 18125 /* 18126 * Some unformatted drives report not ready error, no need to 18127 * restart if format has been initiated. 18128 */ 18129 mutex_enter(SD_MUTEX(un)); 18130 if (un->un_f_format_in_progress == TRUE) { 18131 mutex_exit(SD_MUTEX(un)); 18132 return; 18133 } 18134 mutex_exit(SD_MUTEX(un)); 18135 18136 /* 18137 * When a START STOP command is issued from here, it is part of a 18138 * failure recovery operation and must be issued before any other 18139 * commands, including any pending retries. Thus it must be sent 18140 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18141 * succeeds or not, we will start I/O after the attempt. 18142 */ 18143 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18144 SD_PATH_DIRECT_PRIORITY); 18145 18146 /* 18147 * The above call blocks until the START_STOP_UNIT command completes. 18148 * Now that it has completed, we must re-try the original IO that 18149 * received the NOT READY condition in the first place. There are 18150 * three possible conditions here: 18151 * 18152 * (1) The original IO is on un_retry_bp. 18153 * (2) The original IO is on the regular wait queue, and un_retry_bp 18154 * is NULL. 18155 * (3) The original IO is on the regular wait queue, and un_retry_bp 18156 * points to some other, unrelated bp. 18157 * 18158 * For each case, we must call sd_start_cmds() with un_retry_bp 18159 * as the argument. If un_retry_bp is NULL, this will initiate 18160 * processing of the regular wait queue. If un_retry_bp is not NULL, 18161 * then this will process the bp on un_retry_bp. That may or may not 18162 * be the original IO, but that does not matter: the important thing 18163 * is to keep the IO processing going at this point. 18164 * 18165 * Note: This is a very specific error recovery sequence associated 18166 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18167 * serialize the I/O with completion of the spin-up. 18168 */ 18169 mutex_enter(SD_MUTEX(un)); 18170 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18171 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18172 un, un->un_retry_bp); 18173 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18174 sd_start_cmds(un, un->un_retry_bp); 18175 mutex_exit(SD_MUTEX(un)); 18176 18177 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18178 } 18179 18180 18181 /* 18182 * Function: sd_send_scsi_INQUIRY 18183 * 18184 * Description: Issue the scsi INQUIRY command. 
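 *		(CDB sketch, for reference: byte 1 carries the EVPD bit,
 *		byte 2 the page code (meaningful only when EVPD is set),
 *		and byte 4 the allocation length via FORMG0COUNT(). With
 *		evpd and page_code both zero this fetches the standard
 *		inquiry data.)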
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *	      bufaddr - buffer to receive the INQUIRY data.
 *	      buflen - size of the caller's buffer.
 *	      evpd - EVPD bit for byte 1 of the CDB.
 *	      page_code - VPD page code for byte 2 of the CDB.
 *	      residp - pointer to receive the residual count (may be NULL).
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
	uchar_t evpd, uchar_t page_code, size_t *residp)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_INQUIRY;
	cdb.cdb_opaque[1] = evpd;
	cdb.cdb_opaque[2] = page_code;
	FORMG0COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = NULL;
	ucmd_buf.uscsi_rqlen = 0;
	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 200;	/* Excessive legacy value */

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	if ((status == 0) && (residp != NULL)) {
		*residp = ucmd_buf.uscsi_resid;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_TEST_UNIT_READY
 *
 * Description: Issue the scsi TEST UNIT READY command.
 *		This routine can be told to set the flag USCSI_DIAGNOSE to
 *		prevent retrying failed commands. Use this when the intent
 *		is either to check for device readiness, to clear a Unit
 *		Attention, or to clear any outstanding sense data.
 *		However, under specific conditions the expected behavior
 *		is for retries to bring a device ready, so use the flag
 *		with caution.
 *
 * Arguments: un
 *	      flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *		    SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *		    0: don't check for media present; do retries on cmd.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO  - Not Ready, medium not present
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate Elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty. Check
	 * the configuration flag set during attach (indicating the drive has
	 * this firmware bug) and un_ncmds_in_transport before issuing the
	 * TUR. If there are pending commands, return success. This is
	 * somewhat arbitrary, but is acceptable for non-removables (i.e. the
	 * Elite1 disks) and non-clustering configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr = NULL;
	ucmd_buf.uscsi_buflen = 0;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
	    SD_PATH_STANDARD));

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_NOT_READY) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *	      usr_cmd - SD_READ_KEYS or SD_READ_RESV (PRIN service action).
 *	      data_len - length of the caller's data buffer (must be 0 if
 *			 data_bufp is NULL).
 *	      data_bufp - buffer to receive the PRIN data; if NULL, a
 *			  default buffer is allocated internally.
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
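 *
 *		Illustrative only: a READ KEYS request that lets this
 *		routine allocate its own MHIOC_RESV_KEY_SIZE buffer
 *		would look like
 *
 *		    status = sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			SD_READ_KEYS, 0, NULL);
 *
 *		(data_len must be 0 whenever data_bufp is NULL; see the
 *		ASSERT in the function body.)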
18373 */ 18374 18375 static int 18376 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18377 uint16_t data_len, uchar_t *data_bufp) 18378 { 18379 struct scsi_extended_sense sense_buf; 18380 union scsi_cdb cdb; 18381 struct uscsi_cmd ucmd_buf; 18382 int status; 18383 int no_caller_buf = FALSE; 18384 18385 ASSERT(un != NULL); 18386 ASSERT(!mutex_owned(SD_MUTEX(un))); 18387 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18388 18389 SD_TRACE(SD_LOG_IO, un, 18390 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18391 18392 bzero(&cdb, sizeof (cdb)); 18393 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18394 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18395 if (data_bufp == NULL) { 18396 /* Allocate a default buf if the caller did not give one */ 18397 ASSERT(data_len == 0); 18398 data_len = MHIOC_RESV_KEY_SIZE; 18399 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18400 no_caller_buf = TRUE; 18401 } 18402 18403 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18404 cdb.cdb_opaque[1] = usr_cmd; 18405 FORMG1COUNT(&cdb, data_len); 18406 18407 ucmd_buf.uscsi_cdb = (char *)&cdb; 18408 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18409 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18410 ucmd_buf.uscsi_buflen = data_len; 18411 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18412 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18413 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18414 ucmd_buf.uscsi_timeout = 60; 18415 18416 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18417 UIO_SYSSPACE, SD_PATH_STANDARD); 18418 18419 switch (status) { 18420 case 0: 18421 break; /* Success! */ 18422 case EIO: 18423 switch (ucmd_buf.uscsi_status) { 18424 case STATUS_RESERVATION_CONFLICT: 18425 status = EACCES; 18426 break; 18427 case STATUS_CHECK: 18428 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18429 (scsi_sense_key((uint8_t *)&sense_buf) == 18430 KEY_ILLEGAL_REQUEST)) { 18431 status = ENOTSUP; 18432 } 18433 break; 18434 default: 18435 break; 18436 } 18437 break; 18438 default: 18439 break; 18440 } 18441 18442 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18443 18444 if (no_caller_buf == TRUE) { 18445 kmem_free(data_bufp, data_len); 18446 } 18447 18448 return (status); 18449 } 18450 18451 18452 /* 18453 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18454 * 18455 * Description: This routine is the driver entry point for handling CD-ROM 18456 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18457 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18458 * device. 18459 * 18460 * Arguments: un - Pointer to soft state struct for the target. 18461 * usr_cmd SCSI-3 reservation facility command (one of 18462 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18463 * SD_SCSI3_PREEMPTANDABORT) 18464 * usr_bufp - user provided pointer register, reserve descriptor or 18465 * preempt and abort structure (mhioc_register_t, 18466 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18467 * 18468 * Return Code: 0 - Success 18469 * EACCES 18470 * ENOTSUP 18471 * errno return code from sd_send_scsi_cmd() 18472 * 18473 * Context: Can sleep. Does not return until command is completed. 
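 *
 *		(SD_SCSI3_REGISTERANDIGNOREKEY, with its
 *		mhioc_registerandignorekey_t argument, is handled below
 *		as well.)
 *
 *		For reference, the 24-byte parameter list built from
 *		usr_bufp (a sketch based on the sd_prout_t fields used
 *		below) carries:
 *
 *		    res_key       - reservation key (MHIOC_RESV_KEY_SIZE)
 *		    service_key   - service action key (MHIOC_RESV_KEY_SIZE)
 *		    scope_address - scope-specific address, stored
 *				    big-endian via BE_32()
 *		    aptpl         - Activate Persist Through Power Loss bit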
18474 */ 18475 18476 static int 18477 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18478 uchar_t *usr_bufp) 18479 { 18480 struct scsi_extended_sense sense_buf; 18481 union scsi_cdb cdb; 18482 struct uscsi_cmd ucmd_buf; 18483 int status; 18484 uchar_t data_len = sizeof (sd_prout_t); 18485 sd_prout_t *prp; 18486 18487 ASSERT(un != NULL); 18488 ASSERT(!mutex_owned(SD_MUTEX(un))); 18489 ASSERT(data_len == 24); /* required by scsi spec */ 18490 18491 SD_TRACE(SD_LOG_IO, un, 18492 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18493 18494 if (usr_bufp == NULL) { 18495 return (EINVAL); 18496 } 18497 18498 bzero(&cdb, sizeof (cdb)); 18499 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18500 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18501 prp = kmem_zalloc(data_len, KM_SLEEP); 18502 18503 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18504 cdb.cdb_opaque[1] = usr_cmd; 18505 FORMG1COUNT(&cdb, data_len); 18506 18507 ucmd_buf.uscsi_cdb = (char *)&cdb; 18508 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18509 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18510 ucmd_buf.uscsi_buflen = data_len; 18511 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18512 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18513 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18514 ucmd_buf.uscsi_timeout = 60; 18515 18516 switch (usr_cmd) { 18517 case SD_SCSI3_REGISTER: { 18518 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18519 18520 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18521 bcopy(ptr->newkey.key, prp->service_key, 18522 MHIOC_RESV_KEY_SIZE); 18523 prp->aptpl = ptr->aptpl; 18524 break; 18525 } 18526 case SD_SCSI3_RESERVE: 18527 case SD_SCSI3_RELEASE: { 18528 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18529 18530 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18531 prp->scope_address = BE_32(ptr->scope_specific_addr); 18532 cdb.cdb_opaque[2] = ptr->type; 18533 break; 18534 } 18535 case SD_SCSI3_PREEMPTANDABORT: { 18536 mhioc_preemptandabort_t *ptr = 18537 (mhioc_preemptandabort_t *)usr_bufp; 18538 18539 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18540 bcopy(ptr->victim_key.key, prp->service_key, 18541 MHIOC_RESV_KEY_SIZE); 18542 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18543 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18544 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18545 break; 18546 } 18547 case SD_SCSI3_REGISTERANDIGNOREKEY: 18548 { 18549 mhioc_registerandignorekey_t *ptr; 18550 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18551 bcopy(ptr->newkey.key, 18552 prp->service_key, MHIOC_RESV_KEY_SIZE); 18553 prp->aptpl = ptr->aptpl; 18554 break; 18555 } 18556 default: 18557 ASSERT(FALSE); 18558 break; 18559 } 18560 18561 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18562 UIO_SYSSPACE, SD_PATH_STANDARD); 18563 18564 switch (status) { 18565 case 0: 18566 break; /* Success! 
 */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}


/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *	      dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback | DKIOCFLUSHWRITECACHE            |
 * |FLUSH_VOLATILE|              | operation                       |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: "
		    "skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

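	/*
	 * Note (descriptive only): SD_SYNC_NV_BIT is OR'ed into cdb_un.tag
	 * below; that field overlays byte 1 of the CDB, which is where
	 * SYNCHRONIZE CACHE(10) carries the SYNC_NV bit restricting the
	 * flush to volatile cache.
	 */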
	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno = 0;

	if (is_async == TRUE) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	(void) sd_uscsi_strategy(bp);

	/*
	 * If this is a synchronous request, wait for completion.
	 * If this is an async request, just return and let the b_iodone
	 * callback do the cleanup.
	 * NOTE: On return, un_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}


static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	uint8_t			*sense_buf;
	struct sd_lun		*un;
	int			status;
	union scsi_cdb		*cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported "
					    "is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: "
				    "un_f_sync_cache_supported set to false "
				    "with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}


/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info.
 *		The caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
 *
 * Arguments: un
 *	      ucmdbuf
 *	      rqbuf
 *	      rqbuflen
 *	      bufaddr
 *	      buflen
 *	      path_flag
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
18894 */ 18895 cdb[0] = SCMD_GET_CONFIGURATION; 18896 cdb[1] = 0x02; /* Requested Type */ 18897 cdb[8] = SD_PROFILE_HEADER_LEN; 18898 ucmdbuf->uscsi_cdb = cdb; 18899 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18900 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18901 ucmdbuf->uscsi_buflen = buflen; 18902 ucmdbuf->uscsi_timeout = sd_io_time; 18903 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18904 ucmdbuf->uscsi_rqlen = rqbuflen; 18905 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18906 18907 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18908 UIO_SYSSPACE, path_flag); 18909 18910 switch (status) { 18911 case 0: 18912 break; /* Success! */ 18913 case EIO: 18914 switch (ucmdbuf->uscsi_status) { 18915 case STATUS_RESERVATION_CONFLICT: 18916 status = EACCES; 18917 break; 18918 default: 18919 break; 18920 } 18921 break; 18922 default: 18923 break; 18924 } 18925 18926 if (status == 0) { 18927 SD_DUMP_MEMORY(un, SD_LOG_IO, 18928 "sd_send_scsi_GET_CONFIGURATION: data", 18929 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18930 } 18931 18932 SD_TRACE(SD_LOG_IO, un, 18933 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18934 18935 return (status); 18936 } 18937 18938 /* 18939 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18940 * 18941 * Description: Issues the get configuration command to the device to 18942 * retrieve a specific feature. Called from 18943 * sd_check_for_writable_cd & sd_set_mmc_caps. 18944 * Arguments: un 18945 * ucmdbuf 18946 * rqbuf 18947 * rqbuflen 18948 * bufaddr 18949 * buflen 18950 * feature 18951 * 18952 * Return Code: 0 - Success 18953 * errno return code from sd_send_scsi_cmd() 18954 * 18955 * Context: Can sleep. Does not return until command is completed. 18956 * 18957 */ 18958 static int 18959 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18960 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18961 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18962 { 18963 char cdb[CDB_GROUP1]; 18964 int status; 18965 18966 ASSERT(un != NULL); 18967 ASSERT(!mutex_owned(SD_MUTEX(un))); 18968 ASSERT(bufaddr != NULL); 18969 ASSERT(ucmdbuf != NULL); 18970 ASSERT(rqbuf != NULL); 18971 18972 SD_TRACE(SD_LOG_IO, un, 18973 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18974 18975 bzero(cdb, sizeof (cdb)); 18976 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18977 bzero(rqbuf, rqbuflen); 18978 bzero(bufaddr, buflen); 18979 18980 /* 18981 * Set up cdb field for the get configuration command. 18982 */ 18983 cdb[0] = SCMD_GET_CONFIGURATION; 18984 cdb[1] = 0x02; /* Requested Type */ 18985 cdb[3] = feature; 18986 cdb[8] = buflen; 18987 ucmdbuf->uscsi_cdb = cdb; 18988 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18989 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18990 ucmdbuf->uscsi_buflen = buflen; 18991 ucmdbuf->uscsi_timeout = sd_io_time; 18992 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18993 ucmdbuf->uscsi_rqlen = rqbuflen; 18994 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18995 18996 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18997 UIO_SYSSPACE, path_flag); 18998 18999 switch (status) { 19000 case 0: 19001 break; /* Success! 
*/ 19002 case EIO: 19003 switch (ucmdbuf->uscsi_status) { 19004 case STATUS_RESERVATION_CONFLICT: 19005 status = EACCES; 19006 break; 19007 default: 19008 break; 19009 } 19010 break; 19011 default: 19012 break; 19013 } 19014 19015 if (status == 0) { 19016 SD_DUMP_MEMORY(un, SD_LOG_IO, 19017 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19018 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19019 } 19020 19021 SD_TRACE(SD_LOG_IO, un, 19022 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19023 19024 return (status); 19025 } 19026 19027 19028 /* 19029 * Function: sd_send_scsi_MODE_SENSE 19030 * 19031 * Description: Utility function for issuing a scsi MODE SENSE command. 19032 * Note: This routine uses a consistent implementation for Group0, 19033 * Group1, and Group2 commands across all platforms. ATAPI devices 19034 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 19035 * 19036 * Arguments: un - pointer to the softstate struct for the target. 19037 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 19038 * CDB_GROUP[1|2] (10 byte). 19039 * bufaddr - buffer for page data retrieved from the target. 19040 * buflen - size of page to be retrieved. 19041 * page_code - page code of data to be retrieved from the target. 19042 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19043 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19044 * to use the USCSI "direct" chain and bypass the normal 19045 * command waitq. 19046 * 19047 * Return Code: 0 - Success 19048 * errno return code from sd_send_scsi_cmd() 19049 * 19050 * Context: Can sleep. Does not return until command is completed. 19051 */ 19052 19053 static int 19054 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19055 size_t buflen, uchar_t page_code, int path_flag) 19056 { 19057 struct scsi_extended_sense sense_buf; 19058 union scsi_cdb cdb; 19059 struct uscsi_cmd ucmd_buf; 19060 int status; 19061 int headlen; 19062 19063 ASSERT(un != NULL); 19064 ASSERT(!mutex_owned(SD_MUTEX(un))); 19065 ASSERT(bufaddr != NULL); 19066 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19067 (cdbsize == CDB_GROUP2)); 19068 19069 SD_TRACE(SD_LOG_IO, un, 19070 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19071 19072 bzero(&cdb, sizeof (cdb)); 19073 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19074 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19075 bzero(bufaddr, buflen); 19076 19077 if (cdbsize == CDB_GROUP0) { 19078 cdb.scc_cmd = SCMD_MODE_SENSE; 19079 cdb.cdb_opaque[2] = page_code; 19080 FORMG0COUNT(&cdb, buflen); 19081 headlen = MODE_HEADER_LENGTH; 19082 } else { 19083 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19084 cdb.cdb_opaque[2] = page_code; 19085 FORMG1COUNT(&cdb, buflen); 19086 headlen = MODE_HEADER_LENGTH_GRP2; 19087 } 19088 19089 ASSERT(headlen <= buflen); 19090 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19091 19092 ucmd_buf.uscsi_cdb = (char *)&cdb; 19093 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19094 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19095 ucmd_buf.uscsi_buflen = buflen; 19096 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19097 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19098 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19099 ucmd_buf.uscsi_timeout = 60; 19100 19101 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19102 UIO_SYSSPACE, path_flag); 19103 19104 switch (status) { 19105 case 0: 19106 /* 19107 * sr_check_wp() uses 0x3f page code and check the header of 19108 * mode page to determine if target device is 
write-protected.
		 * But some USB devices return 0 bytes for 0x3f page code.
		 * For this case, make sure that at least the mode page
		 * header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen)
			status = EIO;
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *	      cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *	      bufaddr - buffer for page data retrieved from the target.
 *	      buflen - size of page to be retrieved.
 *	      save_page - boolean to determine if SP bit should be set.
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
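 *
 *		For reference (a sketch of the CDB byte 1 bits set below):
 *
 *		    0x10 - PF (Page Format) bit, set unconditionally for
 *			   many third party drives
 *		    0x01 - SP (Save Page) bit, set only when save_page ==
 *			   SD_SAVE_PAGE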
 */

static int
sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: un:  Pointer to the sd_lun struct for the target.
 *	      cmd: SCMD_READ or SCMD_WRITE
 *	      bufaddr: Address of caller's buffer to receive the RDWR data.
 *	      buflen:  Length of caller's buffer to receive the RDWR data.
 *	      start_block: Block number for the start of the RDWR operation.
 *			(Assumes target-native block size.)
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
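 *
 *		A sketch of how the CDB group is chosen below from
 *		start_block (in target-native blocks):
 *
 *		    start_block > 0xFFFFFFFF	      -> CDB_GROUP4 (16 byte)
 *		    (start_block & 0xFFE00000) != 0,
 *		    or the target is ATAPI	      -> CDB_GROUP1 (10 byte)
 *		    otherwise			      -> CDB_GROUP0 (6 byte)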
19264 */ 19265 19266 static int 19267 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19268 size_t buflen, daddr_t start_block, int path_flag) 19269 { 19270 struct scsi_extended_sense sense_buf; 19271 union scsi_cdb cdb; 19272 struct uscsi_cmd ucmd_buf; 19273 uint32_t block_count; 19274 int status; 19275 int cdbsize; 19276 uchar_t flag; 19277 19278 ASSERT(un != NULL); 19279 ASSERT(!mutex_owned(SD_MUTEX(un))); 19280 ASSERT(bufaddr != NULL); 19281 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19282 19283 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19284 19285 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19286 return (EINVAL); 19287 } 19288 19289 mutex_enter(SD_MUTEX(un)); 19290 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19291 mutex_exit(SD_MUTEX(un)); 19292 19293 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19294 19295 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19296 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19297 bufaddr, buflen, start_block, block_count); 19298 19299 bzero(&cdb, sizeof (cdb)); 19300 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19301 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19302 19303 /* Compute CDB size to use */ 19304 if (start_block > 0xffffffff) 19305 cdbsize = CDB_GROUP4; 19306 else if ((start_block & 0xFFE00000) || 19307 (un->un_f_cfg_is_atapi == TRUE)) 19308 cdbsize = CDB_GROUP1; 19309 else 19310 cdbsize = CDB_GROUP0; 19311 19312 switch (cdbsize) { 19313 case CDB_GROUP0: /* 6-byte CDBs */ 19314 cdb.scc_cmd = cmd; 19315 FORMG0ADDR(&cdb, start_block); 19316 FORMG0COUNT(&cdb, block_count); 19317 break; 19318 case CDB_GROUP1: /* 10-byte CDBs */ 19319 cdb.scc_cmd = cmd | SCMD_GROUP1; 19320 FORMG1ADDR(&cdb, start_block); 19321 FORMG1COUNT(&cdb, block_count); 19322 break; 19323 case CDB_GROUP4: /* 16-byte CDBs */ 19324 cdb.scc_cmd = cmd | SCMD_GROUP4; 19325 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19326 FORMG4COUNT(&cdb, block_count); 19327 break; 19328 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19329 default: 19330 /* All others reserved */ 19331 return (EINVAL); 19332 } 19333 19334 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19335 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19336 19337 ucmd_buf.uscsi_cdb = (char *)&cdb; 19338 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19339 ucmd_buf.uscsi_bufaddr = bufaddr; 19340 ucmd_buf.uscsi_buflen = buflen; 19341 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19342 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19343 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19344 ucmd_buf.uscsi_timeout = 60; 19345 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19346 UIO_SYSSPACE, path_flag); 19347 switch (status) { 19348 case 0: 19349 break; /* Success! */ 19350 case EIO: 19351 switch (ucmd_buf.uscsi_status) { 19352 case STATUS_RESERVATION_CONFLICT: 19353 status = EACCES; 19354 break; 19355 default: 19356 break; 19357 } 19358 break; 19359 default: 19360 break; 19361 } 19362 19363 if (status == 0) { 19364 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19365 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19366 } 19367 19368 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19369 19370 return (status); 19371 } 19372 19373 19374 /* 19375 * Function: sd_send_scsi_LOG_SENSE 19376 * 19377 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19378 * 19379 * Arguments: un: Pointer to the sd_lun struct for the target. 
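 *	      bufaddr: Buffer to receive the log page data.
 *	      buflen: Length of the caller's buffer.
 *	      page_code: Log page code (low six bits of CDB byte 2).
 *	      page_control: Page control field (upper two bits of CDB
 *			byte 2).
 *	      param_ptr: Parameter pointer, placed in CDB bytes 5-6.
 *	      path_flag: Chain selection, as for the other sd_send_scsi_*
 *			routines.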
19380 * 19381 * Return Code: 0 - Success 19382 * errno return code from sd_send_scsi_cmd() 19383 * 19384 * Context: Can sleep. Does not return until command is completed. 19385 */ 19386 19387 static int 19388 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19389 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19390 int path_flag) 19391 19392 { 19393 struct scsi_extended_sense sense_buf; 19394 union scsi_cdb cdb; 19395 struct uscsi_cmd ucmd_buf; 19396 int status; 19397 19398 ASSERT(un != NULL); 19399 ASSERT(!mutex_owned(SD_MUTEX(un))); 19400 19401 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19402 19403 bzero(&cdb, sizeof (cdb)); 19404 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19405 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19406 19407 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19408 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19409 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19410 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19411 FORMG1COUNT(&cdb, buflen); 19412 19413 ucmd_buf.uscsi_cdb = (char *)&cdb; 19414 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19415 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19416 ucmd_buf.uscsi_buflen = buflen; 19417 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19418 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19419 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19420 ucmd_buf.uscsi_timeout = 60; 19421 19422 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19423 UIO_SYSSPACE, path_flag); 19424 19425 switch (status) { 19426 case 0: 19427 break; 19428 case EIO: 19429 switch (ucmd_buf.uscsi_status) { 19430 case STATUS_RESERVATION_CONFLICT: 19431 status = EACCES; 19432 break; 19433 case STATUS_CHECK: 19434 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19435 (scsi_sense_key((uint8_t *)&sense_buf) == 19436 KEY_ILLEGAL_REQUEST) && 19437 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19438 /* 19439 * ASC 0x24: INVALID FIELD IN CDB 19440 */ 19441 switch (page_code) { 19442 case START_STOP_CYCLE_PAGE: 19443 /* 19444 * The start stop cycle counter is 19445 * implemented as page 0x31 in earlier 19446 * generation disks. In new generation 19447 * disks the start stop cycle counter is 19448 * implemented as page 0xE. To properly 19449 * handle this case if an attempt for 19450 * log page 0xE is made and fails we 19451 * will try again using page 0x31. 19452 * 19453 * Network storage BU committed to 19454 * maintain the page 0x31 for this 19455 * purpose and will not have any other 19456 * page implemented with page code 0x31 19457 * until all disks transition to the 19458 * standard page. 
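 *
 *				(Descriptive note: in this fallback,
 *				START_STOP_CYCLE_VU_PAGE names the
 *				legacy page 0x31 retried below, while
 *				START_STOP_CYCLE_PAGE names the
 *				standard page 0xE requested first.)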
19459 */ 19460 mutex_enter(SD_MUTEX(un)); 19461 un->un_start_stop_cycle_page = 19462 START_STOP_CYCLE_VU_PAGE; 19463 cdb.cdb_opaque[2] = 19464 (char)(page_control << 6) | 19465 un->un_start_stop_cycle_page; 19466 mutex_exit(SD_MUTEX(un)); 19467 status = sd_send_scsi_cmd( 19468 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19469 UIO_SYSSPACE, path_flag); 19470 19471 break; 19472 case TEMPERATURE_PAGE: 19473 status = ENOTTY; 19474 break; 19475 default: 19476 break; 19477 } 19478 } 19479 break; 19480 default: 19481 break; 19482 } 19483 break; 19484 default: 19485 break; 19486 } 19487 19488 if (status == 0) { 19489 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19490 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19491 } 19492 19493 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19494 19495 return (status); 19496 } 19497 19498 19499 /* 19500 * Function: sdioctl 19501 * 19502 * Description: Driver's ioctl(9e) entry point function. 19503 * 19504 * Arguments: dev - device number 19505 * cmd - ioctl operation to be performed 19506 * arg - user argument, contains data to be set or reference 19507 * parameter for get 19508 * flag - bit flag, indicating open settings, 32/64 bit type 19509 * cred_p - user credential pointer 19510 * rval_p - calling process return value (OPT) 19511 * 19512 * Return Code: EINVAL 19513 * ENOTTY 19514 * ENXIO 19515 * EIO 19516 * EFAULT 19517 * ENOTSUP 19518 * EPERM 19519 * 19520 * Context: Called from the device switch at normal priority. 19521 */ 19522 19523 static int 19524 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19525 { 19526 struct sd_lun *un = NULL; 19527 int err = 0; 19528 int i = 0; 19529 cred_t *cr; 19530 int tmprval = EINVAL; 19531 int is_valid; 19532 19533 /* 19534 * All device accesses go thru sdstrategy where we check on suspend 19535 * status 19536 */ 19537 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19538 return (ENXIO); 19539 } 19540 19541 ASSERT(!mutex_owned(SD_MUTEX(un))); 19542 19543 19544 is_valid = SD_IS_VALID_LABEL(un); 19545 19546 /* 19547 * Moved this wait from sd_uscsi_strategy to here for 19548 * reasons of deadlock prevention. Internal driver commands, 19549 * specifically those to change a devices power level, result 19550 * in a call to sd_uscsi_strategy. 19551 */ 19552 mutex_enter(SD_MUTEX(un)); 19553 while ((un->un_state == SD_STATE_SUSPENDED) || 19554 (un->un_state == SD_STATE_PM_CHANGING)) { 19555 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19556 } 19557 /* 19558 * Twiddling the counter here protects commands from now 19559 * through to the top of sd_uscsi_strategy. Without the 19560 * counter inc. a power down, for example, could get in 19561 * after the above check for state is made and before 19562 * execution gets to the top of sd_uscsi_strategy. 19563 * That would cause problems. 
19564 */ 19565 un->un_ncmds_in_driver++; 19566 19567 if (!is_valid && 19568 (flag & (FNDELAY | FNONBLOCK))) { 19569 switch (cmd) { 19570 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19571 case DKIOCGVTOC: 19572 case DKIOCGAPART: 19573 case DKIOCPARTINFO: 19574 case DKIOCSGEOM: 19575 case DKIOCSAPART: 19576 case DKIOCGETEFI: 19577 case DKIOCPARTITION: 19578 case DKIOCSVTOC: 19579 case DKIOCSETEFI: 19580 case DKIOCGMBOOT: 19581 case DKIOCSMBOOT: 19582 case DKIOCG_PHYGEOM: 19583 case DKIOCG_VIRTGEOM: 19584 /* let cmlb handle it */ 19585 goto skip_ready_valid; 19586 19587 case CDROMPAUSE: 19588 case CDROMRESUME: 19589 case CDROMPLAYMSF: 19590 case CDROMPLAYTRKIND: 19591 case CDROMREADTOCHDR: 19592 case CDROMREADTOCENTRY: 19593 case CDROMSTOP: 19594 case CDROMSTART: 19595 case CDROMVOLCTRL: 19596 case CDROMSUBCHNL: 19597 case CDROMREADMODE2: 19598 case CDROMREADMODE1: 19599 case CDROMREADOFFSET: 19600 case CDROMSBLKMODE: 19601 case CDROMGBLKMODE: 19602 case CDROMGDRVSPEED: 19603 case CDROMSDRVSPEED: 19604 case CDROMCDDA: 19605 case CDROMCDXA: 19606 case CDROMSUBCODE: 19607 if (!ISCD(un)) { 19608 un->un_ncmds_in_driver--; 19609 ASSERT(un->un_ncmds_in_driver >= 0); 19610 mutex_exit(SD_MUTEX(un)); 19611 return (ENOTTY); 19612 } 19613 break; 19614 case FDEJECT: 19615 case DKIOCEJECT: 19616 case CDROMEJECT: 19617 if (!un->un_f_eject_media_supported) { 19618 un->un_ncmds_in_driver--; 19619 ASSERT(un->un_ncmds_in_driver >= 0); 19620 mutex_exit(SD_MUTEX(un)); 19621 return (ENOTTY); 19622 } 19623 break; 19624 case DKIOCFLUSHWRITECACHE: 19625 mutex_exit(SD_MUTEX(un)); 19626 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19627 if (err != 0) { 19628 mutex_enter(SD_MUTEX(un)); 19629 un->un_ncmds_in_driver--; 19630 ASSERT(un->un_ncmds_in_driver >= 0); 19631 mutex_exit(SD_MUTEX(un)); 19632 return (EIO); 19633 } 19634 mutex_enter(SD_MUTEX(un)); 19635 /* FALLTHROUGH */ 19636 case DKIOCREMOVABLE: 19637 case DKIOCHOTPLUGGABLE: 19638 case DKIOCINFO: 19639 case DKIOCGMEDIAINFO: 19640 case MHIOCENFAILFAST: 19641 case MHIOCSTATUS: 19642 case MHIOCTKOWN: 19643 case MHIOCRELEASE: 19644 case MHIOCGRP_INKEYS: 19645 case MHIOCGRP_INRESV: 19646 case MHIOCGRP_REGISTER: 19647 case MHIOCGRP_RESERVE: 19648 case MHIOCGRP_PREEMPTANDABORT: 19649 case MHIOCGRP_REGISTERANDIGNOREKEY: 19650 case CDROMCLOSETRAY: 19651 case USCSICMD: 19652 goto skip_ready_valid; 19653 default: 19654 break; 19655 } 19656 19657 mutex_exit(SD_MUTEX(un)); 19658 err = sd_ready_and_valid(un); 19659 mutex_enter(SD_MUTEX(un)); 19660 19661 if (err != SD_READY_VALID) { 19662 switch (cmd) { 19663 case DKIOCSTATE: 19664 case CDROMGDRVSPEED: 19665 case CDROMSDRVSPEED: 19666 case FDEJECT: /* for eject command */ 19667 case DKIOCEJECT: 19668 case CDROMEJECT: 19669 case DKIOCREMOVABLE: 19670 case DKIOCHOTPLUGGABLE: 19671 break; 19672 default: 19673 if (un->un_f_has_removable_media) { 19674 err = ENXIO; 19675 } else { 19676 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19677 if (err == SD_RESERVED_BY_OTHERS) { 19678 err = EACCES; 19679 } else { 19680 err = EIO; 19681 } 19682 } 19683 un->un_ncmds_in_driver--; 19684 ASSERT(un->un_ncmds_in_driver >= 0); 19685 mutex_exit(SD_MUTEX(un)); 19686 return (err); 19687 } 19688 } 19689 } 19690 19691 skip_ready_valid: 19692 mutex_exit(SD_MUTEX(un)); 19693 19694 switch (cmd) { 19695 case DKIOCINFO: 19696 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19697 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19698 break; 19699 19700 case DKIOCGMEDIAINFO: 19701 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19702 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19703 break; 19704 19705 case DKIOCGGEOM: 19706 case DKIOCGVTOC: 19707 case DKIOCGAPART: 19708 case DKIOCPARTINFO: 19709 case DKIOCSGEOM: 19710 case DKIOCSAPART: 19711 case DKIOCGETEFI: 19712 case DKIOCPARTITION: 19713 case DKIOCSVTOC: 19714 case DKIOCSETEFI: 19715 case DKIOCGMBOOT: 19716 case DKIOCSMBOOT: 19717 case DKIOCG_PHYGEOM: 19718 case DKIOCG_VIRTGEOM: 19719 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19720 19721 /* TUR should spin up */ 19722 19723 if (un->un_f_has_removable_media) 19724 err = sd_send_scsi_TEST_UNIT_READY(un, 19725 SD_CHECK_FOR_MEDIA); 19726 else 19727 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19728 19729 if (err != 0) 19730 break; 19731 19732 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19733 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19734 19735 if ((err == 0) && 19736 ((cmd == DKIOCSETEFI) || 19737 (un->un_f_pkstats_enabled) && 19738 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19739 19740 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19741 (void *)SD_PATH_DIRECT); 19742 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19743 sd_set_pstats(un); 19744 SD_TRACE(SD_LOG_IO_PARTITION, un, 19745 "sd_ioctl: un:0x%p pstats created and " 19746 "set\n", un); 19747 } 19748 } 19749 19750 if ((cmd == DKIOCSVTOC) || 19751 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19752 19753 mutex_enter(SD_MUTEX(un)); 19754 if (un->un_f_devid_supported && 19755 (un->un_f_opt_fab_devid == TRUE)) { 19756 if (un->un_devid == NULL) { 19757 sd_register_devid(un, SD_DEVINFO(un), 19758 SD_TARGET_IS_UNRESERVED); 19759 } else { 19760 /* 19761 * The device id for this disk 19762 * has been fabricated. The 19763 * device id must be preserved 19764 * by writing it back out to 19765 * disk. 19766 */ 19767 if (sd_write_deviceid(un) != 0) { 19768 ddi_devid_free(un->un_devid); 19769 un->un_devid = NULL; 19770 } 19771 } 19772 } 19773 mutex_exit(SD_MUTEX(un)); 19774 } 19775 19776 break; 19777 19778 case DKIOCLOCK: 19779 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19780 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19781 SD_PATH_STANDARD); 19782 break; 19783 19784 case DKIOCUNLOCK: 19785 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19786 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19787 SD_PATH_STANDARD); 19788 break; 19789 19790 case DKIOCSTATE: { 19791 enum dkio_state state; 19792 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19793 19794 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19795 err = EFAULT; 19796 } else { 19797 err = sd_check_media(dev, state); 19798 if (err == 0) { 19799 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19800 sizeof (int), flag) != 0) 19801 err = EFAULT; 19802 } 19803 } 19804 break; 19805 } 19806 19807 case DKIOCREMOVABLE: 19808 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19809 i = un->un_f_has_removable_media ? 1 : 0; 19810 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19811 err = EFAULT; 19812 } else { 19813 err = 0; 19814 } 19815 break; 19816 19817 case DKIOCHOTPLUGGABLE: 19818 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19819 i = un->un_f_is_hotpluggable ? 
1 : 0; 19820 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19821 err = EFAULT; 19822 } else { 19823 err = 0; 19824 } 19825 break; 19826 19827 case DKIOCGTEMPERATURE: 19828 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19829 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19830 break; 19831 19832 case MHIOCENFAILFAST: 19833 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19834 if ((err = drv_priv(cred_p)) == 0) { 19835 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19836 } 19837 break; 19838 19839 case MHIOCTKOWN: 19840 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19841 if ((err = drv_priv(cred_p)) == 0) { 19842 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19843 } 19844 break; 19845 19846 case MHIOCRELEASE: 19847 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19848 if ((err = drv_priv(cred_p)) == 0) { 19849 err = sd_mhdioc_release(dev); 19850 } 19851 break; 19852 19853 case MHIOCSTATUS: 19854 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19855 if ((err = drv_priv(cred_p)) == 0) { 19856 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19857 case 0: 19858 err = 0; 19859 break; 19860 case EACCES: 19861 *rval_p = 1; 19862 err = 0; 19863 break; 19864 default: 19865 err = EIO; 19866 break; 19867 } 19868 } 19869 break; 19870 19871 case MHIOCQRESERVE: 19872 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19873 if ((err = drv_priv(cred_p)) == 0) { 19874 err = sd_reserve_release(dev, SD_RESERVE); 19875 } 19876 break; 19877 19878 case MHIOCREREGISTERDEVID: 19879 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19880 if (drv_priv(cred_p) == EPERM) { 19881 err = EPERM; 19882 } else if (!un->un_f_devid_supported) { 19883 err = ENOTTY; 19884 } else { 19885 err = sd_mhdioc_register_devid(dev); 19886 } 19887 break; 19888 19889 case MHIOCGRP_INKEYS: 19890 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19891 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19892 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19893 err = ENOTSUP; 19894 } else { 19895 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19896 flag); 19897 } 19898 } 19899 break; 19900 19901 case MHIOCGRP_INRESV: 19902 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19903 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19904 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19905 err = ENOTSUP; 19906 } else { 19907 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19908 } 19909 } 19910 break; 19911 19912 case MHIOCGRP_REGISTER: 19913 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19914 if ((err = drv_priv(cred_p)) != EPERM) { 19915 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19916 err = ENOTSUP; 19917 } else if (arg != NULL) { 19918 mhioc_register_t reg; 19919 if (ddi_copyin((void *)arg, ®, 19920 sizeof (mhioc_register_t), flag) != 0) { 19921 err = EFAULT; 19922 } else { 19923 err = 19924 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19925 un, SD_SCSI3_REGISTER, 19926 (uchar_t *)®); 19927 } 19928 } 19929 } 19930 break; 19931 19932 case MHIOCGRP_RESERVE: 19933 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19934 if ((err = drv_priv(cred_p)) != EPERM) { 19935 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19936 err = ENOTSUP; 19937 } else if (arg != NULL) { 19938 mhioc_resv_desc_t resv_desc; 19939 if (ddi_copyin((void *)arg, &resv_desc, 19940 sizeof (mhioc_resv_desc_t), flag) != 0) { 19941 err = EFAULT; 19942 } else { 19943 err = 19944 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19945 un, SD_SCSI3_RESERVE, 19946 (uchar_t *)&resv_desc); 19947 } 19948 } 19949 } 19950 break; 19951 19952 
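	/*
	 * Illustrative only, not driver code: a privileged user-space
	 * caller drives the MHIOCGRP_* ioctls handled here roughly as
	 * follows (fd is an assumed open descriptor on this device):
	 *
	 *	mhioc_resv_desc_t rd;
	 *	bzero(&rd, sizeof (rd));
	 *	bcopy(mykey, rd.key.key, MHIOC_RESV_KEY_SIZE);
	 *	if (ioctl(fd, MHIOCGRP_RESERVE, &rd) != 0)
	 *		... inspect errno (EACCES, ENOTSUP, ...) ...
	 */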
case MHIOCGRP_PREEMPTANDABORT: 19953 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 19954 if ((err = drv_priv(cred_p)) != EPERM) { 19955 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19956 err = ENOTSUP; 19957 } else if (arg != NULL) { 19958 mhioc_preemptandabort_t preempt_abort; 19959 if (ddi_copyin((void *)arg, &preempt_abort, 19960 sizeof (mhioc_preemptandabort_t), 19961 flag) != 0) { 19962 err = EFAULT; 19963 } else { 19964 err = 19965 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19966 un, SD_SCSI3_PREEMPTANDABORT, 19967 (uchar_t *)&preempt_abort); 19968 } 19969 } 19970 } 19971 break; 19972 19973 case MHIOCGRP_REGISTERANDIGNOREKEY: 19974 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 19975 if ((err = drv_priv(cred_p)) != EPERM) { 19976 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19977 err = ENOTSUP; 19978 } else if (arg != NULL) { 19979 mhioc_registerandignorekey_t r_and_i; 19980 if (ddi_copyin((void *)arg, (void *)&r_and_i, 19981 sizeof (mhioc_registerandignorekey_t), 19982 flag) != 0) { 19983 err = EFAULT; 19984 } else { 19985 err = 19986 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19987 un, SD_SCSI3_REGISTERANDIGNOREKEY, 19988 (uchar_t *)&r_and_i); 19989 } 19990 } 19991 } 19992 break; 19993 19994 case USCSICMD: 19995 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 19996 cr = ddi_get_cred(); 19997 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 19998 err = EPERM; 19999 } else { 20000 enum uio_seg uioseg; 20001 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20002 UIO_USERSPACE; 20003 if (un->un_f_format_in_progress == TRUE) { 20004 err = EAGAIN; 20005 break; 20006 } 20007 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20008 flag, uioseg, SD_PATH_STANDARD); 20009 } 20010 break; 20011 20012 case CDROMPAUSE: 20013 case CDROMRESUME: 20014 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20015 if (!ISCD(un)) { 20016 err = ENOTTY; 20017 } else { 20018 err = sr_pause_resume(dev, cmd); 20019 } 20020 break; 20021 20022 case CDROMPLAYMSF: 20023 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20024 if (!ISCD(un)) { 20025 err = ENOTTY; 20026 } else { 20027 err = sr_play_msf(dev, (caddr_t)arg, flag); 20028 } 20029 break; 20030 20031 case CDROMPLAYTRKIND: 20032 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20033 #if defined(__i386) || defined(__amd64) 20034 /* 20035 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20036 */ 20037 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20038 #else 20039 if (!ISCD(un)) { 20040 #endif 20041 err = ENOTTY; 20042 } else { 20043 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20044 } 20045 break; 20046 20047 case CDROMREADTOCHDR: 20048 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20049 if (!ISCD(un)) { 20050 err = ENOTTY; 20051 } else { 20052 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20053 } 20054 break; 20055 20056 case CDROMREADTOCENTRY: 20057 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20058 if (!ISCD(un)) { 20059 err = ENOTTY; 20060 } else { 20061 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20062 } 20063 break; 20064 20065 case CDROMSTOP: 20066 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20067 if (!ISCD(un)) { 20068 err = ENOTTY; 20069 } else { 20070 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20071 SD_PATH_STANDARD); 20072 } 20073 break; 20074 20075 case CDROMSTART: 20076 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20077 if (!ISCD(un)) { 20078 err = ENOTTY; 20079 } else { 20080 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20081 SD_PATH_STANDARD); 20082 } 20083 break; 
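	/*
	 * Descriptive note: CDROMSTOP and CDROMSTART above, and
	 * CDROMCLOSETRAY below, all funnel into
	 * sd_send_scsi_START_STOP_UNIT(), where the SD_TARGET_* flag
	 * becomes byte 4 of the START STOP UNIT CDB.
	 */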

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor.
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size on ATAPI
		 * drives, thus return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and
			 * setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%lx\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */

	case DKIOCFLUSHWRITECACHE:
		{
			struct dk_callback *dkc = (struct
dk_callback *)arg; 20340 20341 mutex_enter(SD_MUTEX(un)); 20342 if (!un->un_f_sync_cache_supported || 20343 !un->un_f_write_cache_enabled) { 20344 err = un->un_f_sync_cache_supported ? 20345 0 : ENOTSUP; 20346 mutex_exit(SD_MUTEX(un)); 20347 if ((flag & FKIOCTL) && dkc != NULL && 20348 dkc->dkc_callback != NULL) { 20349 (*dkc->dkc_callback)(dkc->dkc_cookie, 20350 err); 20351 /* 20352 * Did callback and reported error. 20353 * Since we did a callback, ioctl 20354 * should return 0. 20355 */ 20356 err = 0; 20357 } 20358 break; 20359 } 20360 mutex_exit(SD_MUTEX(un)); 20361 20362 if ((flag & FKIOCTL) && dkc != NULL && 20363 dkc->dkc_callback != NULL) { 20364 /* async SYNC CACHE request */ 20365 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20366 } else { 20367 /* synchronous SYNC CACHE request */ 20368 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20369 } 20370 } 20371 break; 20372 20373 case DKIOCGETWCE: { 20374 20375 int wce; 20376 20377 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20378 break; 20379 } 20380 20381 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20382 err = EFAULT; 20383 } 20384 break; 20385 } 20386 20387 case DKIOCSETWCE: { 20388 20389 int wce, sync_supported; 20390 20391 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20392 err = EFAULT; 20393 break; 20394 } 20395 20396 /* 20397 * Synchronize multiple threads trying to enable 20398 * or disable the cache via the un_f_wcc_cv 20399 * condition variable. 20400 */ 20401 mutex_enter(SD_MUTEX(un)); 20402 20403 /* 20404 * Don't allow the cache to be enabled if the 20405 * config file has it disabled. 20406 */ 20407 if (un->un_f_opt_disable_cache && wce) { 20408 mutex_exit(SD_MUTEX(un)); 20409 err = EINVAL; 20410 break; 20411 } 20412 20413 /* 20414 * Wait for write cache change in progress 20415 * bit to be clear before proceeding. 20416 */ 20417 while (un->un_f_wcc_inprog) 20418 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20419 20420 un->un_f_wcc_inprog = 1; 20421 20422 if (un->un_f_write_cache_enabled && wce == 0) { 20423 /* 20424 * Disable the write cache. Don't clear 20425 * un_f_write_cache_enabled until after 20426 * the mode select and flush are complete. 20427 */ 20428 sync_supported = un->un_f_sync_cache_supported; 20429 20430 /* 20431 * If cache flush is suppressed, we assume that the 20432 * controller firmware will take care of managing the 20433 * write cache for us: no need to explicitly 20434 * disable it. 20435 */ 20436 if (!un->un_f_suppress_cache_flush) { 20437 mutex_exit(SD_MUTEX(un)); 20438 if ((err = sd_cache_control(un, 20439 SD_CACHE_NOCHANGE, 20440 SD_CACHE_DISABLE)) == 0 && 20441 sync_supported) { 20442 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20443 NULL); 20444 } 20445 } else { 20446 mutex_exit(SD_MUTEX(un)); 20447 } 20448 20449 mutex_enter(SD_MUTEX(un)); 20450 if (err == 0) { 20451 un->un_f_write_cache_enabled = 0; 20452 } 20453 20454 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20455 /* 20456 * Set un_f_write_cache_enabled first, so there is 20457 * no window where the cache is enabled, but the 20458 * bit says it isn't. 20459 */ 20460 un->un_f_write_cache_enabled = 1; 20461 20462 /* 20463 * If cache flush is suppressed, we assume that the 20464 * controller firmware will take care of managing the 20465 * write cache for us: no need to explicitly 20466 * enable it. 
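			 * (Note grounded in the code below: if the MODE
			 * SELECT issued through sd_cache_control() fails,
			 * the err check after re-acquiring SD_MUTEX clears
			 * un_f_write_cache_enabled again, so the flag never
			 * stays set for a cache that could not actually be
			 * enabled.)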
20467 */ 20468 if (!un->un_f_suppress_cache_flush) { 20469 mutex_exit(SD_MUTEX(un)); 20470 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20471 SD_CACHE_ENABLE); 20472 } else { 20473 mutex_exit(SD_MUTEX(un)); 20474 } 20475 20476 mutex_enter(SD_MUTEX(un)); 20477 20478 if (err) { 20479 un->un_f_write_cache_enabled = 0; 20480 } 20481 } 20482 20483 un->un_f_wcc_inprog = 0; 20484 cv_broadcast(&un->un_wcc_cv); 20485 mutex_exit(SD_MUTEX(un)); 20486 break; 20487 } 20488 20489 default: 20490 err = ENOTTY; 20491 break; 20492 } 20493 mutex_enter(SD_MUTEX(un)); 20494 un->un_ncmds_in_driver--; 20495 ASSERT(un->un_ncmds_in_driver >= 0); 20496 mutex_exit(SD_MUTEX(un)); 20497 20498 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20499 return (err); 20500 } 20501 20502 20503 /* 20504 * Function: sd_dkio_ctrl_info 20505 * 20506 * Description: This routine is the driver entry point for handling controller 20507 * information ioctl requests (DKIOCINFO). 20508 * 20509 * Arguments: dev - the device number 20510 * arg - pointer to user provided dk_cinfo structure 20511 * specifying the controller type and attributes. 20512 * flag - this argument is a pass through to ddi_copyxxx() 20513 * directly from the mode argument of ioctl(). 20514 * 20515 * Return Code: 0 20516 * EFAULT 20517 * ENXIO 20518 */ 20519 20520 static int 20521 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20522 { 20523 struct sd_lun *un = NULL; 20524 struct dk_cinfo *info; 20525 dev_info_t *pdip; 20526 int lun, tgt; 20527 20528 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20529 return (ENXIO); 20530 } 20531 20532 info = (struct dk_cinfo *) 20533 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20534 20535 switch (un->un_ctype) { 20536 case CTYPE_CDROM: 20537 info->dki_ctype = DKC_CDROM; 20538 break; 20539 default: 20540 info->dki_ctype = DKC_SCSI_CCS; 20541 break; 20542 } 20543 pdip = ddi_get_parent(SD_DEVINFO(un)); 20544 info->dki_cnum = ddi_get_instance(pdip); 20545 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20546 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20547 } else { 20548 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20549 DK_DEVLEN - 1); 20550 } 20551 20552 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20553 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20554 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20555 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20556 20557 /* Unit Information */ 20558 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20559 info->dki_slave = ((tgt << 3) | lun); 20560 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20561 DK_DEVLEN - 1); 20562 info->dki_flags = DKI_FMTVOL; 20563 info->dki_partition = SDPART(dev); 20564 20565 /* Max Transfer size of this device in blocks */ 20566 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20567 info->dki_addr = 0; 20568 info->dki_space = 0; 20569 info->dki_prio = 0; 20570 info->dki_vec = 0; 20571 20572 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20573 kmem_free(info, sizeof (struct dk_cinfo)); 20574 return (EFAULT); 20575 } else { 20576 kmem_free(info, sizeof (struct dk_cinfo)); 20577 return (0); 20578 } 20579 } 20580 20581 20582 /* 20583 * Function: sd_get_media_info 20584 * 20585 * Description: This routine is the driver entry point for handling ioctl 20586 * requests for the media type or command set profile used by the 20587 * drive to operate on the media (DKIOCGMEDIAINFO). 
20588 * 20589 * Arguments: dev - the device number 20590 * arg - pointer to user provided dk_minfo structure 20591 * specifying the media type, logical block size and 20592 * drive capacity. 20593 * flag - this argument is a pass through to ddi_copyxxx() 20594 * directly from the mode argument of ioctl(). 20595 * 20596 * Return Code: 0 20597 * EACCESS 20598 * EFAULT 20599 * ENXIO 20600 * EIO 20601 */ 20602 20603 static int 20604 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20605 { 20606 struct sd_lun *un = NULL; 20607 struct uscsi_cmd com; 20608 struct scsi_inquiry *sinq; 20609 struct dk_minfo media_info; 20610 u_longlong_t media_capacity; 20611 uint64_t capacity; 20612 uint_t lbasize; 20613 uchar_t *out_data; 20614 uchar_t *rqbuf; 20615 int rval = 0; 20616 int rtn; 20617 20618 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20619 (un->un_state == SD_STATE_OFFLINE)) { 20620 return (ENXIO); 20621 } 20622 20623 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20624 20625 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20626 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20627 20628 /* Issue a TUR to determine if the drive is ready with media present */ 20629 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20630 if (rval == ENXIO) { 20631 goto done; 20632 } 20633 20634 /* Now get configuration data */ 20635 if (ISCD(un)) { 20636 media_info.dki_media_type = DK_CDROM; 20637 20638 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20639 if (un->un_f_mmc_cap == TRUE) { 20640 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20641 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20642 SD_PATH_STANDARD); 20643 20644 if (rtn) { 20645 /* 20646 * Failed for other than an illegal request 20647 * or command not supported 20648 */ 20649 if ((com.uscsi_status == STATUS_CHECK) && 20650 (com.uscsi_rqstatus == STATUS_GOOD)) { 20651 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20652 (rqbuf[12] != 0x20)) { 20653 rval = EIO; 20654 goto done; 20655 } 20656 } 20657 } else { 20658 /* 20659 * The GET CONFIGURATION command succeeded 20660 * so set the media type according to the 20661 * returned data 20662 */ 20663 media_info.dki_media_type = out_data[6]; 20664 media_info.dki_media_type <<= 8; 20665 media_info.dki_media_type |= out_data[7]; 20666 } 20667 } 20668 } else { 20669 /* 20670 * The profile list is not available, so we attempt to identify 20671 * the media type based on the inquiry data 20672 */ 20673 sinq = un->un_sd->sd_inq; 20674 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20675 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20676 /* This is a direct access device or optical disk */ 20677 media_info.dki_media_type = DK_FIXED_DISK; 20678 20679 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20680 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20681 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20682 media_info.dki_media_type = DK_ZIP; 20683 } else if ( 20684 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20685 media_info.dki_media_type = DK_JAZ; 20686 } 20687 } 20688 } else { 20689 /* 20690 * Not a CD, direct access or optical disk so return 20691 * unknown media 20692 */ 20693 media_info.dki_media_type = DK_UNKNOWN; 20694 } 20695 } 20696 20697 /* Now read the capacity so we can provide the lbasize and capacity */ 20698 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20699 SD_PATH_DIRECT)) { 20700 case 0: 20701 break; 20702 case EACCES: 20703 rval = EACCES; 20704 goto done; 20705 default: 20706 rval = EIO; 20707 goto done; 20708 } 20709 20710 
media_info.dki_lbsize = lbasize; 20711 media_capacity = capacity; 20712 20713 /* 20714 * sd_send_scsi_READ_CAPACITY() reports capacity in 20715 * un->un_sys_blocksize chunks. So we need to convert it into 20716 * cap.lbasize chunks. 20717 */ 20718 media_capacity *= un->un_sys_blocksize; 20719 media_capacity /= lbasize; 20720 media_info.dki_capacity = media_capacity; 20721 20722 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20723 rval = EFAULT; 20724 /* Put goto. Anybody might add some code below in future */ 20725 goto done; 20726 } 20727 done: 20728 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20729 kmem_free(rqbuf, SENSE_LENGTH); 20730 return (rval); 20731 } 20732 20733 20734 /* 20735 * Function: sd_check_media 20736 * 20737 * Description: This utility routine implements the functionality for the 20738 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20739 * driver state changes from that specified by the user 20740 * (inserted or ejected). For example, if the user specifies 20741 * DKIO_EJECTED and the current media state is inserted this 20742 * routine will immediately return DKIO_INSERTED. However, if the 20743 * current media state is not inserted the user thread will be 20744 * blocked until the drive state changes. If DKIO_NONE is specified 20745 * the user thread will block until a drive state change occurs. 20746 * 20747 * Arguments: dev - the device number 20748 * state - user pointer to a dkio_state, updated with the current 20749 * drive state at return. 20750 * 20751 * Return Code: ENXIO 20752 * EIO 20753 * EAGAIN 20754 * EINTR 20755 */ 20756 20757 static int 20758 sd_check_media(dev_t dev, enum dkio_state state) 20759 { 20760 struct sd_lun *un = NULL; 20761 enum dkio_state prev_state; 20762 opaque_t token = NULL; 20763 int rval = 0; 20764 20765 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20766 return (ENXIO); 20767 } 20768 20769 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20770 20771 mutex_enter(SD_MUTEX(un)); 20772 20773 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20774 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20775 20776 prev_state = un->un_mediastate; 20777 20778 /* is there anything to do? */ 20779 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20780 /* 20781 * submit the request to the scsi_watch service; 20782 * scsi_media_watch_cb() does the real work 20783 */ 20784 mutex_exit(SD_MUTEX(un)); 20785 20786 /* 20787 * This change handles the case where a scsi watch request is 20788 * added to a device that is powered down. To accomplish this 20789 * we power up the device before adding the scsi watch request, 20790 * since the scsi watch sends a TUR directly to the device 20791 * which the device cannot handle if it is powered down. 20792 */ 20793 if (sd_pm_entry(un) != DDI_SUCCESS) { 20794 mutex_enter(SD_MUTEX(un)); 20795 goto done; 20796 } 20797 20798 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20799 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20800 (caddr_t)dev); 20801 20802 sd_pm_exit(un); 20803 20804 mutex_enter(SD_MUTEX(un)); 20805 if (token == NULL) { 20806 rval = EAGAIN; 20807 goto done; 20808 } 20809 20810 /* 20811 * This is a special case IOCTL that doesn't return 20812 * until the media state changes. Routine sdpower 20813 * knows about and handles this so don't count it 20814 * as an active cmd in the driver, which would 20815 * keep the device busy to the pm framework. 
20816 * If the count isn't decremented the device can't 20817 * be powered down. 20818 */ 20819 un->un_ncmds_in_driver--; 20820 ASSERT(un->un_ncmds_in_driver >= 0); 20821 20822 /* 20823 * if a prior request had been made, this will be the same 20824 * token, as scsi_watch was designed that way. 20825 */ 20826 un->un_swr_token = token; 20827 un->un_specified_mediastate = state; 20828 20829 /* 20830 * now wait for media change 20831 * we will not be signalled unless mediastate == state but it is 20832 * still better to test for this condition, since there is a 20833 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20834 */ 20835 SD_TRACE(SD_LOG_COMMON, un, 20836 "sd_check_media: waiting for media state change\n"); 20837 while (un->un_mediastate == state) { 20838 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20839 SD_TRACE(SD_LOG_COMMON, un, 20840 "sd_check_media: waiting for media state " 20841 "was interrupted\n"); 20842 un->un_ncmds_in_driver++; 20843 rval = EINTR; 20844 goto done; 20845 } 20846 SD_TRACE(SD_LOG_COMMON, un, 20847 "sd_check_media: received signal, state=%x\n", 20848 un->un_mediastate); 20849 } 20850 /* 20851 * Inc the counter to indicate the device once again 20852 * has an active outstanding cmd. 20853 */ 20854 un->un_ncmds_in_driver++; 20855 } 20856 20857 /* invalidate geometry */ 20858 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20859 sr_ejected(un); 20860 } 20861 20862 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20863 uint64_t capacity; 20864 uint_t lbasize; 20865 20866 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20867 mutex_exit(SD_MUTEX(un)); 20868 /* 20869 * Since the following routines use SD_PATH_DIRECT, we must 20870 * call PM directly before the upcoming disk accesses. This 20871 * may cause the disk to be power/spin up. 20872 */ 20873 20874 if (sd_pm_entry(un) == DDI_SUCCESS) { 20875 rval = sd_send_scsi_READ_CAPACITY(un, 20876 &capacity, 20877 &lbasize, SD_PATH_DIRECT); 20878 if (rval != 0) { 20879 sd_pm_exit(un); 20880 mutex_enter(SD_MUTEX(un)); 20881 goto done; 20882 } 20883 } else { 20884 rval = EIO; 20885 mutex_enter(SD_MUTEX(un)); 20886 goto done; 20887 } 20888 mutex_enter(SD_MUTEX(un)); 20889 20890 sd_update_block_info(un, lbasize, capacity); 20891 20892 /* 20893 * Check if the media in the device is writable or not 20894 */ 20895 if (ISCD(un)) 20896 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20897 20898 mutex_exit(SD_MUTEX(un)); 20899 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20900 if ((cmlb_validate(un->un_cmlbhandle, 0, 20901 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20902 sd_set_pstats(un); 20903 SD_TRACE(SD_LOG_IO_PARTITION, un, 20904 "sd_check_media: un:0x%p pstats created and " 20905 "set\n", un); 20906 } 20907 20908 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20909 SD_PATH_DIRECT); 20910 sd_pm_exit(un); 20911 20912 mutex_enter(SD_MUTEX(un)); 20913 } 20914 done: 20915 un->un_f_watcht_stopped = FALSE; 20916 if (un->un_swr_token) { 20917 /* 20918 * Use of this local token and the mutex ensures that we avoid 20919 * some race conditions associated with terminating the 20920 * scsi watch. 
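	 * (The terminate call below is made after dropping SD_MUTEX,
	 * since scsi_watch_request_terminate() with
	 * SCSI_WATCH_TERMINATE_WAIT may block until the callback has
	 * completed; holding the mutex across it could deadlock with
	 * sd_media_watch_cb, which takes SD_MUTEX itself.)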
20921 */ 20922 token = un->un_swr_token; 20923 un->un_swr_token = (opaque_t)NULL; 20924 mutex_exit(SD_MUTEX(un)); 20925 (void) scsi_watch_request_terminate(token, 20926 SCSI_WATCH_TERMINATE_WAIT); 20927 mutex_enter(SD_MUTEX(un)); 20928 } 20929 20930 /* 20931 * Update the capacity kstat value, if no media previously 20932 * (capacity kstat is 0) and a media has been inserted 20933 * (un_f_blockcount_is_valid == TRUE) 20934 */ 20935 if (un->un_errstats) { 20936 struct sd_errstats *stp = NULL; 20937 20938 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20939 if ((stp->sd_capacity.value.ui64 == 0) && 20940 (un->un_f_blockcount_is_valid == TRUE)) { 20941 stp->sd_capacity.value.ui64 = 20942 (uint64_t)((uint64_t)un->un_blockcount * 20943 un->un_sys_blocksize); 20944 } 20945 } 20946 mutex_exit(SD_MUTEX(un)); 20947 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20948 return (rval); 20949 } 20950 20951 20952 /* 20953 * Function: sd_delayed_cv_broadcast 20954 * 20955 * Description: Delayed cv_broadcast to allow for target to recover from media 20956 * insertion. 20957 * 20958 * Arguments: arg - driver soft state (unit) structure 20959 */ 20960 20961 static void 20962 sd_delayed_cv_broadcast(void *arg) 20963 { 20964 struct sd_lun *un = arg; 20965 20966 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20967 20968 mutex_enter(SD_MUTEX(un)); 20969 un->un_dcvb_timeid = NULL; 20970 cv_broadcast(&un->un_state_cv); 20971 mutex_exit(SD_MUTEX(un)); 20972 } 20973 20974 20975 /* 20976 * Function: sd_media_watch_cb 20977 * 20978 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20979 * routine processes the TUR sense data and updates the driver 20980 * state if a transition has occurred. The user thread 20981 * (sd_check_media) is then signalled. 
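 *
 *		In outline, the decoding applied below is:
 *		    UNIT ATTENTION, ASC 0x28		-> DKIO_INSERTED
 *		    NOT READY, ASC 0x3a			-> DKIO_EJECTED
 *		    NOT READY, ASC 0x04, ASCQ 2/7/8	-> DKIO_INSERTED
 *		    GOOD status with CMD_CMPLT		-> DKIO_INSERTED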
20982 * 20983 * Arguments: arg - the device 'dev_t' is used for context to discriminate 20984 * among multiple watches that share this callback function 20985 * resultp - scsi watch facility result packet containing scsi 20986 * packet, status byte and sense data 20987 * 20988 * Return Code: 0 for success, -1 for failure 20989 */ 20990 20991 static int 20992 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 20993 { 20994 struct sd_lun *un; 20995 struct scsi_status *statusp = resultp->statusp; 20996 uint8_t *sensep = (uint8_t *)resultp->sensep; 20997 enum dkio_state state = DKIO_NONE; 20998 dev_t dev = (dev_t)arg; 20999 uchar_t actual_sense_length; 21000 uint8_t skey, asc, ascq; 21001 21002 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21003 return (-1); 21004 } 21005 actual_sense_length = resultp->actual_sense_length; 21006 21007 mutex_enter(SD_MUTEX(un)); 21008 SD_TRACE(SD_LOG_COMMON, un, 21009 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21010 *((char *)statusp), (void *)sensep, actual_sense_length); 21011 21012 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21013 un->un_mediastate = DKIO_DEV_GONE; 21014 cv_broadcast(&un->un_state_cv); 21015 mutex_exit(SD_MUTEX(un)); 21016 21017 return (0); 21018 } 21019 21020 /* 21021 * If there was a check condition then sensep points to valid sense data 21022 * If status was not a check condition but a reservation or busy status 21023 * then the new state is DKIO_NONE 21024 */ 21025 if (sensep != NULL) { 21026 skey = scsi_sense_key(sensep); 21027 asc = scsi_sense_asc(sensep); 21028 ascq = scsi_sense_ascq(sensep); 21029 21030 SD_INFO(SD_LOG_COMMON, un, 21031 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21032 skey, asc, ascq); 21033 /* This routine only uses up to 13 bytes of sense data. */ 21034 if (actual_sense_length >= 13) { 21035 if (skey == KEY_UNIT_ATTENTION) { 21036 if (asc == 0x28) { 21037 state = DKIO_INSERTED; 21038 } 21039 } else if (skey == KEY_NOT_READY) { 21040 /* 21041 * if 02/04/02 means that the host 21042 * should send start command. Explicitly 21043 * leave the media state as is 21044 * (inserted) as the media is inserted 21045 * and host has stopped device for PM 21046 * reasons. Upon next true read/write 21047 * to this media will bring the 21048 * device to the right state good for 21049 * media access. 21050 */ 21051 if (asc == 0x3a) { 21052 state = DKIO_EJECTED; 21053 } else { 21054 /* 21055 * If the drive is busy with an 21056 * operation or long write, keep the 21057 * media in an inserted state. 21058 */ 21059 21060 if ((asc == 0x04) && 21061 ((ascq == 0x02) || 21062 (ascq == 0x07) || 21063 (ascq == 0x08))) { 21064 state = DKIO_INSERTED; 21065 } 21066 } 21067 } else if (skey == KEY_NO_SENSE) { 21068 if ((asc == 0x00) && (ascq == 0x00)) { 21069 /* 21070 * Sense Data 00/00/00 does not provide 21071 * any information about the state of 21072 * the media. Ignore it. 
21073 */ 21074 mutex_exit(SD_MUTEX(un)); 21075 return (0); 21076 } 21077 } 21078 } 21079 } else if ((*((char *)statusp) == STATUS_GOOD) && 21080 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21081 state = DKIO_INSERTED; 21082 } 21083 21084 SD_TRACE(SD_LOG_COMMON, un, 21085 "sd_media_watch_cb: state=%x, specified=%x\n", 21086 state, un->un_specified_mediastate); 21087 21088 /* 21089 * now signal the waiting thread if this is *not* the specified state; 21090 * delay the signal if the state is DKIO_INSERTED to allow the target 21091 * to recover 21092 */ 21093 if (state != un->un_specified_mediastate) { 21094 un->un_mediastate = state; 21095 if (state == DKIO_INSERTED) { 21096 /* 21097 * delay the signal to give the drive a chance 21098 * to do what it apparently needs to do 21099 */ 21100 SD_TRACE(SD_LOG_COMMON, un, 21101 "sd_media_watch_cb: delayed cv_broadcast\n"); 21102 if (un->un_dcvb_timeid == NULL) { 21103 un->un_dcvb_timeid = 21104 timeout(sd_delayed_cv_broadcast, un, 21105 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21106 } 21107 } else { 21108 SD_TRACE(SD_LOG_COMMON, un, 21109 "sd_media_watch_cb: immediate cv_broadcast\n"); 21110 cv_broadcast(&un->un_state_cv); 21111 } 21112 } 21113 mutex_exit(SD_MUTEX(un)); 21114 return (0); 21115 } 21116 21117 21118 /* 21119 * Function: sd_dkio_get_temp 21120 * 21121 * Description: This routine is the driver entry point for handling ioctl 21122 * requests to get the disk temperature. 21123 * 21124 * Arguments: dev - the device number 21125 * arg - pointer to user provided dk_temperature structure. 21126 * flag - this argument is a pass through to ddi_copyxxx() 21127 * directly from the mode argument of ioctl(). 21128 * 21129 * Return Code: 0 21130 * EFAULT 21131 * ENXIO 21132 * EAGAIN 21133 */ 21134 21135 static int 21136 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21137 { 21138 struct sd_lun *un = NULL; 21139 struct dk_temperature *dktemp = NULL; 21140 uchar_t *temperature_page; 21141 int rval = 0; 21142 int path_flag = SD_PATH_STANDARD; 21143 21144 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21145 return (ENXIO); 21146 } 21147 21148 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21149 21150 /* copyin the disk temp argument to get the user flags */ 21151 if (ddi_copyin((void *)arg, dktemp, 21152 sizeof (struct dk_temperature), flag) != 0) { 21153 rval = EFAULT; 21154 goto done; 21155 } 21156 21157 /* Initialize the temperature to invalid. */ 21158 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21159 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21160 21161 /* 21162 * Note: Investigate removing the "bypass pm" semantic. 21163 * Can we just bypass PM always? 21164 */ 21165 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21166 path_flag = SD_PATH_DIRECT; 21167 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21168 mutex_enter(&un->un_pm_mutex); 21169 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21170 /* 21171 * If DKT_BYPASS_PM is set, and the drive happens to be 21172 * in low power mode, we can not wake it up, Need to 21173 * return EAGAIN. 21174 */ 21175 mutex_exit(&un->un_pm_mutex); 21176 rval = EAGAIN; 21177 goto done; 21178 } else { 21179 /* 21180 * Indicate to PM the device is busy. This is required 21181 * to avoid a race - i.e. the ioctl is issuing a 21182 * command and the pm framework brings down the device 21183 * to low power mode (possible power cut-off on some 21184 * platforms). 
21185 */ 21186 mutex_exit(&un->un_pm_mutex); 21187 if (sd_pm_entry(un) != DDI_SUCCESS) { 21188 rval = EAGAIN; 21189 goto done; 21190 } 21191 } 21192 } 21193 21194 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21195 21196 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21197 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21198 goto done2; 21199 } 21200 21201 /* 21202 * For the current temperature verify that the parameter length is 0x02 21203 * and the parameter code is 0x00 21204 */ 21205 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21206 (temperature_page[5] == 0x00)) { 21207 if (temperature_page[9] == 0xFF) { 21208 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21209 } else { 21210 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21211 } 21212 } 21213 21214 /* 21215 * For the reference temperature verify that the parameter 21216 * length is 0x02 and the parameter code is 0x01 21217 */ 21218 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21219 (temperature_page[11] == 0x01)) { 21220 if (temperature_page[15] == 0xFF) { 21221 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21222 } else { 21223 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21224 } 21225 } 21226 21227 /* Do the copyout regardless of the temperature commands status. */ 21228 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21229 flag) != 0) { 21230 rval = EFAULT; 21231 } 21232 21233 done2: 21234 if (path_flag == SD_PATH_DIRECT) { 21235 sd_pm_exit(un); 21236 } 21237 21238 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21239 done: 21240 if (dktemp != NULL) { 21241 kmem_free(dktemp, sizeof (struct dk_temperature)); 21242 } 21243 21244 return (rval); 21245 } 21246 21247 21248 /* 21249 * Function: sd_log_page_supported 21250 * 21251 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21252 * supported log pages. 21253 * 21254 * Arguments: un - 21255 * log_page - 21256 * 21257 * Return Code: -1 - on error (log sense is optional and may not be supported). 21258 * 0 - log page not found. 21259 * 1 - log page found. 21260 */ 21261 21262 static int 21263 sd_log_page_supported(struct sd_lun *un, int log_page) 21264 { 21265 uchar_t *log_page_data; 21266 int i; 21267 int match = 0; 21268 int log_size; 21269 21270 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21271 21272 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21273 SD_PATH_DIRECT) != 0) { 21274 SD_ERROR(SD_LOG_COMMON, un, 21275 "sd_log_page_supported: failed log page retrieval\n"); 21276 kmem_free(log_page_data, 0xFF); 21277 return (-1); 21278 } 21279 log_size = log_page_data[3]; 21280 21281 /* 21282 * The list of supported log pages start from the fourth byte. Check 21283 * until we run out of log pages or a match is found. 21284 */ 21285 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21286 if (log_page_data[i] == log_page) { 21287 match++; 21288 } 21289 } 21290 kmem_free(log_page_data, 0xFF); 21291 return (match); 21292 } 21293 21294 21295 /* 21296 * Function: sd_mhdioc_failfast 21297 * 21298 * Description: This routine is the driver entry point for handling ioctl 21299 * requests to enable/disable the multihost failfast option. 21300 * (MHIOCENFAILFAST) 21301 * 21302 * Arguments: dev - the device number 21303 * arg - user specified probing interval. 21304 * flag - this argument is a pass through to ddi_copyxxx() 21305 * directly from the mode argument of ioctl(). 
21306 * 21307 * Return Code: 0 21308 * EFAULT 21309 * ENXIO 21310 */ 21311 21312 static int 21313 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21314 { 21315 struct sd_lun *un = NULL; 21316 int mh_time; 21317 int rval = 0; 21318 21319 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21320 return (ENXIO); 21321 } 21322 21323 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21324 return (EFAULT); 21325 21326 if (mh_time) { 21327 mutex_enter(SD_MUTEX(un)); 21328 un->un_resvd_status |= SD_FAILFAST; 21329 mutex_exit(SD_MUTEX(un)); 21330 /* 21331 * If mh_time is INT_MAX, then this ioctl is being used for 21332 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21333 */ 21334 if (mh_time != INT_MAX) { 21335 rval = sd_check_mhd(dev, mh_time); 21336 } 21337 } else { 21338 (void) sd_check_mhd(dev, 0); 21339 mutex_enter(SD_MUTEX(un)); 21340 un->un_resvd_status &= ~SD_FAILFAST; 21341 mutex_exit(SD_MUTEX(un)); 21342 } 21343 return (rval); 21344 } 21345 21346 21347 /* 21348 * Function: sd_mhdioc_takeown 21349 * 21350 * Description: This routine is the driver entry point for handling ioctl 21351 * requests to forcefully acquire exclusive access rights to the 21352 * multihost disk (MHIOCTKOWN). 21353 * 21354 * Arguments: dev - the device number 21355 * arg - user provided structure specifying the delay 21356 * parameters in milliseconds 21357 * flag - this argument is a pass through to ddi_copyxxx() 21358 * directly from the mode argument of ioctl(). 21359 * 21360 * Return Code: 0 21361 * EFAULT 21362 * ENXIO 21363 */ 21364 21365 static int 21366 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21367 { 21368 struct sd_lun *un = NULL; 21369 struct mhioctkown *tkown = NULL; 21370 int rval = 0; 21371 21372 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21373 return (ENXIO); 21374 } 21375 21376 if (arg != NULL) { 21377 tkown = (struct mhioctkown *) 21378 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21379 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21380 if (rval != 0) { 21381 rval = EFAULT; 21382 goto error; 21383 } 21384 } 21385 21386 rval = sd_take_ownership(dev, tkown); 21387 mutex_enter(SD_MUTEX(un)); 21388 if (rval == 0) { 21389 un->un_resvd_status |= SD_RESERVE; 21390 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21391 sd_reinstate_resv_delay = 21392 tkown->reinstate_resv_delay * 1000; 21393 } else { 21394 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21395 } 21396 /* 21397 * Give the scsi_watch routine interval set by 21398 * the MHIOCENFAILFAST ioctl precedence here. 21399 */ 21400 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21401 mutex_exit(SD_MUTEX(un)); 21402 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21403 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21404 "sd_mhdioc_takeown : %d\n", 21405 sd_reinstate_resv_delay); 21406 } else { 21407 mutex_exit(SD_MUTEX(un)); 21408 } 21409 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21410 sd_mhd_reset_notify_cb, (caddr_t)un); 21411 } else { 21412 un->un_resvd_status &= ~SD_RESERVE; 21413 mutex_exit(SD_MUTEX(un)); 21414 } 21415 21416 error: 21417 if (tkown != NULL) { 21418 kmem_free(tkown, sizeof (struct mhioctkown)); 21419 } 21420 return (rval); 21421 } 21422 21423 21424 /* 21425 * Function: sd_mhdioc_release 21426 * 21427 * Description: This routine is the driver entry point for handling ioctl 21428 * requests to release exclusive access rights to the multihost 21429 * disk (MHIOCRELEASE). 
21430 * 21431 * Arguments: dev - the device number 21432 * 21433 * Return Code: 0 21434 * ENXIO 21435 */ 21436 21437 static int 21438 sd_mhdioc_release(dev_t dev) 21439 { 21440 struct sd_lun *un = NULL; 21441 timeout_id_t resvd_timeid_save; 21442 int resvd_status_save; 21443 int rval = 0; 21444 21445 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21446 return (ENXIO); 21447 } 21448 21449 mutex_enter(SD_MUTEX(un)); 21450 resvd_status_save = un->un_resvd_status; 21451 un->un_resvd_status &= 21452 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21453 if (un->un_resvd_timeid) { 21454 resvd_timeid_save = un->un_resvd_timeid; 21455 un->un_resvd_timeid = NULL; 21456 mutex_exit(SD_MUTEX(un)); 21457 (void) untimeout(resvd_timeid_save); 21458 } else { 21459 mutex_exit(SD_MUTEX(un)); 21460 } 21461 21462 /* 21463 * destroy any pending timeout thread that may be attempting to 21464 * reinstate reservation on this device. 21465 */ 21466 sd_rmv_resv_reclaim_req(dev); 21467 21468 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21469 mutex_enter(SD_MUTEX(un)); 21470 if ((un->un_mhd_token) && 21471 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21472 mutex_exit(SD_MUTEX(un)); 21473 (void) sd_check_mhd(dev, 0); 21474 } else { 21475 mutex_exit(SD_MUTEX(un)); 21476 } 21477 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21478 sd_mhd_reset_notify_cb, (caddr_t)un); 21479 } else { 21480 /* 21481 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21482 */ 21483 mutex_enter(SD_MUTEX(un)); 21484 un->un_resvd_status = resvd_status_save; 21485 mutex_exit(SD_MUTEX(un)); 21486 } 21487 return (rval); 21488 } 21489 21490 21491 /* 21492 * Function: sd_mhdioc_register_devid 21493 * 21494 * Description: This routine is the driver entry point for handling ioctl 21495 * requests to register the device id (MHIOCREREGISTERDEVID). 21496 * 21497 * Note: The implementation for this ioctl has been updated to 21498 * be consistent with the original PSARC case (1999/357) 21499 * (4375899, 4241671, 4220005) 21500 * 21501 * Arguments: dev - the device number 21502 * 21503 * Return Code: 0 21504 * ENXIO 21505 */ 21506 21507 static int 21508 sd_mhdioc_register_devid(dev_t dev) 21509 { 21510 struct sd_lun *un = NULL; 21511 int rval = 0; 21512 21513 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21514 return (ENXIO); 21515 } 21516 21517 ASSERT(!mutex_owned(SD_MUTEX(un))); 21518 21519 mutex_enter(SD_MUTEX(un)); 21520 21521 /* If a devid already exists, de-register it */ 21522 if (un->un_devid != NULL) { 21523 ddi_devid_unregister(SD_DEVINFO(un)); 21524 /* 21525 * After unregister devid, needs to free devid memory 21526 */ 21527 ddi_devid_free(un->un_devid); 21528 un->un_devid = NULL; 21529 } 21530 21531 /* Check for reservation conflict */ 21532 mutex_exit(SD_MUTEX(un)); 21533 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21534 mutex_enter(SD_MUTEX(un)); 21535 21536 switch (rval) { 21537 case 0: 21538 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21539 break; 21540 case EACCES: 21541 break; 21542 default: 21543 rval = EIO; 21544 } 21545 21546 mutex_exit(SD_MUTEX(un)); 21547 return (rval); 21548 } 21549 21550 21551 /* 21552 * Function: sd_mhdioc_inkeys 21553 * 21554 * Description: This routine is the driver entry point for handling ioctl 21555 * requests to issue the SCSI-3 Persistent In Read Keys command 21556 * to the device (MHIOCGRP_INKEYS). 
21557 * 21558 * Arguments: dev - the device number 21559 * arg - user provided in_keys structure 21560 * flag - this argument is a pass through to ddi_copyxxx() 21561 * directly from the mode argument of ioctl(). 21562 * 21563 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21564 * ENXIO 21565 * EFAULT 21566 */ 21567 21568 static int 21569 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21570 { 21571 struct sd_lun *un; 21572 mhioc_inkeys_t inkeys; 21573 int rval = 0; 21574 21575 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21576 return (ENXIO); 21577 } 21578 21579 #ifdef _MULTI_DATAMODEL 21580 switch (ddi_model_convert_from(flag & FMODELS)) { 21581 case DDI_MODEL_ILP32: { 21582 struct mhioc_inkeys32 inkeys32; 21583 21584 if (ddi_copyin(arg, &inkeys32, 21585 sizeof (struct mhioc_inkeys32), flag) != 0) { 21586 return (EFAULT); 21587 } 21588 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21589 if ((rval = sd_persistent_reservation_in_read_keys(un, 21590 &inkeys, flag)) != 0) { 21591 return (rval); 21592 } 21593 inkeys32.generation = inkeys.generation; 21594 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21595 flag) != 0) { 21596 return (EFAULT); 21597 } 21598 break; 21599 } 21600 case DDI_MODEL_NONE: 21601 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21602 flag) != 0) { 21603 return (EFAULT); 21604 } 21605 if ((rval = sd_persistent_reservation_in_read_keys(un, 21606 &inkeys, flag)) != 0) { 21607 return (rval); 21608 } 21609 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21610 flag) != 0) { 21611 return (EFAULT); 21612 } 21613 break; 21614 } 21615 21616 #else /* ! _MULTI_DATAMODEL */ 21617 21618 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21619 return (EFAULT); 21620 } 21621 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21622 if (rval != 0) { 21623 return (rval); 21624 } 21625 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21626 return (EFAULT); 21627 } 21628 21629 #endif /* _MULTI_DATAMODEL */ 21630 21631 return (rval); 21632 } 21633 21634 21635 /* 21636 * Function: sd_mhdioc_inresv 21637 * 21638 * Description: This routine is the driver entry point for handling ioctl 21639 * requests to issue the SCSI-3 Persistent In Read Reservations 21640 * command to the device (MHIOCGRP_INKEYS). 21641 * 21642 * Arguments: dev - the device number 21643 * arg - user provided in_resv structure 21644 * flag - this argument is a pass through to ddi_copyxxx() 21645 * directly from the mode argument of ioctl(). 
21646 * 21647 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21648 * ENXIO 21649 * EFAULT 21650 */ 21651 21652 static int 21653 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21654 { 21655 struct sd_lun *un; 21656 mhioc_inresvs_t inresvs; 21657 int rval = 0; 21658 21659 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21660 return (ENXIO); 21661 } 21662 21663 #ifdef _MULTI_DATAMODEL 21664 21665 switch (ddi_model_convert_from(flag & FMODELS)) { 21666 case DDI_MODEL_ILP32: { 21667 struct mhioc_inresvs32 inresvs32; 21668 21669 if (ddi_copyin(arg, &inresvs32, 21670 sizeof (struct mhioc_inresvs32), flag) != 0) { 21671 return (EFAULT); 21672 } 21673 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21674 if ((rval = sd_persistent_reservation_in_read_resv(un, 21675 &inresvs, flag)) != 0) { 21676 return (rval); 21677 } 21678 inresvs32.generation = inresvs.generation; 21679 if (ddi_copyout(&inresvs32, arg, 21680 sizeof (struct mhioc_inresvs32), flag) != 0) { 21681 return (EFAULT); 21682 } 21683 break; 21684 } 21685 case DDI_MODEL_NONE: 21686 if (ddi_copyin(arg, &inresvs, 21687 sizeof (mhioc_inresvs_t), flag) != 0) { 21688 return (EFAULT); 21689 } 21690 if ((rval = sd_persistent_reservation_in_read_resv(un, 21691 &inresvs, flag)) != 0) { 21692 return (rval); 21693 } 21694 if (ddi_copyout(&inresvs, arg, 21695 sizeof (mhioc_inresvs_t), flag) != 0) { 21696 return (EFAULT); 21697 } 21698 break; 21699 } 21700 21701 #else /* ! _MULTI_DATAMODEL */ 21702 21703 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21704 return (EFAULT); 21705 } 21706 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21707 if (rval != 0) { 21708 return (rval); 21709 } 21710 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21711 return (EFAULT); 21712 } 21713 21714 #endif /* ! _MULTI_DATAMODEL */ 21715 21716 return (rval); 21717 } 21718 21719 21720 /* 21721 * The following routines support the clustering functionality described below 21722 * and implement lost reservation reclaim functionality. 21723 * 21724 * Clustering 21725 * ---------- 21726 * The clustering code uses two different, independent forms of SCSI 21727 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21728 * Persistent Group Reservations. For any particular disk, it will use either 21729 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21730 * 21731 * SCSI-2 21732 * The cluster software takes ownership of a multi-hosted disk by issuing the 21733 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21734 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21735 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21736 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21737 * driver. The meaning of failfast is that if the driver (on this host) ever 21738 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21739 * it should immediately panic the host. The motivation for this ioctl is that 21740 * if this host does encounter reservation conflict, the underlying cause is 21741 * that some other host of the cluster has decided that this host is no longer 21742 * in the cluster and has seized control of the disks for itself. Since this 21743 * host is no longer in the cluster, it ought to panic itself. 
The 21744 * MHIOCENFAILFAST ioctl does two things: 21745 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21746 * error to panic the host 21747 * (b) it sets up a periodic timer to test whether this host still has 21748 * "access" (in that no other host has reserved the device): if the 21749 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21750 * purpose of that periodic timer is to handle scenarios where the host is 21751 * otherwise temporarily quiescent, temporarily doing no real i/o. 21752 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21753 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21754 * the device itself. 21755 * 21756 * SCSI-3 PGR 21757 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21758 * facility is supported through the shared multihost disk ioctls 21759 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21760 * MHIOCGRP_PREEMPTANDABORT) 21761 * 21762 * Reservation Reclaim: 21763 * -------------------- 21764 * To support the lost reservation reclaim operations this driver creates a 21765 * single thread to handle reinstating reservations on all devices that have 21766 * lost reservations sd_resv_reclaim_requests are logged for all devices that 21767 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb 21768 * and the reservation reclaim thread loops through the requests to regain the 21769 * lost reservations. 21770 */ 21771 21772 /* 21773 * Function: sd_check_mhd() 21774 * 21775 * Description: This function sets up and submits a scsi watch request or 21776 * terminates an existing watch request. This routine is used in 21777 * support of reservation reclaim. 21778 * 21779 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21780 * among multiple watches that share the callback function 21781 * interval - the number of microseconds specifying the watch 21782 * interval for issuing TEST UNIT READY commands. If 21783 * set to 0 the watch should be terminated. If the 21784 * interval is set to 0 and if the device is required 21785 * to hold reservation while disabling failfast, the 21786 * watch is restarted with an interval of 21787 * reinstate_resv_delay. 21788 * 21789 * Return Code: 0 - Successful submit/terminate of scsi watch request 21790 * ENXIO - Indicates an invalid device was specified 21791 * EAGAIN - Unable to submit the scsi watch request 21792 */ 21793 21794 static int 21795 sd_check_mhd(dev_t dev, int interval) 21796 { 21797 struct sd_lun *un; 21798 opaque_t token; 21799 21800 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21801 return (ENXIO); 21802 } 21803 21804 /* is this a watch termination request? */ 21805 if (interval == 0) { 21806 mutex_enter(SD_MUTEX(un)); 21807 /* if there is an existing watch task then terminate it */ 21808 if (un->un_mhd_token) { 21809 token = un->un_mhd_token; 21810 un->un_mhd_token = NULL; 21811 mutex_exit(SD_MUTEX(un)); 21812 (void) scsi_watch_request_terminate(token, 21813 SCSI_WATCH_TERMINATE_WAIT); 21814 mutex_enter(SD_MUTEX(un)); 21815 } else { 21816 mutex_exit(SD_MUTEX(un)); 21817 /* 21818 * Note: If we return here we don't check for the 21819 * failfast case. This is the original legacy 21820 * implementation but perhaps we should be checking 21821 * the failfast case. 
21822 */ 21823 return (0); 21824 } 21825 /* 21826 * If the device is required to hold reservation while 21827 * disabling failfast, we need to restart the scsi_watch 21828 * routine with an interval of reinstate_resv_delay. 21829 */ 21830 if (un->un_resvd_status & SD_RESERVE) { 21831 interval = sd_reinstate_resv_delay/1000; 21832 } else { 21833 /* no failfast so bail */ 21834 mutex_exit(SD_MUTEX(un)); 21835 return (0); 21836 } 21837 mutex_exit(SD_MUTEX(un)); 21838 } 21839 21840 /* 21841 * adjust minimum time interval to 1 second, 21842 * and convert from msecs to usecs 21843 */ 21844 if (interval > 0 && interval < 1000) { 21845 interval = 1000; 21846 } 21847 interval *= 1000; 21848 21849 /* 21850 * submit the request to the scsi_watch service 21851 */ 21852 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21853 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21854 if (token == NULL) { 21855 return (EAGAIN); 21856 } 21857 21858 /* 21859 * save token for termination later on 21860 */ 21861 mutex_enter(SD_MUTEX(un)); 21862 un->un_mhd_token = token; 21863 mutex_exit(SD_MUTEX(un)); 21864 return (0); 21865 } 21866 21867 21868 /* 21869 * Function: sd_mhd_watch_cb() 21870 * 21871 * Description: This function is the call back function used by the scsi watch 21872 * facility. The scsi watch facility sends the "Test Unit Ready" 21873 * and processes the status. If applicable (i.e. a "Unit Attention" 21874 * status and automatic "Request Sense" not used) the scsi watch 21875 * facility will send a "Request Sense" and retrieve the sense data 21876 * to be passed to this callback function. In either case the 21877 * automatic "Request Sense" or the facility submitting one, this 21878 * callback is passed the status and sense data. 21879 * 21880 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21881 * among multiple watches that share this callback function 21882 * resultp - scsi watch facility result packet containing scsi 21883 * packet, status byte and sense data 21884 * 21885 * Return Code: 0 - continue the watch task 21886 * non-zero - terminate the watch task 21887 */ 21888 21889 static int 21890 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21891 { 21892 struct sd_lun *un; 21893 struct scsi_status *statusp; 21894 uint8_t *sensep; 21895 struct scsi_pkt *pkt; 21896 uchar_t actual_sense_length; 21897 dev_t dev = (dev_t)arg; 21898 21899 ASSERT(resultp != NULL); 21900 statusp = resultp->statusp; 21901 sensep = (uint8_t *)resultp->sensep; 21902 pkt = resultp->pkt; 21903 actual_sense_length = resultp->actual_sense_length; 21904 21905 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21906 return (ENXIO); 21907 } 21908 21909 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21910 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21911 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21912 21913 /* Begin processing of the status and/or sense data */ 21914 if (pkt->pkt_reason != CMD_CMPLT) { 21915 /* Handle the incomplete packet */ 21916 sd_mhd_watch_incomplete(un, pkt); 21917 return (0); 21918 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21919 if (*((unsigned char *)statusp) 21920 == STATUS_RESERVATION_CONFLICT) { 21921 /* 21922 * Handle a reservation conflict by panicking if 21923 * configured for failfast or by logging the conflict 21924 * and updating the reservation status 21925 */ 21926 mutex_enter(SD_MUTEX(un)); 21927 if ((un->un_resvd_status & SD_FAILFAST) && 21928 (sd_failfast_enable)) { 21929 
sd_panic_for_res_conflict(un); 21930 /*NOTREACHED*/ 21931 } 21932 SD_INFO(SD_LOG_IOCTL_MHD, un, 21933 "sd_mhd_watch_cb: Reservation Conflict\n"); 21934 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21935 mutex_exit(SD_MUTEX(un)); 21936 } 21937 } 21938 21939 if (sensep != NULL) { 21940 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21941 mutex_enter(SD_MUTEX(un)); 21942 if ((scsi_sense_asc(sensep) == 21943 SD_SCSI_RESET_SENSE_CODE) && 21944 (un->un_resvd_status & SD_RESERVE)) { 21945 /* 21946 * The additional sense code indicates a power 21947 * on or bus device reset has occurred; update 21948 * the reservation status. 21949 */ 21950 un->un_resvd_status |= 21951 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21952 SD_INFO(SD_LOG_IOCTL_MHD, un, 21953 "sd_mhd_watch_cb: Lost Reservation\n"); 21954 } 21955 } else { 21956 return (0); 21957 } 21958 } else { 21959 mutex_enter(SD_MUTEX(un)); 21960 } 21961 21962 if ((un->un_resvd_status & SD_RESERVE) && 21963 (un->un_resvd_status & SD_LOST_RESERVE)) { 21964 if (un->un_resvd_status & SD_WANT_RESERVE) { 21965 /* 21966 * A reset occurred in between the last probe and this 21967 * one so if a timeout is pending cancel it. 21968 */ 21969 if (un->un_resvd_timeid) { 21970 timeout_id_t temp_id = un->un_resvd_timeid; 21971 un->un_resvd_timeid = NULL; 21972 mutex_exit(SD_MUTEX(un)); 21973 (void) untimeout(temp_id); 21974 mutex_enter(SD_MUTEX(un)); 21975 } 21976 un->un_resvd_status &= ~SD_WANT_RESERVE; 21977 } 21978 if (un->un_resvd_timeid == 0) { 21979 /* Schedule a timeout to handle the lost reservation */ 21980 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21981 (void *)dev, 21982 drv_usectohz(sd_reinstate_resv_delay)); 21983 } 21984 } 21985 mutex_exit(SD_MUTEX(un)); 21986 return (0); 21987 } 21988 21989 21990 /* 21991 * Function: sd_mhd_watch_incomplete() 21992 * 21993 * Description: This function is used to find out why a scsi pkt sent by the 21994 * scsi watch facility was not completed. Under some scenarios this 21995 * routine will return. Otherwise it will send a bus reset to see 21996 * if the drive is still online. 21997 * 21998 * Arguments: un - driver soft state (unit) structure 21999 * pkt - incomplete scsi pkt 22000 */ 22001 22002 static void 22003 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22004 { 22005 int be_chatty; 22006 int perr; 22007 22008 ASSERT(pkt != NULL); 22009 ASSERT(un != NULL); 22010 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22011 perr = (pkt->pkt_statistics & STAT_PERR); 22012 22013 mutex_enter(SD_MUTEX(un)); 22014 if (un->un_state == SD_STATE_DUMPING) { 22015 mutex_exit(SD_MUTEX(un)); 22016 return; 22017 } 22018 22019 switch (pkt->pkt_reason) { 22020 case CMD_UNX_BUS_FREE: 22021 /* 22022 * If we had a parity error that caused the target to drop BSY*, 22023 * don't be chatty about it. 22024 */ 22025 if (perr && be_chatty) { 22026 be_chatty = 0; 22027 } 22028 break; 22029 case CMD_TAG_REJECT: 22030 /* 22031 * The SCSI-2 spec states that a tag reject will be sent by the 22032 * target if tagged queuing is not supported. A tag reject may 22033 * also be sent during certain initialization periods or to 22034 * control internal resources. For the latter case the target 22035 * may also return Queue Full. 22036 * 22037 * If this driver receives a tag reject from a target that is 22038 * going through an init period or controlling internal 22039 * resources tagged queuing will be disabled. 
This is less
		 * than optimal behavior, but the driver is unable to
		 * determine the target state and assumes tagged queuing is
		 * not supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough
		 * and reset the target and/or bus unless selection did not
		 * complete (indicated by STATE_GOT_BUS) in which case we
		 * don't want to go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of command status byte for use in
 *		logging.
 *
 * Arguments: status - a status byte
 *
 * Return Code: char * - string containing status description.
22148 */ 22149 22150 static char * 22151 sd_sname(uchar_t status) 22152 { 22153 switch (status & STATUS_MASK) { 22154 case STATUS_GOOD: 22155 return ("good status"); 22156 case STATUS_CHECK: 22157 return ("check condition"); 22158 case STATUS_MET: 22159 return ("condition met"); 22160 case STATUS_BUSY: 22161 return ("busy"); 22162 case STATUS_INTERMEDIATE: 22163 return ("intermediate"); 22164 case STATUS_INTERMEDIATE_MET: 22165 return ("intermediate - condition met"); 22166 case STATUS_RESERVATION_CONFLICT: 22167 return ("reservation_conflict"); 22168 case STATUS_TERMINATED: 22169 return ("command terminated"); 22170 case STATUS_QFULL: 22171 return ("queue full"); 22172 default: 22173 return ("<unknown status>"); 22174 } 22175 } 22176 22177 22178 /* 22179 * Function: sd_mhd_resvd_recover() 22180 * 22181 * Description: This function adds a reservation entry to the 22182 * sd_resv_reclaim_request list and signals the reservation 22183 * reclaim thread that there is work pending. If the reservation 22184 * reclaim thread has not been previously created this function 22185 * will kick it off. 22186 * 22187 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22188 * among multiple watches that share this callback function 22189 * 22190 * Context: This routine is called by timeout() and is run in interrupt 22191 * context. It must not sleep or call other functions which may 22192 * sleep. 22193 */ 22194 22195 static void 22196 sd_mhd_resvd_recover(void *arg) 22197 { 22198 dev_t dev = (dev_t)arg; 22199 struct sd_lun *un; 22200 struct sd_thr_request *sd_treq = NULL; 22201 struct sd_thr_request *sd_cur = NULL; 22202 struct sd_thr_request *sd_prev = NULL; 22203 int already_there = 0; 22204 22205 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22206 return; 22207 } 22208 22209 mutex_enter(SD_MUTEX(un)); 22210 un->un_resvd_timeid = NULL; 22211 if (un->un_resvd_status & SD_WANT_RESERVE) { 22212 /* 22213 * There was a reset so don't issue the reserve, allow the 22214 * sd_mhd_watch_cb callback function to notice this and 22215 * reschedule the timeout for reservation. 22216 */ 22217 mutex_exit(SD_MUTEX(un)); 22218 return; 22219 } 22220 mutex_exit(SD_MUTEX(un)); 22221 22222 /* 22223 * Add this device to the sd_resv_reclaim_request list and the 22224 * sd_resv_reclaim_thread should take care of the rest. 22225 * 22226 * Note: We can't sleep in this context so if the memory allocation 22227 * fails allow the sd_mhd_watch_cb callback function to notice this and 22228 * reschedule the timeout for reservation. 
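 *
 * The timeout lifecycle here pairs with sd_mhd_watch_cb() above: the
 * watch callback schedules this routine with timeout(9F) and cancels
 * it with untimeout(9F) after a reset, snapshotting and clearing
 * un_resvd_timeid under SD_MUTEX but dropping the mutex around the
 * untimeout() call itself. That drop matters: untimeout() waits for a
 * callback that has already fired, and this routine acquires SD_MUTEX,
 * so holding the mutex across untimeout() could deadlock. A minimal
 * sketch of the safe pattern (names as in the surrounding code):
 *
 *	timeout_id_t temp_id = un->un_resvd_timeid;
 *	un->un_resvd_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(temp_id);
 *	mutex_enter(SD_MUTEX(un));
 *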
(4378460) 22229 */ 22230 sd_treq = (struct sd_thr_request *) 22231 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22232 if (sd_treq == NULL) { 22233 return; 22234 } 22235 22236 sd_treq->sd_thr_req_next = NULL; 22237 sd_treq->dev = dev; 22238 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22239 if (sd_tr.srq_thr_req_head == NULL) { 22240 sd_tr.srq_thr_req_head = sd_treq; 22241 } else { 22242 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22243 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22244 if (sd_cur->dev == dev) { 22245 /* 22246 * already in Queue so don't log 22247 * another request for the device 22248 */ 22249 already_there = 1; 22250 break; 22251 } 22252 sd_prev = sd_cur; 22253 } 22254 if (!already_there) { 22255 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22256 "logging request for %lx\n", dev); 22257 sd_prev->sd_thr_req_next = sd_treq; 22258 } else { 22259 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22260 } 22261 } 22262 22263 /* 22264 * Create a kernel thread to do the reservation reclaim and free up this 22265 * thread. We cannot block this thread while we go away to do the 22266 * reservation reclaim 22267 */ 22268 if (sd_tr.srq_resv_reclaim_thread == NULL) 22269 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22270 sd_resv_reclaim_thread, NULL, 22271 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22272 22273 /* Tell the reservation reclaim thread that it has work to do */ 22274 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22275 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22276 } 22277 22278 /* 22279 * Function: sd_resv_reclaim_thread() 22280 * 22281 * Description: This function implements the reservation reclaim operations 22282 * 22283 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22284 * among multiple watches that share this callback function 22285 */ 22286 22287 static void 22288 sd_resv_reclaim_thread() 22289 { 22290 struct sd_lun *un; 22291 struct sd_thr_request *sd_mhreq; 22292 22293 /* Wait for work */ 22294 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22295 if (sd_tr.srq_thr_req_head == NULL) { 22296 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22297 &sd_tr.srq_resv_reclaim_mutex); 22298 } 22299 22300 /* Loop while we have work */ 22301 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22302 un = ddi_get_soft_state(sd_state, 22303 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22304 if (un == NULL) { 22305 /* 22306 * softstate structure is NULL so just 22307 * dequeue the request and continue 22308 */ 22309 sd_tr.srq_thr_req_head = 22310 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22311 kmem_free(sd_tr.srq_thr_cur_req, 22312 sizeof (struct sd_thr_request)); 22313 continue; 22314 } 22315 22316 /* dequeue the request */ 22317 sd_mhreq = sd_tr.srq_thr_cur_req; 22318 sd_tr.srq_thr_req_head = 22319 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22320 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22321 22322 /* 22323 * Reclaim reservation only if SD_RESERVE is still set. There 22324 * may have been a call to MHIOCRELEASE before we got here. 22325 */ 22326 mutex_enter(SD_MUTEX(un)); 22327 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22328 /* 22329 * Note: The SD_LOST_RESERVE flag is cleared before 22330 * reclaiming the reservation. 
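 *
 * (Aside on the producer side above: sd_mhd_resvd_recover() creates
 * the reclaim thread lazily, the first time a request is queued. A
 * distilled, hypothetical sketch of that queue-plus-worker pattern,
 * with q_mutex, q_cv and enqueue() standing in for the sd_tr fields:
 *
 *	mutex_enter(&q_mutex);
 *	enqueue(req);
 *	if (worker == NULL)
 *		worker = thread_create(NULL, 0, worker_func, NULL, 0,
 *		    &p0, TS_RUN, v.v_maxsyspri - 2);
 *	cv_signal(&q_cv);
 *	mutex_exit(&q_mutex);
 *
 * where worker_func() drains the queue under q_mutex and calls
 * thread_exit() when it empties, as this thread does below.)
 *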
If this is done after the 22331 * call to sd_reserve_release a reservation loss in the 22332 * window between pkt completion of reserve cmd and 22333 * mutex_enter below may not be recognized 22334 */ 22335 un->un_resvd_status &= ~SD_LOST_RESERVE; 22336 mutex_exit(SD_MUTEX(un)); 22337 22338 if (sd_reserve_release(sd_mhreq->dev, 22339 SD_RESERVE) == 0) { 22340 mutex_enter(SD_MUTEX(un)); 22341 un->un_resvd_status |= SD_RESERVE; 22342 mutex_exit(SD_MUTEX(un)); 22343 SD_INFO(SD_LOG_IOCTL_MHD, un, 22344 "sd_resv_reclaim_thread: " 22345 "Reservation Recovered\n"); 22346 } else { 22347 mutex_enter(SD_MUTEX(un)); 22348 un->un_resvd_status |= SD_LOST_RESERVE; 22349 mutex_exit(SD_MUTEX(un)); 22350 SD_INFO(SD_LOG_IOCTL_MHD, un, 22351 "sd_resv_reclaim_thread: Failed " 22352 "Reservation Recovery\n"); 22353 } 22354 } else { 22355 mutex_exit(SD_MUTEX(un)); 22356 } 22357 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22358 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22359 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22360 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22361 /* 22362 * wakeup the destroy thread if anyone is waiting on 22363 * us to complete. 22364 */ 22365 cv_signal(&sd_tr.srq_inprocess_cv); 22366 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22367 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22368 } 22369 22370 /* 22371 * cleanup the sd_tr structure now that this thread will not exist 22372 */ 22373 ASSERT(sd_tr.srq_thr_req_head == NULL); 22374 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22375 sd_tr.srq_resv_reclaim_thread = NULL; 22376 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22377 thread_exit(); 22378 } 22379 22380 22381 /* 22382 * Function: sd_rmv_resv_reclaim_req() 22383 * 22384 * Description: This function removes any pending reservation reclaim requests 22385 * for the specified device. 22386 * 22387 * Arguments: dev - the device 'dev_t' 22388 */ 22389 22390 static void 22391 sd_rmv_resv_reclaim_req(dev_t dev) 22392 { 22393 struct sd_thr_request *sd_mhreq; 22394 struct sd_thr_request *sd_prev; 22395 22396 /* Remove a reservation reclaim request from the list */ 22397 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22398 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22399 /* 22400 * We are attempting to reinstate reservation for 22401 * this device. We wait for sd_reserve_release() 22402 * to return before we return. 22403 */ 22404 cv_wait(&sd_tr.srq_inprocess_cv, 22405 &sd_tr.srq_resv_reclaim_mutex); 22406 } else { 22407 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22408 if (sd_mhreq && sd_mhreq->dev == dev) { 22409 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22410 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22411 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22412 return; 22413 } 22414 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22415 if (sd_mhreq && sd_mhreq->dev == dev) { 22416 break; 22417 } 22418 sd_prev = sd_mhreq; 22419 } 22420 if (sd_mhreq != NULL) { 22421 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22422 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22423 } 22424 } 22425 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22426 } 22427 22428 22429 /* 22430 * Function: sd_mhd_reset_notify_cb() 22431 * 22432 * Description: This is a call back function for scsi_reset_notify. This 22433 * function updates the softstate reserved status and logs the 22434 * reset. The driver scsi watch facility callback function 22435 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22436 * will reclaim the reservation. 
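 *
 * A hypothetical sketch of how such a callback is registered with the
 * transport via scsi_reset_notify(9F) (the actual registration sites
 * are elsewhere in this driver, e.g. the multihost ioctl paths):
 *
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);
 *
 * with SCSI_RESET_CANCEL used to unregister once the reservation is
 * released.
 *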
22437 * 22438 * Arguments: arg - driver soft state (unit) structure 22439 */ 22440 22441 static void 22442 sd_mhd_reset_notify_cb(caddr_t arg) 22443 { 22444 struct sd_lun *un = (struct sd_lun *)arg; 22445 22446 mutex_enter(SD_MUTEX(un)); 22447 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22448 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22449 SD_INFO(SD_LOG_IOCTL_MHD, un, 22450 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22451 } 22452 mutex_exit(SD_MUTEX(un)); 22453 } 22454 22455 22456 /* 22457 * Function: sd_take_ownership() 22458 * 22459 * Description: This routine implements an algorithm to achieve a stable 22460 * reservation on disks which don't implement priority reserve, 22461 * and makes sure that re-reservation attempts by other hosts fail. 22462 * This algorithm consists of a loop that keeps issuing the RESERVE 22463 * for some period of time (min_ownership_delay, default 6 seconds). 22464 * During that loop, it looks to see if there has been a bus device 22465 * reset or bus reset (both of which cause an existing reservation 22466 * to be lost). If the reservation is lost, issue RESERVE until a 22467 * period of min_ownership_delay with no resets has gone by, or 22468 * until max_ownership_delay has expired. This loop ensures that 22469 * the host really did manage to reserve the device, in spite of 22470 * resets. The looping for min_ownership_delay (default six 22471 * seconds) is important to early generation clustering products, 22472 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22473 * MHIOCENFAILFAST periodic timer of two seconds. By having 22474 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22475 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22476 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22477 * have already noticed, via the MHIOCENFAILFAST polling, that it 22478 * no longer "owns" the disk and will have panicked itself. Thus, 22479 * the host issuing the MHIOCTKOWN is assured (with timing 22480 * dependencies) that by the time it actually starts to use the 22481 * disk for real work, the old owner is no longer accessing it. 22482 * 22483 * min_ownership_delay is the minimum amount of time for which the 22484 * disk must be reserved continuously devoid of resets before the 22485 * MHIOCTKOWN ioctl will return success. 22486 * 22487 * max_ownership_delay indicates the amount of time by which the 22488 * take ownership should succeed or time out with an error. 22489 * 22490 * Arguments: dev - the device 'dev_t' 22491 * *p - struct containing timing info. 22492 * 22493 * Return Code: 0 for success or error code 22494 */ 22495 22496 static int 22497 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22498 { 22499 struct sd_lun *un; 22500 int rval; 22501 int err; 22502 int reservation_count = 0; 22503 int min_ownership_delay = 6000000; /* in usec */ 22504 int max_ownership_delay = 30000000; /* in usec */ 22505 clock_t start_time; /* starting time of this algorithm */ 22506 clock_t end_time; /* time limit for giving up */ 22507 clock_t ownership_time; /* time limit for stable ownership */ 22508 clock_t current_time; 22509 clock_t previous_current_time; 22510 22511 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22512 return (ENXIO); 22513 } 22514 22515 /* 22516 * Attempt a device reservation. A priority reservation is requested. 
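 *
 * Note that the ownership loop below compares lbolt-derived times as
 * signed differences, e.g. "while (current_time - end_time < 0)"
 * rather than "while (current_time < end_time)". Because clock_t is
 * signed, the subtraction form stays correct even if the tick counter
 * wraps. A minimal sketch of the idiom:
 *
 *	clock_t now = ddi_get_lbolt();
 *	clock_t deadline = now + drv_usectohz(max_ownership_delay);
 *
 *	while (ddi_get_lbolt() - deadline < 0) {
 *		... still before the deadline, keep trying ...
 *	}
 *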
22517 */ 22518 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22519 != SD_SUCCESS) { 22520 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22521 "sd_take_ownership: return(1)=%d\n", rval); 22522 return (rval); 22523 } 22524 22525 /* Update the softstate reserved status to indicate the reservation */ 22526 mutex_enter(SD_MUTEX(un)); 22527 un->un_resvd_status |= SD_RESERVE; 22528 un->un_resvd_status &= 22529 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22530 mutex_exit(SD_MUTEX(un)); 22531 22532 if (p != NULL) { 22533 if (p->min_ownership_delay != 0) { 22534 min_ownership_delay = p->min_ownership_delay * 1000; 22535 } 22536 if (p->max_ownership_delay != 0) { 22537 max_ownership_delay = p->max_ownership_delay * 1000; 22538 } 22539 } 22540 SD_INFO(SD_LOG_IOCTL_MHD, un, 22541 "sd_take_ownership: min, max delays: %d, %d\n", 22542 min_ownership_delay, max_ownership_delay); 22543 22544 start_time = ddi_get_lbolt(); 22545 current_time = start_time; 22546 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22547 end_time = start_time + drv_usectohz(max_ownership_delay); 22548 22549 while (current_time - end_time < 0) { 22550 delay(drv_usectohz(500000)); 22551 22552 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22553 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22554 mutex_enter(SD_MUTEX(un)); 22555 rval = (un->un_resvd_status & 22556 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22557 mutex_exit(SD_MUTEX(un)); 22558 break; 22559 } 22560 } 22561 previous_current_time = current_time; 22562 current_time = ddi_get_lbolt(); 22563 mutex_enter(SD_MUTEX(un)); 22564 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22565 ownership_time = ddi_get_lbolt() + 22566 drv_usectohz(min_ownership_delay); 22567 reservation_count = 0; 22568 } else { 22569 reservation_count++; 22570 } 22571 un->un_resvd_status |= SD_RESERVE; 22572 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22573 mutex_exit(SD_MUTEX(un)); 22574 22575 SD_INFO(SD_LOG_IOCTL_MHD, un, 22576 "sd_take_ownership: ticks for loop iteration=%ld, " 22577 "reservation=%s\n", (current_time - previous_current_time), 22578 reservation_count ? 
"ok" : "reclaimed"); 22579 22580 if (current_time - ownership_time >= 0 && 22581 reservation_count >= 4) { 22582 rval = 0; /* Achieved a stable ownership */ 22583 break; 22584 } 22585 if (current_time - end_time >= 0) { 22586 rval = EACCES; /* No ownership in max possible time */ 22587 break; 22588 } 22589 } 22590 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22591 "sd_take_ownership: return(2)=%d\n", rval); 22592 return (rval); 22593 } 22594 22595 22596 /* 22597 * Function: sd_reserve_release() 22598 * 22599 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22600 * PRIORITY RESERVE commands based on a user specified command type 22601 * 22602 * Arguments: dev - the device 'dev_t' 22603 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22604 * SD_RESERVE, SD_RELEASE 22605 * 22606 * Return Code: 0 or Error Code 22607 */ 22608 22609 static int 22610 sd_reserve_release(dev_t dev, int cmd) 22611 { 22612 struct uscsi_cmd *com = NULL; 22613 struct sd_lun *un = NULL; 22614 char cdb[CDB_GROUP0]; 22615 int rval; 22616 22617 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22618 (cmd == SD_PRIORITY_RESERVE)); 22619 22620 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22621 return (ENXIO); 22622 } 22623 22624 /* instantiate and initialize the command and cdb */ 22625 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22626 bzero(cdb, CDB_GROUP0); 22627 com->uscsi_flags = USCSI_SILENT; 22628 com->uscsi_timeout = un->un_reserve_release_time; 22629 com->uscsi_cdblen = CDB_GROUP0; 22630 com->uscsi_cdb = cdb; 22631 if (cmd == SD_RELEASE) { 22632 cdb[0] = SCMD_RELEASE; 22633 } else { 22634 cdb[0] = SCMD_RESERVE; 22635 } 22636 22637 /* Send the command. */ 22638 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22639 SD_PATH_STANDARD); 22640 22641 /* 22642 * "break" a reservation that is held by another host, by issuing a 22643 * reset if priority reserve is desired, and we could not get the 22644 * device. 22645 */ 22646 if ((cmd == SD_PRIORITY_RESERVE) && 22647 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22648 /* 22649 * First try to reset the LUN. If we cannot, then try a target 22650 * reset, followed by a bus reset if the target reset fails. 22651 */ 22652 int reset_retval = 0; 22653 if (un->un_f_lun_reset_enabled == TRUE) { 22654 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22655 } 22656 if (reset_retval == 0) { 22657 /* The LUN reset either failed or was not issued */ 22658 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22659 } 22660 if ((reset_retval == 0) && 22661 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22662 rval = EIO; 22663 kmem_free(com, sizeof (*com)); 22664 return (rval); 22665 } 22666 22667 bzero(com, sizeof (struct uscsi_cmd)); 22668 com->uscsi_flags = USCSI_SILENT; 22669 com->uscsi_cdb = cdb; 22670 com->uscsi_cdblen = CDB_GROUP0; 22671 com->uscsi_timeout = 5; 22672 22673 /* 22674 * Reissue the last reserve command, this time without request 22675 * sense. Assume that it is just a regular reserve command. 22676 */ 22677 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22678 SD_PATH_STANDARD); 22679 } 22680 22681 /* Return an error if still getting a reservation conflict. 
*/ 22682 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22683 rval = EACCES; 22684 } 22685 22686 kmem_free(com, sizeof (*com)); 22687 return (rval); 22688 } 22689 22690 22691 #define SD_NDUMP_RETRIES 12 22692 /* 22693 * System Crash Dump routine 22694 */ 22695 22696 static int 22697 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22698 { 22699 int instance; 22700 int partition; 22701 int i; 22702 int err; 22703 struct sd_lun *un; 22704 struct scsi_pkt *wr_pktp; 22705 struct buf *wr_bp; 22706 struct buf wr_buf; 22707 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22708 daddr_t tgt_blkno; /* rmw - blkno for target */ 22709 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22710 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22711 size_t io_start_offset; 22712 int doing_rmw = FALSE; 22713 int rval; 22714 #if defined(__i386) || defined(__amd64) 22715 ssize_t dma_resid; 22716 daddr_t oblkno; 22717 #endif 22718 diskaddr_t nblks = 0; 22719 diskaddr_t start_block; 22720 22721 instance = SDUNIT(dev); 22722 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22723 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22724 return (ENXIO); 22725 } 22726 22727 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22728 22729 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22730 22731 partition = SDPART(dev); 22732 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22733 22734 /* Validate the blocks to dump against the partition size. */ 22735 22736 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22737 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22738 22739 if ((blkno + nblk) > nblks) { 22740 SD_TRACE(SD_LOG_DUMP, un, 22741 "sddump: dump range larger than partition: " 22742 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22743 blkno, nblk, nblks); 22744 return (EINVAL); 22745 } 22746 22747 mutex_enter(&un->un_pm_mutex); 22748 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22749 struct scsi_pkt *start_pktp; 22750 22751 mutex_exit(&un->un_pm_mutex); 22752 22753 /* 22754 * Use the pm framework to power on the HBA first. 22755 */ 22756 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22757 22758 /* 22759 * Dump no longer uses sdpower to power on a device; the start 22760 * is done in-line here so that it can be done in polled mode. 22761 */ 22762 22763 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22764 22765 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22766 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22767 22768 if (start_pktp == NULL) { 22769 /* We were not given a SCSI packet, fail. */ 22770 return (EIO); 22771 } 22772 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22773 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22774 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22775 start_pktp->pkt_flags = FLAG_NOINTR; 22776 22777 mutex_enter(SD_MUTEX(un)); 22778 SD_FILL_SCSI1_LUN(un, start_pktp); 22779 mutex_exit(SD_MUTEX(un)); 22780 /* 22781 * Scsi_poll returns 0 (success) if the command completes and 22782 * the status block is STATUS_GOOD. 22783 */ 22784 if (sd_scsi_poll(un, start_pktp) != 0) { 22785 scsi_destroy_pkt(start_pktp); 22786 return (EIO); 22787 } 22788 scsi_destroy_pkt(start_pktp); 22789 (void) sd_ddi_pm_resume(un); 22790 } else { 22791 mutex_exit(&un->un_pm_mutex); 22792 } 22793 22794 mutex_enter(SD_MUTEX(un)); 22795 un->un_throttle = 0; 22796 22797 /* 22798 * The first time through, reset the specific target device. 22799 * However, when cpr calls sddump we know that sd is in 22800 * a good state, so no bus reset is required. 
22801 * Clear sense data via Request Sense cmd. 22802 * In sddump we don't care about allow_bus_device_reset anymore 22803 */ 22804 22805 if ((un->un_state != SD_STATE_SUSPENDED) && 22806 (un->un_state != SD_STATE_DUMPING)) { 22807 22808 New_state(un, SD_STATE_DUMPING); 22809 22810 if (un->un_f_is_fibre == FALSE) { 22811 mutex_exit(SD_MUTEX(un)); 22812 /* 22813 * Attempt a bus reset for parallel scsi. 22814 * 22815 * Note: A bus reset is required because on some host 22816 * systems (i.e. E420R) a bus device reset is 22817 * insufficient to reset the state of the target. 22818 * 22819 * Note: Don't issue the reset for fibre-channel, 22820 * because this tends to hang the bus (loop) for 22821 * too long while everyone is logging out and in 22822 * and the deadman timer for dumping will fire 22823 * before the dump is complete. 22824 */ 22825 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22826 mutex_enter(SD_MUTEX(un)); 22827 Restore_state(un); 22828 mutex_exit(SD_MUTEX(un)); 22829 return (EIO); 22830 } 22831 22832 /* Delay to give the device some recovery time. */ 22833 drv_usecwait(10000); 22834 22835 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22836 SD_INFO(SD_LOG_DUMP, un, 22837 "sddump: sd_send_polled_RQS failed\n"); 22838 } 22839 mutex_enter(SD_MUTEX(un)); 22840 } 22841 } 22842 22843 /* 22844 * Convert the partition-relative block number to a 22845 * disk physical block number. 22846 */ 22847 blkno += start_block; 22848 22849 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22850 22851 22852 /* 22853 * Check if the device has a non-512 block size. 22854 */ 22855 wr_bp = NULL; 22856 if (NOT_DEVBSIZE(un)) { 22857 tgt_byte_offset = blkno * un->un_sys_blocksize; 22858 tgt_byte_count = nblk * un->un_sys_blocksize; 22859 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22860 (tgt_byte_count % un->un_tgt_blocksize)) { 22861 doing_rmw = TRUE; 22862 /* 22863 * Calculate the block number and number of block 22864 * in terms of the media block size. 22865 */ 22866 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22867 tgt_nblk = 22868 ((tgt_byte_offset + tgt_byte_count + 22869 (un->un_tgt_blocksize - 1)) / 22870 un->un_tgt_blocksize) - tgt_blkno; 22871 22872 /* 22873 * Invoke the routine which is going to do read part 22874 * of read-modify-write. 22875 * Note that this routine returns a pointer to 22876 * a valid bp in wr_bp. 22877 */ 22878 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22879 &wr_bp); 22880 if (err) { 22881 mutex_exit(SD_MUTEX(un)); 22882 return (err); 22883 } 22884 /* 22885 * Offset is being calculated as - 22886 * (original block # * system block size) - 22887 * (new block # * target block size) 22888 */ 22889 io_start_offset = 22890 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22891 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22892 22893 ASSERT((io_start_offset >= 0) && 22894 (io_start_offset < un->un_tgt_blocksize)); 22895 /* 22896 * Do the modify portion of read modify write. 
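 *
 * Worked example of the read-modify-write math above, assuming
 * 512-byte system blocks, 2048-byte media blocks, blkno = 3 and
 * nblk = 2:
 *
 *	tgt_byte_offset = 3 * 512 = 1536
 *	tgt_byte_count  = 2 * 512 = 1024
 *	tgt_blkno       = 1536 / 2048 = 0
 *	tgt_nblk        = ((1536 + 1024 + 2047) / 2048) - 0 = 2
 *	io_start_offset = (3 * 512) - (0 * 2048) = 1536
 *
 * so media blocks 0 and 1 are read, the 1024 bytes starting at offset
 * 1536 in that buffer are overwritten with dump data, and both media
 * blocks are written back.
 *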
22897 */ 22898 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22899 (size_t)nblk * un->un_sys_blocksize); 22900 } else { 22901 doing_rmw = FALSE; 22902 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22903 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22904 } 22905 22906 /* Convert blkno and nblk to target blocks */ 22907 blkno = tgt_blkno; 22908 nblk = tgt_nblk; 22909 } else { 22910 wr_bp = &wr_buf; 22911 bzero(wr_bp, sizeof (struct buf)); 22912 wr_bp->b_flags = B_BUSY; 22913 wr_bp->b_un.b_addr = addr; 22914 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22915 wr_bp->b_resid = 0; 22916 } 22917 22918 mutex_exit(SD_MUTEX(un)); 22919 22920 /* 22921 * Obtain a SCSI packet for the write command. 22922 * It should be safe to call the allocator here without 22923 * worrying about being locked for DVMA mapping because 22924 * the address we're passed is already a DVMA mapping 22925 * 22926 * We are also not going to worry about semaphore ownership 22927 * in the dump buffer. Dumping is single threaded at present. 22928 */ 22929 22930 wr_pktp = NULL; 22931 22932 #if defined(__i386) || defined(__amd64) 22933 dma_resid = wr_bp->b_bcount; 22934 oblkno = blkno; 22935 while (dma_resid != 0) { 22936 #endif 22937 22938 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22939 wr_bp->b_flags &= ~B_ERROR; 22940 22941 #if defined(__i386) || defined(__amd64) 22942 blkno = oblkno + 22943 ((wr_bp->b_bcount - dma_resid) / 22944 un->un_tgt_blocksize); 22945 nblk = dma_resid / un->un_tgt_blocksize; 22946 22947 if (wr_pktp) { 22948 /* Partial DMA transfers after initial transfer */ 22949 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22950 blkno, nblk); 22951 } else { 22952 /* Initial transfer */ 22953 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22954 un->un_pkt_flags, NULL_FUNC, NULL, 22955 blkno, nblk); 22956 } 22957 #else 22958 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22959 0, NULL_FUNC, NULL, blkno, nblk); 22960 #endif 22961 22962 if (rval == 0) { 22963 /* We were given a SCSI packet, continue. 
*/ 22964 break; 22965 } 22966 22967 if (i == 0) { 22968 if (wr_bp->b_flags & B_ERROR) { 22969 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22970 "no resources for dumping; " 22971 "error code: 0x%x, retrying", 22972 geterror(wr_bp)); 22973 } else { 22974 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22975 "no resources for dumping; retrying"); 22976 } 22977 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22978 if (wr_bp->b_flags & B_ERROR) { 22979 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22980 "no resources for dumping; error code: " 22981 "0x%x, retrying\n", geterror(wr_bp)); 22982 } 22983 } else { 22984 if (wr_bp->b_flags & B_ERROR) { 22985 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22986 "no resources for dumping; " 22987 "error code: 0x%x, retries failed, " 22988 "giving up.\n", geterror(wr_bp)); 22989 } else { 22990 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22991 "no resources for dumping; " 22992 "retries failed, giving up.\n"); 22993 } 22994 mutex_enter(SD_MUTEX(un)); 22995 Restore_state(un); 22996 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22997 mutex_exit(SD_MUTEX(un)); 22998 scsi_free_consistent_buf(wr_bp); 22999 } else { 23000 mutex_exit(SD_MUTEX(un)); 23001 } 23002 return (EIO); 23003 } 23004 drv_usecwait(10000); 23005 } 23006 23007 #if defined(__i386) || defined(__amd64) 23008 /* 23009 * save the resid from PARTIAL_DMA 23010 */ 23011 dma_resid = wr_pktp->pkt_resid; 23012 if (dma_resid != 0) 23013 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23014 wr_pktp->pkt_resid = 0; 23015 #endif 23016 23017 /* SunBug 1222170 */ 23018 wr_pktp->pkt_flags = FLAG_NOINTR; 23019 23020 err = EIO; 23021 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23022 23023 /* 23024 * Scsi_poll returns 0 (success) if the command completes and 23025 * the status block is STATUS_GOOD. We should only check 23026 * errors if this condition is not true. Even then we should 23027 * send our own request sense packet only if we have a check 23028 * condition and auto request sense has not been performed by 23029 * the hba. 23030 */ 23031 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23032 23033 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23034 (wr_pktp->pkt_resid == 0)) { 23035 err = SD_SUCCESS; 23036 break; 23037 } 23038 23039 /* 23040 * Check CMD_DEV_GONE 1st, give up if device is gone. 23041 */ 23042 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23043 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23044 "Device is gone\n"); 23045 break; 23046 } 23047 23048 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23049 SD_INFO(SD_LOG_DUMP, un, 23050 "sddump: write failed with CHECK, try # %d\n", i); 23051 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23052 (void) sd_send_polled_RQS(un); 23053 } 23054 23055 continue; 23056 } 23057 23058 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23059 int reset_retval = 0; 23060 23061 SD_INFO(SD_LOG_DUMP, un, 23062 "sddump: write failed with BUSY, try # %d\n", i); 23063 23064 if (un->un_f_lun_reset_enabled == TRUE) { 23065 reset_retval = scsi_reset(SD_ADDRESS(un), 23066 RESET_LUN); 23067 } 23068 if (reset_retval == 0) { 23069 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23070 } 23071 (void) sd_send_polled_RQS(un); 23072 23073 } else { 23074 SD_INFO(SD_LOG_DUMP, un, 23075 "sddump: write failed with 0x%x, try # %d\n", 23076 SD_GET_PKT_STATUS(wr_pktp), i); 23077 mutex_enter(SD_MUTEX(un)); 23078 sd_reset_target(un, wr_pktp); 23079 mutex_exit(SD_MUTEX(un)); 23080 } 23081 23082 /* 23083 * If we are not getting anywhere with lun/target resets, 23084 * let's reset the bus. 
23085 */ 23086 if (i == SD_NDUMP_RETRIES/2) { 23087 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23088 (void) sd_send_polled_RQS(un); 23089 } 23090 23091 } 23092 #if defined(__i386) || defined(__amd64) 23093 } /* dma_resid */ 23094 #endif 23095 23096 scsi_destroy_pkt(wr_pktp); 23097 mutex_enter(SD_MUTEX(un)); 23098 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23099 mutex_exit(SD_MUTEX(un)); 23100 scsi_free_consistent_buf(wr_bp); 23101 } else { 23102 mutex_exit(SD_MUTEX(un)); 23103 } 23104 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23105 return (err); 23106 } 23107 23108 /* 23109 * Function: sd_scsi_poll() 23110 * 23111 * Description: This is a wrapper for the scsi_poll call. 23112 * 23113 * Arguments: sd_lun - The unit structure 23114 * scsi_pkt - The scsi packet being sent to the device. 23115 * 23116 * Return Code: 0 - Command completed successfully with good status 23117 * -1 - Command failed. This could indicate a check condition 23118 * or other status value requiring recovery action. 23119 * 23120 */ 23121 23122 static int 23123 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23124 { 23125 int status; 23126 23127 ASSERT(un != NULL); 23128 ASSERT(!mutex_owned(SD_MUTEX(un))); 23129 ASSERT(pktp != NULL); 23130 23131 status = SD_SUCCESS; 23132 23133 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23134 pktp->pkt_flags |= un->un_tagflags; 23135 pktp->pkt_flags &= ~FLAG_NODISCON; 23136 } 23137 23138 status = sd_ddi_scsi_poll(pktp); 23139 /* 23140 * Scsi_poll returns 0 (success) if the command completes and the 23141 * status block is STATUS_GOOD. We should only check errors if this 23142 * condition is not true. Even then we should send our own request 23143 * sense packet only if we have a check condition and auto 23144 * request sense has not been performed by the hba. 23145 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23146 */ 23147 if ((status != SD_SUCCESS) && 23148 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23149 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23150 (pktp->pkt_reason != CMD_DEV_GONE)) 23151 (void) sd_send_polled_RQS(un); 23152 23153 return (status); 23154 } 23155 23156 /* 23157 * Function: sd_send_polled_RQS() 23158 * 23159 * Description: This sends the request sense command to a device. 23160 * 23161 * Arguments: sd_lun - The unit structure 23162 * 23163 * Return Code: 0 - Command completed successfully with good status 23164 * -1 - Command failed. 
23165 * 23166 */ 23167 23168 static int 23169 sd_send_polled_RQS(struct sd_lun *un) 23170 { 23171 int ret_val; 23172 struct scsi_pkt *rqs_pktp; 23173 struct buf *rqs_bp; 23174 23175 ASSERT(un != NULL); 23176 ASSERT(!mutex_owned(SD_MUTEX(un))); 23177 23178 ret_val = SD_SUCCESS; 23179 23180 rqs_pktp = un->un_rqs_pktp; 23181 rqs_bp = un->un_rqs_bp; 23182 23183 mutex_enter(SD_MUTEX(un)); 23184 23185 if (un->un_sense_isbusy) { 23186 ret_val = SD_FAILURE; 23187 mutex_exit(SD_MUTEX(un)); 23188 return (ret_val); 23189 } 23190 23191 /* 23192 * If the request sense buffer (and packet) is not in use, 23193 * let's set the un_sense_isbusy and send our packet 23194 */ 23195 un->un_sense_isbusy = 1; 23196 rqs_pktp->pkt_resid = 0; 23197 rqs_pktp->pkt_reason = 0; 23198 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23199 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23200 23201 mutex_exit(SD_MUTEX(un)); 23202 23203 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23204 " 0x%p\n", rqs_bp->b_un.b_addr); 23205 23206 /* 23207 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23208 * axle - it has a call into us! 23209 */ 23210 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23211 SD_INFO(SD_LOG_COMMON, un, 23212 "sd_send_polled_RQS: RQS failed\n"); 23213 } 23214 23215 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23216 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23217 23218 mutex_enter(SD_MUTEX(un)); 23219 un->un_sense_isbusy = 0; 23220 mutex_exit(SD_MUTEX(un)); 23221 23222 return (ret_val); 23223 } 23224 23225 /* 23226 * Defines needed for localized version of the scsi_poll routine. 23227 */ 23228 #define SD_CSEC 10000 /* usecs */ 23229 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 23230 23231 23232 /* 23233 * Function: sd_ddi_scsi_poll() 23234 * 23235 * Description: Localized version of the scsi_poll routine. The purpose is to 23236 * send a scsi_pkt to a device as a polled command. This version 23237 * is to ensure more robust handling of transport errors. 23238 * Specifically this routine cures not ready, coming ready 23239 * transition for power up and reset of sonoma's. This can take 23240 * up to 45 seconds for power-on and 20 seconds for reset of a 23241 * sonoma lun. 23242 * 23243 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23244 * 23245 * Return Code: 0 - Command completed successfully with good status 23246 * -1 - Command failed. 23247 * 23248 */ 23249 23250 static int 23251 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23252 { 23253 int busy_count; 23254 int timeout; 23255 int rval = SD_FAILURE; 23256 int savef; 23257 uint8_t *sensep; 23258 long savet; 23259 void (*savec)(); 23260 /* 23261 * The following is defined in machdep.c and is used in determining if 23262 * the scsi transport system will do polled I/O instead of interrupt 23263 * I/O when called from xx_dump(). 23264 */ 23265 extern int do_polled_io; 23266 23267 /* 23268 * save old flags in pkt, to restore at end 23269 */ 23270 savef = pkt->pkt_flags; 23271 savec = pkt->pkt_comp; 23272 savet = pkt->pkt_time; 23273 23274 pkt->pkt_flags |= FLAG_NOINTR; 23275 23276 /* 23277 * XXX there is nothing in the SCSA spec that states that we should not 23278 * do a callback for polled cmds; however, removing this will break sd 23279 * and probably other target drivers 23280 */ 23281 pkt->pkt_comp = NULL; 23282 23283 /* 23284 * we don't like a polled command without timeout. 23285 * 60 seconds seems long enough. 
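 *
 * The polling budget below is kept in 10-msec units: SD_CSEC is
 * 10000 usec, so SD_SEC_TO_CSEC is 100, and
 *
 *	timeout = pkt->pkt_time * SD_SEC_TO_CSEC;
 *
 * yields 6000 iterations for the default 60-second pkt_time. Retries
 * that sleep a full second (busy, not-ready) add SD_SEC_TO_CSEC - 1 =
 * 99 to busy_count, so together with the loop increment one such pass
 * consumes a full second of the budget.
 *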
23286 */ 23287 if (pkt->pkt_time == 0) { 23288 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23289 } 23290 23291 /* 23292 * Send polled cmd. 23293 * 23294 * We do some error recovery for various errors. Tran_busy, 23295 * queue full, and non-dispatched commands are retried every 10 msec. 23296 * as they are typically transient failures. Busy status and Not 23297 * Ready are retried every second as this status takes a while to 23298 * change. Unit attention is retried for pkt_time (60) times 23299 * with no delay. 23300 */ 23301 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 23302 23303 for (busy_count = 0; busy_count < timeout; busy_count++) { 23304 int rc; 23305 int poll_delay; 23306 23307 /* 23308 * Initialize pkt status variables. 23309 */ 23310 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23311 23312 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23313 if (rc != TRAN_BUSY) { 23314 /* Transport failed - give up. */ 23315 break; 23316 } else { 23317 /* Transport busy - try again. */ 23318 poll_delay = 1 * SD_CSEC; /* 10 msec */ 23319 } 23320 } else { 23321 /* 23322 * Transport accepted - check pkt status. 23323 */ 23324 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23325 if (pkt->pkt_reason == CMD_CMPLT && 23326 rc == STATUS_CHECK && 23327 pkt->pkt_state & STATE_ARQ_DONE) { 23328 struct scsi_arq_status *arqstat = 23329 (struct scsi_arq_status *)(pkt->pkt_scbp); 23330 23331 sensep = (uint8_t *)&arqstat->sts_sensedata; 23332 } else { 23333 sensep = NULL; 23334 } 23335 23336 if ((pkt->pkt_reason == CMD_CMPLT) && 23337 (rc == STATUS_GOOD)) { 23338 /* No error - we're done */ 23339 rval = SD_SUCCESS; 23340 break; 23341 23342 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23343 /* Lost connection - give up */ 23344 break; 23345 23346 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23347 (pkt->pkt_state == 0)) { 23348 /* Pkt not dispatched - try again. */ 23349 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23350 23351 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23352 (rc == STATUS_QFULL)) { 23353 /* Queue full - try again. */ 23354 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23355 23356 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23357 (rc == STATUS_BUSY)) { 23358 /* Busy - try again. */ 23359 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23360 busy_count += (SD_SEC_TO_CSEC - 1); 23361 23362 } else if ((sensep != NULL) && 23363 (scsi_sense_key(sensep) == 23364 KEY_UNIT_ATTENTION)) { 23365 /* Unit Attention - try again */ 23366 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23367 continue; 23368 23369 } else if ((sensep != NULL) && 23370 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23371 (scsi_sense_asc(sensep) == 0x04) && 23372 (scsi_sense_ascq(sensep) == 0x01)) { 23373 /* Not ready -> ready - try again. */ 23374 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23375 busy_count += (SD_SEC_TO_CSEC - 1); 23376 23377 } else { 23378 /* BAD status - give up. 
*/ 23379 break; 23380 } 23381 } 23382 23383 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23384 !do_polled_io) { 23385 delay(drv_usectohz(poll_delay)); 23386 } else { 23387 /* we busy wait during cpr_dump or interrupt threads */ 23388 drv_usecwait(poll_delay); 23389 } 23390 } 23391 23392 pkt->pkt_flags = savef; 23393 pkt->pkt_comp = savec; 23394 pkt->pkt_time = savet; 23395 return (rval); 23396 } 23397 23398 23399 /* 23400 * Function: sd_persistent_reservation_in_read_keys 23401 * 23402 * Description: This routine is the driver entry point for handling CD-ROM 23403 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23404 * by sending the SCSI-3 PRIN commands to the device. 23405 * Processes the read keys command response by copying the 23406 * reservation key information into the user provided buffer. 23407 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23408 * 23409 * Arguments: un - Pointer to soft state struct for the target. 23410 * usrp - user provided pointer to multihost Persistent In Read 23411 * Keys structure (mhioc_inkeys_t) 23412 * flag - this argument is a pass through to ddi_copyxxx() 23413 * directly from the mode argument of ioctl(). 23414 * 23415 * Return Code: 0 - Success 23416 * EACCES 23417 * ENOTSUP 23418 * errno return code from sd_send_scsi_cmd() 23419 * 23420 * Context: Can sleep. Does not return until command is completed. 23421 */ 23422 23423 static int 23424 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23425 mhioc_inkeys_t *usrp, int flag) 23426 { 23427 #ifdef _MULTI_DATAMODEL 23428 struct mhioc_key_list32 li32; 23429 #endif 23430 sd_prin_readkeys_t *in; 23431 mhioc_inkeys_t *ptr; 23432 mhioc_key_list_t li; 23433 uchar_t *data_bufp; 23434 int data_len; 23435 int rval; 23436 size_t copysz; 23437 23438 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23439 return (EINVAL); 23440 } 23441 bzero(&li, sizeof (mhioc_key_list_t)); 23442 23443 /* 23444 * Get the listsize from user 23445 */ 23446 #ifdef _MULTI_DATAMODEL 23447 23448 switch (ddi_model_convert_from(flag & FMODELS)) { 23449 case DDI_MODEL_ILP32: 23450 copysz = sizeof (struct mhioc_key_list32); 23451 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23452 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23453 "sd_persistent_reservation_in_read_keys: " 23454 "failed ddi_copyin: mhioc_key_list32_t\n"); 23455 rval = EFAULT; 23456 goto done; 23457 } 23458 li.listsize = li32.listsize; 23459 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23460 break; 23461 23462 case DDI_MODEL_NONE: 23463 copysz = sizeof (mhioc_key_list_t); 23464 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23465 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23466 "sd_persistent_reservation_in_read_keys: " 23467 "failed ddi_copyin: mhioc_key_list_t\n"); 23468 rval = EFAULT; 23469 goto done; 23470 } 23471 break; 23472 } 23473 23474 #else /* ! 
_MULTI_DATAMODEL */ 23475 copysz = sizeof (mhioc_key_list_t); 23476 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23477 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23478 "sd_persistent_reservation_in_read_keys: " 23479 "failed ddi_copyin: mhioc_key_list_t\n"); 23480 rval = EFAULT; 23481 goto done; 23482 } 23483 #endif 23484 23485 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23486 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23487 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23488 23489 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23490 data_len, data_bufp)) != 0) { 23491 goto done; 23492 } 23493 in = (sd_prin_readkeys_t *)data_bufp; 23494 ptr->generation = BE_32(in->generation); 23495 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23496 23497 /* 23498 * Return the min(listsize, listlen) keys 23499 */ 23500 #ifdef _MULTI_DATAMODEL 23501 23502 switch (ddi_model_convert_from(flag & FMODELS)) { 23503 case DDI_MODEL_ILP32: 23504 li32.listlen = li.listlen; 23505 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23506 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23507 "sd_persistent_reservation_in_read_keys: " 23508 "failed ddi_copyout: mhioc_key_list32_t\n"); 23509 rval = EFAULT; 23510 goto done; 23511 } 23512 break; 23513 23514 case DDI_MODEL_NONE: 23515 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23516 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23517 "sd_persistent_reservation_in_read_keys: " 23518 "failed ddi_copyout: mhioc_key_list_t\n"); 23519 rval = EFAULT; 23520 goto done; 23521 } 23522 break; 23523 } 23524 23525 #else /* ! _MULTI_DATAMODEL */ 23526 23527 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23528 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23529 "sd_persistent_reservation_in_read_keys: " 23530 "failed ddi_copyout: mhioc_key_list_t\n"); 23531 rval = EFAULT; 23532 goto done; 23533 } 23534 23535 #endif /* _MULTI_DATAMODEL */ 23536 23537 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23538 li.listsize * MHIOC_RESV_KEY_SIZE); 23539 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23540 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23541 "sd_persistent_reservation_in_read_keys: " 23542 "failed ddi_copyout: keylist\n"); 23543 rval = EFAULT; 23544 } 23545 done: 23546 kmem_free(data_bufp, data_len); 23547 return (rval); 23548 } 23549 23550 23551 /* 23552 * Function: sd_persistent_reservation_in_read_resv 23553 * 23554 * Description: This routine is the driver entry point for handling CD-ROM 23555 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23556 * by sending the SCSI-3 PRIN commands to the device. 23557 * Process the read persistent reservations command response by 23558 * copying the reservation information into the user provided 23559 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 23560 * 23561 * Arguments: un - Pointer to soft state struct for the target. 23562 * usrp - user provided pointer to multihost Persistent In Read 23563 * Keys structure (mhioc_inkeys_t) 23564 * flag - this argument is a pass through to ddi_copyxxx() 23565 * directly from the mode argument of ioctl(). 23566 * 23567 * Return Code: 0 - Success 23568 * EACCES 23569 * ENOTSUP 23570 * errno return code from sd_send_scsi_cmd() 23571 * 23572 * Context: Can sleep. Does not return until command is completed. 
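 *
 * For reference, a hypothetical user-level caller of the read-keys
 * path implemented above, with a caller-chosen list size of 8:
 *
 *	#include <sys/mhd.h>
 *	#include <sys/ioctl.h>
 *
 *	int
 *	read_keys(int fd)
 *	{
 *		mhioc_inkeys_t k;
 *		mhioc_key_list_t kl;
 *		mhioc_resv_key_t keys[8];
 *
 *		kl.listsize = 8;
 *		kl.list = keys;
 *		k.li = &kl;
 *		if (ioctl(fd, MHIOCGRP_INKEYS, &k) != 0)
 *			return (-1);
 *		return ((int)kl.listlen);
 *	}
 *
 * where listlen, filled in by the driver, reports how many keys the
 * device holds, and at most listsize of them are copied out.
 *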
23573 */ 23574 23575 static int 23576 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23577 mhioc_inresvs_t *usrp, int flag) 23578 { 23579 #ifdef _MULTI_DATAMODEL 23580 struct mhioc_resv_desc_list32 resvlist32; 23581 #endif 23582 sd_prin_readresv_t *in; 23583 mhioc_inresvs_t *ptr; 23584 sd_readresv_desc_t *readresv_ptr; 23585 mhioc_resv_desc_list_t resvlist; 23586 mhioc_resv_desc_t resvdesc; 23587 uchar_t *data_bufp; 23588 int data_len; 23589 int rval; 23590 int i; 23591 size_t copysz; 23592 mhioc_resv_desc_t *bufp; 23593 23594 if ((ptr = usrp) == NULL) { 23595 return (EINVAL); 23596 } 23597 23598 /* 23599 * Get the listsize from user 23600 */ 23601 #ifdef _MULTI_DATAMODEL 23602 switch (ddi_model_convert_from(flag & FMODELS)) { 23603 case DDI_MODEL_ILP32: 23604 copysz = sizeof (struct mhioc_resv_desc_list32); 23605 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23606 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23607 "sd_persistent_reservation_in_read_resv: " 23608 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23609 rval = EFAULT; 23610 goto done; 23611 } 23612 resvlist.listsize = resvlist32.listsize; 23613 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23614 break; 23615 23616 case DDI_MODEL_NONE: 23617 copysz = sizeof (mhioc_resv_desc_list_t); 23618 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23619 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23620 "sd_persistent_reservation_in_read_resv: " 23621 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23622 rval = EFAULT; 23623 goto done; 23624 } 23625 break; 23626 } 23627 #else /* ! _MULTI_DATAMODEL */ 23628 copysz = sizeof (mhioc_resv_desc_list_t); 23629 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23630 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23631 "sd_persistent_reservation_in_read_resv: " 23632 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23633 rval = EFAULT; 23634 goto done; 23635 } 23636 #endif /* ! _MULTI_DATAMODEL */ 23637 23638 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23639 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23640 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23641 23642 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23643 data_len, data_bufp)) != 0) { 23644 goto done; 23645 } 23646 in = (sd_prin_readresv_t *)data_bufp; 23647 ptr->generation = BE_32(in->generation); 23648 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23649 23650 /* 23651 * Return the min(listsize, listlen( keys 23652 */ 23653 #ifdef _MULTI_DATAMODEL 23654 23655 switch (ddi_model_convert_from(flag & FMODELS)) { 23656 case DDI_MODEL_ILP32: 23657 resvlist32.listlen = resvlist.listlen; 23658 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23659 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23660 "sd_persistent_reservation_in_read_resv: " 23661 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23662 rval = EFAULT; 23663 goto done; 23664 } 23665 break; 23666 23667 case DDI_MODEL_NONE: 23668 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23669 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23670 "sd_persistent_reservation_in_read_resv: " 23671 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23672 rval = EFAULT; 23673 goto done; 23674 } 23675 break; 23676 } 23677 23678 #else /* ! _MULTI_DATAMODEL */ 23679 23680 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23681 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23682 "sd_persistent_reservation_in_read_resv: " 23683 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23684 rval = EFAULT; 23685 goto done; 23686 } 23687 23688 #endif /* ! 
_MULTI_DATAMODEL */ 23689 23690 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23691 bufp = resvlist.list; 23692 copysz = sizeof (mhioc_resv_desc_t); 23693 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23694 i++, readresv_ptr++, bufp++) { 23695 23696 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23697 MHIOC_RESV_KEY_SIZE); 23698 resvdesc.type = readresv_ptr->type; 23699 resvdesc.scope = readresv_ptr->scope; 23700 resvdesc.scope_specific_addr = 23701 BE_32(readresv_ptr->scope_specific_addr); 23702 23703 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23704 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23705 "sd_persistent_reservation_in_read_resv: " 23706 "failed ddi_copyout: resvlist\n"); 23707 rval = EFAULT; 23708 goto done; 23709 } 23710 } 23711 done: 23712 kmem_free(data_bufp, data_len); 23713 return (rval); 23714 } 23715 23716 23717 /* 23718 * Function: sr_change_blkmode() 23719 * 23720 * Description: This routine is the driver entry point for handling CD-ROM 23721 * block mode ioctl requests. Support for returning and changing 23722 * the current block size in use by the device is implemented. The 23723 * LBA size is changed via a MODE SELECT Block Descriptor. 23724 * 23725 * This routine issues a mode sense with an allocation length of 23726 * 12 bytes for the mode page header and a single block descriptor. 23727 * 23728 * Arguments: dev - the device 'dev_t' 23729 * cmd - the request type; one of CDROMGBLKMODE (get) or 23730 * CDROMSBLKMODE (set) 23731 * data - current block size or requested block size 23732 * flag - this argument is a pass through to ddi_copyxxx() directly 23733 * from the mode argument of ioctl(). 23734 * 23735 * Return Code: the code returned by sd_send_scsi_cmd() 23736 * EINVAL if invalid arguments are provided 23737 * EFAULT if ddi_copyxxx() fails 23738 * ENXIO if fail ddi_get_soft_state 23739 * EIO if invalid mode sense block descriptor length 23740 * 23741 */ 23742 23743 static int 23744 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23745 { 23746 struct sd_lun *un = NULL; 23747 struct mode_header *sense_mhp, *select_mhp; 23748 struct block_descriptor *sense_desc, *select_desc; 23749 int current_bsize; 23750 int rval = EINVAL; 23751 uchar_t *sense = NULL; 23752 uchar_t *select = NULL; 23753 23754 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23755 23756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23757 return (ENXIO); 23758 } 23759 23760 /* 23761 * The block length is changed via the Mode Select block descriptor, the 23762 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23763 * required as part of this routine. Therefore the mode sense allocation 23764 * length is specified to be the length of a mode page header and a 23765 * block descriptor. 
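 *
 * With the 4-byte Group 0 mode parameter header (MODE_HEADER_LENGTH)
 * and the 8-byte block descriptor (MODE_BLK_DESC_LENGTH), that is the
 * 12-byte allocation (BUFLEN_CHG_BLK_MODE) mentioned above:
 *
 *	sense[0..3]	mode parameter header
 *	sense[4..11]	block descriptor; its last three bytes hold
 *			the current block length, reassembled below as
 *
 *	current_bsize = (blksize_hi << 16) |
 *	    (blksize_mid << 8) | blksize_lo;
 *
 * e.g. hi/mid/lo bytes of 0x00/0x08/0x00 yield 2048.
 *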
23766 */ 23767 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23768 23769 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23770 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23772 "sr_change_blkmode: Mode Sense Failed\n"); 23773 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23774 return (rval); 23775 } 23776 23777 /* Check the block descriptor len to handle only 1 block descriptor */ 23778 sense_mhp = (struct mode_header *)sense; 23779 if ((sense_mhp->bdesc_length == 0) || 23780 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23781 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23782 "sr_change_blkmode: Mode Sense returned invalid block" 23783 " descriptor length\n"); 23784 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23785 return (EIO); 23786 } 23787 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23788 current_bsize = ((sense_desc->blksize_hi << 16) | 23789 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23790 23791 /* Process command */ 23792 switch (cmd) { 23793 case CDROMGBLKMODE: 23794 /* Return the block size obtained during the mode sense */ 23795 if (ddi_copyout(&current_bsize, (void *)data, 23796 sizeof (int), flag) != 0) 23797 rval = EFAULT; 23798 break; 23799 case CDROMSBLKMODE: 23800 /* Validate the requested block size */ 23801 switch (data) { 23802 case CDROM_BLK_512: 23803 case CDROM_BLK_1024: 23804 case CDROM_BLK_2048: 23805 case CDROM_BLK_2056: 23806 case CDROM_BLK_2336: 23807 case CDROM_BLK_2340: 23808 case CDROM_BLK_2352: 23809 case CDROM_BLK_2368: 23810 case CDROM_BLK_2448: 23811 case CDROM_BLK_2646: 23812 case CDROM_BLK_2647: 23813 break; 23814 default: 23815 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23816 "sr_change_blkmode: " 23817 "Block Size '%ld' Not Supported\n", data); 23818 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23819 return (EINVAL); 23820 } 23821 23822 /* 23823 * The current block size matches the requested block size so 23824 * there is no need to send the mode select to change the size 23825 */ 23826 if (current_bsize == data) { 23827 break; 23828 } 23829 23830 /* Build the select data for the requested block size */ 23831 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23832 select_mhp = (struct mode_header *)select; 23833 select_desc = 23834 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23835 /* 23836 * The LBA size is changed via the block descriptor, so the 23837 * descriptor is built according to the user data 23838 */ 23839 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23840 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23841 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23842 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23843 23844 /* Send the mode select for the requested block size */ 23845 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23846 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23847 SD_PATH_STANDARD)) != 0) { 23848 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23849 "sr_change_blkmode: Mode Select Failed\n"); 23850 /* 23851 * The mode select failed for the requested block size, 23852 * so reset the data for the original block size and 23853 * send it to the target. The error is indicated by the 23854 * return value for the failed mode select. 
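 *
 * For reference, a hypothetical user-level use of these two ioctls;
 * note the asymmetry visible in the code above: CDROMGBLKMODE copies
 * the current size out through a pointer, while CDROMSBLKMODE takes
 * the requested size by value:
 *
 *	#include <sys/cdio.h>
 *	#include <sys/ioctl.h>
 *
 *	int cur;
 *
 *	if (ioctl(fd, CDROMGBLKMODE, &cur) == 0 && cur != 2048)
 *		(void) ioctl(fd, CDROMSBLKMODE, 2048);
 *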
23855 */ 23856 select_desc->blksize_hi = sense_desc->blksize_hi; 23857 select_desc->blksize_mid = sense_desc->blksize_mid; 23858 select_desc->blksize_lo = sense_desc->blksize_lo; 23859 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23860 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23861 SD_PATH_STANDARD); 23862 } else { 23863 ASSERT(!mutex_owned(SD_MUTEX(un))); 23864 mutex_enter(SD_MUTEX(un)); 23865 sd_update_block_info(un, (uint32_t)data, 0); 23866 mutex_exit(SD_MUTEX(un)); 23867 } 23868 break; 23869 default: 23870 /* should not reach here, but check anyway */ 23871 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23872 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23873 rval = EINVAL; 23874 break; 23875 } 23876 23877 if (select) { 23878 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23879 } 23880 if (sense) { 23881 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23882 } 23883 return (rval); 23884 } 23885 23886 23887 /* 23888 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23889 * implement driver support for getting and setting the CD speed. The command 23890 * set used will be based on the device type. If the device has not been 23891 * identified as MMC the Toshiba vendor specific mode page will be used. If 23892 * the device is MMC but does not support the Real Time Streaming feature 23893 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23894 * be used to read the speed. 23895 */ 23896 23897 /* 23898 * Function: sr_change_speed() 23899 * 23900 * Description: This routine is the driver entry point for handling CD-ROM 23901 * drive speed ioctl requests for devices supporting the Toshiba 23902 * vendor specific drive speed mode page. Support for returning 23903 * and changing the current drive speed in use by the device is 23904 * implemented. 23905 * 23906 * Arguments: dev - the device 'dev_t' 23907 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23908 * CDROMSDRVSPEED (set) 23909 * data - current drive speed or requested drive speed 23910 * flag - this argument is a pass through to ddi_copyxxx() directly 23911 * from the mode argument of ioctl(). 23912 * 23913 * Return Code: the code returned by sd_send_scsi_cmd() 23914 * EINVAL if invalid arguments are provided 23915 * EFAULT if ddi_copyxxx() fails 23916 * ENXIO if fail ddi_get_soft_state 23917 * EIO if invalid mode sense block descriptor length 23918 */ 23919 23920 static int 23921 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23922 { 23923 struct sd_lun *un = NULL; 23924 struct mode_header *sense_mhp, *select_mhp; 23925 struct mode_speed *sense_page, *select_page; 23926 int current_speed; 23927 int rval = EINVAL; 23928 int bd_len; 23929 uchar_t *sense = NULL; 23930 uchar_t *select = NULL; 23931 23932 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23933 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23934 return (ENXIO); 23935 } 23936 23937 /* 23938 * Note: The drive speed is being modified here according to a Toshiba 23939 * vendor specific mode page (0x31). 
23940 	 */
23941 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
23942 
23943 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
23944 	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
23945 	    SD_PATH_STANDARD)) != 0) {
23946 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23947 		    "sr_change_speed: Mode Sense Failed\n");
23948 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
23949 		return (rval);
23950 	}
23951 	sense_mhp = (struct mode_header *)sense;
23952 
23953 	/* Check the block descriptor len to handle only 1 block descriptor */
23954 	bd_len = sense_mhp->bdesc_length;
23955 	if (bd_len > MODE_BLK_DESC_LENGTH) {
23956 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23957 		    "sr_change_speed: Mode Sense returned invalid block "
23958 		    "descriptor length\n");
23959 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
23960 		return (EIO);
23961 	}
23962 
23963 	sense_page = (struct mode_speed *)
23964 	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
23965 	current_speed = sense_page->speed;
23966 
23967 	/* Process command */
23968 	switch (cmd) {
23969 	case CDROMGDRVSPEED:
23970 		/* Return the drive speed obtained during the mode sense */
23971 		if (current_speed == 0x2) {
23972 			current_speed = CDROM_TWELVE_SPEED;
23973 		}
23974 		if (ddi_copyout(&current_speed, (void *)data,
23975 		    sizeof (int), flag) != 0) {
23976 			rval = EFAULT;
23977 		}
23978 		break;
23979 	case CDROMSDRVSPEED:
23980 		/* Validate the requested drive speed */
23981 		switch ((uchar_t)data) {
23982 		case CDROM_TWELVE_SPEED:
23983 			data = 0x2;
23984 			/*FALLTHROUGH*/
23985 		case CDROM_NORMAL_SPEED:
23986 		case CDROM_DOUBLE_SPEED:
23987 		case CDROM_QUAD_SPEED:
23988 		case CDROM_MAXIMUM_SPEED:
23989 			break;
23990 		default:
23991 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23992 			    "sr_change_speed: "
23993 			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
23994 			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
23995 			return (EINVAL);
23996 		}
23997 
23998 		/*
23999 		 * The current drive speed matches the requested drive speed so
24000 		 * there is no need to send the mode select to change the speed
24001 		 */
24002 		if (current_speed == data) {
24003 			break;
24004 		}
24005 
24006 		/* Build the select data for the requested drive speed */
24007 		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
24008 		select_mhp = (struct mode_header *)select;
24009 		select_mhp->bdesc_length = 0;
24010 		select_page =
24011 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
24014 		select_page->mode_page.code = CDROM_MODE_SPEED;
24015 		select_page->mode_page.length = 2;
24016 		select_page->speed = (uchar_t)data;
24017 
24018 		/* Send the mode select for the requested drive speed */
24019 		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
24020 		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
24021 		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
24022 			/*
24023 			 * The mode select failed for the requested drive speed,
24024 			 * so reset the data for the original drive speed and
24025 			 * send it to the target. The error is indicated by the
24026 			 * return value for the failed mode select.
24027 			 */
24028 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24029 			    "sr_change_speed: Mode Select Failed\n");
24030 			select_page->speed = sense_page->speed;
24031 			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
24032 			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
24033 			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
24034 		}
24035 		break;
24036 	default:
24037 		/* should not reach here, but check anyway */
24038 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24039 		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
24040 		rval = EINVAL;
24041 		break;
24042 	}
24043 
24044 	if (select) {
24045 		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
24046 	}
24047 	if (sense) {
24048 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
24049 	}
24050 
24051 	return (rval);
24052 }
24053 
24054 
24055 /*
24056  * Function: sr_atapi_change_speed()
24057  *
24058  * Description: This routine is the driver entry point for handling CD-ROM
24059  *		drive speed ioctl requests for MMC devices that do not support
24060  *		the Real Time Streaming feature (0x107).
24061  *
24062  *		Note: This routine will use the SET SPEED command which may not
24063  *		be supported by all devices.
24064  *
24065  * Arguments: dev - the device 'dev_t'
24066  *	cmd - the request type; one of CDROMGDRVSPEED (get) or
24067  *		CDROMSDRVSPEED (set)
24068  *	data - current drive speed or requested drive speed
24069  *	flag - this argument is a pass through to ddi_copyxxx() directly
24070  *		from the mode argument of ioctl().
24071  *
24072  * Return Code: the code returned by sd_send_scsi_cmd()
24073  *		EINVAL if invalid arguments are provided
24074  *		EFAULT if ddi_copyxxx() fails
24075  *		ENXIO if fail ddi_get_soft_state
24076  *		EIO if invalid mode sense block descriptor length
24077  */
24078 
24079 static int
24080 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
24081 {
24082 	struct sd_lun			*un;
24083 	struct uscsi_cmd		*com = NULL;
24084 	struct mode_header_grp2		*sense_mhp;
24085 	uchar_t				*sense_page;
24086 	uchar_t				*sense = NULL;
24087 	char				cdb[CDB_GROUP5];
24088 	int				bd_len;
24089 	int				current_speed = 0;
24090 	int				max_speed = 0;
24091 	int				rval;
24092 
24093 	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
24094 
24095 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24096 		return (ENXIO);
24097 	}
24098 
24099 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
24100 
24101 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
24102 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
24103 	    SD_PATH_STANDARD)) != 0) {
24104 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24105 		    "sr_atapi_change_speed: Mode Sense Failed\n");
24106 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24107 		return (rval);
24108 	}
24109 
24110 	/* Check the block descriptor len to handle only 1 block descriptor */
24111 	sense_mhp = (struct mode_header_grp2 *)sense;
24112 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
24113 	if (bd_len > MODE_BLK_DESC_LENGTH) {
24114 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24115 		    "sr_atapi_change_speed: Mode Sense returned invalid "
24116 		    "block descriptor length\n");
24117 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24118 		return (EIO);
24119 	}
24120 
24121 	/* Calculate the current and maximum drive speeds */
24122 	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
24123 	current_speed = (sense_page[14] << 8) | sense_page[15];
24124 	max_speed = (sense_page[8] << 8) | sense_page[9];
24125 
24126 	/* Process the command */
24127 	switch (cmd) {
24128 	case CDROMGDRVSPEED:
24129 		current_speed /= SD_SPEED_1X;
24130 		if (ddi_copyout(&current_speed, (void *)data,
24131 		    sizeof (int), flag) != 0)
24132 			rval = EFAULT;
24133 		break;
24134 	case CDROMSDRVSPEED:
24135 		/* Convert the speed code to KB/sec */
24136 		switch ((uchar_t)data) {
24137 		case CDROM_NORMAL_SPEED:
24138 			current_speed = SD_SPEED_1X;
24139 			break;
24140 		case CDROM_DOUBLE_SPEED:
24141 			current_speed = 2 * SD_SPEED_1X;
24142 			break;
24143 		case CDROM_QUAD_SPEED:
24144 			current_speed = 4 * SD_SPEED_1X;
24145 			break;
24146 		case CDROM_TWELVE_SPEED:
24147 			current_speed = 12 * SD_SPEED_1X;
24148 			break;
24149 		case CDROM_MAXIMUM_SPEED:
24150 			current_speed = 0xffff;
24151 			break;
24152 		default:
24153 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24154 			    "sr_atapi_change_speed: invalid drive speed %d\n",
24155 			    (uchar_t)data);
24156 			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24157 			return (EINVAL);
24158 		}
24159 
24160 		/* Check the request against the drive's max speed. */
24161 		if (current_speed != 0xffff) {
24162 			if (current_speed > max_speed) {
24163 				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24164 				return (EINVAL);
24165 			}
24166 		}
24167 
24168 		/*
24169 		 * Build and send the SET SPEED command
24170 		 *
24171 		 * Note: The SET SPEED (0xBB) command used in this routine is
24172 		 * obsolete per the SCSI MMC spec but still supported in the
24173 		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
24174 		 * therefore the command is still implemented in this routine.
24175 		 */
24176 		bzero(cdb, sizeof (cdb));
24177 		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
24178 		cdb[2] = (uchar_t)(current_speed >> 8);
24179 		cdb[3] = (uchar_t)current_speed;
24180 		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24181 		com->uscsi_cdb = (caddr_t)cdb;
24182 		com->uscsi_cdblen = CDB_GROUP5;
24183 		com->uscsi_bufaddr = NULL;
24184 		com->uscsi_buflen = 0;
24185 		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
24186 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
24187 		break;
24188 	default:
24189 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24190 		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
24191 		rval = EINVAL;
24192 	}
24193 
24194 	if (sense) {
24195 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24196 	}
24197 	if (com) {
24198 		kmem_free(com, sizeof (*com));
24199 	}
24200 	return (rval);
24201 }
24202 
24203 
24204 /*
24205  * Function: sr_pause_resume()
24206  *
24207  * Description: This routine is the driver entry point for handling CD-ROM
24208  *		pause/resume ioctl requests. This only affects the audio play
24209  *		operation.
24210  *
24211  * Arguments: dev - the device 'dev_t'
24212  *	cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
24213  *		for setting the resume bit of the cdb.
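 *
 *	For reference, the ioctl pair maps onto the PAUSE/RESUME (0x4B)
 *	CDB exactly as built below; a minimal sketch (illustration only):
 *
 *		cdb[0] = SCMD_PAUSE_RESUME;
 *		cdb[8] = (cmd == CDROMRESUME) ? 1 : 0;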
24214  *
24215  * Return Code: the code returned by sd_send_scsi_cmd()
24216  *		EINVAL if invalid mode specified
 *		ENXIO if fail ddi_get_soft_state
24217  *
24218  */
24219 
24220 static int
24221 sr_pause_resume(dev_t dev, int cmd)
24222 {
24223 	struct sd_lun		*un;
24224 	struct uscsi_cmd	*com;
24225 	char			cdb[CDB_GROUP1];
24226 	int			rval;
24227 
24228 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24229 		return (ENXIO);
24230 	}
24231 
24232 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24233 	bzero(cdb, CDB_GROUP1);
24234 	cdb[0] = SCMD_PAUSE_RESUME;
24235 	switch (cmd) {
24236 	case CDROMRESUME:
24237 		cdb[8] = 1;
24238 		break;
24239 	case CDROMPAUSE:
24240 		cdb[8] = 0;
24241 		break;
24242 	default:
24243 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
24244 		    " Command '%x' Not Supported\n", cmd);
24245 		rval = EINVAL;
24246 		goto done;
24247 	}
24248 
24249 	com->uscsi_cdb = cdb;
24250 	com->uscsi_cdblen = CDB_GROUP1;
24251 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
24252 
24253 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24254 	    SD_PATH_STANDARD);
24255 
24256 done:
24257 	kmem_free(com, sizeof (*com));
24258 	return (rval);
24259 }
24260 
24261 
24262 /*
24263  * Function: sr_play_msf()
24264  *
24265  * Description: This routine is the driver entry point for handling CD-ROM
24266  *		ioctl requests to output the audio signals at the specified
24267  *		starting address and continue the audio play until the specified
24268  *		ending address (CDROMPLAYMSF). The address is in Minute Second
24269  *		Frame (MSF) format.
24270  *
24271  * Arguments: dev - the device 'dev_t'
24272  *	data - pointer to user provided audio msf structure,
24273  *		specifying start/end addresses.
24274  *	flag - this argument is a pass through to ddi_copyxxx()
24275  *		directly from the mode argument of ioctl().
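 *
 *	A minimal reference for the addressing (illustration only): one
 *	second of CD audio is 75 frames and the lead-in occupies the
 *	first two seconds, so a hypothetical MSF-to-LBA conversion would
 *	be:
 *
 *		lba = ((min * 60 + sec) * 75 + frame) - 150;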
24276 * 24277 * Return Code: the code returned by sd_send_scsi_cmd() 24278 * EFAULT if ddi_copyxxx() fails 24279 * ENXIO if fail ddi_get_soft_state 24280 * EINVAL if data pointer is NULL 24281 */ 24282 24283 static int 24284 sr_play_msf(dev_t dev, caddr_t data, int flag) 24285 { 24286 struct sd_lun *un; 24287 struct uscsi_cmd *com; 24288 struct cdrom_msf msf_struct; 24289 struct cdrom_msf *msf = &msf_struct; 24290 char cdb[CDB_GROUP1]; 24291 int rval; 24292 24293 if (data == NULL) { 24294 return (EINVAL); 24295 } 24296 24297 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24298 return (ENXIO); 24299 } 24300 24301 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24302 return (EFAULT); 24303 } 24304 24305 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24306 bzero(cdb, CDB_GROUP1); 24307 cdb[0] = SCMD_PLAYAUDIO_MSF; 24308 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24309 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24310 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24311 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24312 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24313 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24314 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24315 } else { 24316 cdb[3] = msf->cdmsf_min0; 24317 cdb[4] = msf->cdmsf_sec0; 24318 cdb[5] = msf->cdmsf_frame0; 24319 cdb[6] = msf->cdmsf_min1; 24320 cdb[7] = msf->cdmsf_sec1; 24321 cdb[8] = msf->cdmsf_frame1; 24322 } 24323 com->uscsi_cdb = cdb; 24324 com->uscsi_cdblen = CDB_GROUP1; 24325 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24326 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24327 SD_PATH_STANDARD); 24328 kmem_free(com, sizeof (*com)); 24329 return (rval); 24330 } 24331 24332 24333 /* 24334 * Function: sr_play_trkind() 24335 * 24336 * Description: This routine is the driver entry point for handling CD-ROM 24337 * ioctl requests to output the audio signals at the specified 24338 * starting address and continue the audio play until the specified 24339 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24340 * format. 24341 * 24342 * Arguments: dev - the device 'dev_t' 24343 * data - pointer to user provided audio track/index structure, 24344 * specifying start/end addresses. 24345 * flag - this argument is a pass through to ddi_copyxxx() 24346 * directly from the mode argument of ioctl(). 
24347 * 24348 * Return Code: the code returned by sd_send_scsi_cmd() 24349 * EFAULT if ddi_copyxxx() fails 24350 * ENXIO if fail ddi_get_soft_state 24351 * EINVAL if data pointer is NULL 24352 */ 24353 24354 static int 24355 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24356 { 24357 struct cdrom_ti ti_struct; 24358 struct cdrom_ti *ti = &ti_struct; 24359 struct uscsi_cmd *com = NULL; 24360 char cdb[CDB_GROUP1]; 24361 int rval; 24362 24363 if (data == NULL) { 24364 return (EINVAL); 24365 } 24366 24367 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24368 return (EFAULT); 24369 } 24370 24371 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24372 bzero(cdb, CDB_GROUP1); 24373 cdb[0] = SCMD_PLAYAUDIO_TI; 24374 cdb[4] = ti->cdti_trk0; 24375 cdb[5] = ti->cdti_ind0; 24376 cdb[7] = ti->cdti_trk1; 24377 cdb[8] = ti->cdti_ind1; 24378 com->uscsi_cdb = cdb; 24379 com->uscsi_cdblen = CDB_GROUP1; 24380 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24381 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24382 SD_PATH_STANDARD); 24383 kmem_free(com, sizeof (*com)); 24384 return (rval); 24385 } 24386 24387 24388 /* 24389 * Function: sr_read_all_subcodes() 24390 * 24391 * Description: This routine is the driver entry point for handling CD-ROM 24392 * ioctl requests to return raw subcode data while the target is 24393 * playing audio (CDROMSUBCODE). 24394 * 24395 * Arguments: dev - the device 'dev_t' 24396 * data - pointer to user provided cdrom subcode structure, 24397 * specifying the transfer length and address. 24398 * flag - this argument is a pass through to ddi_copyxxx() 24399 * directly from the mode argument of ioctl(). 24400 * 24401 * Return Code: the code returned by sd_send_scsi_cmd() 24402 * EFAULT if ddi_copyxxx() fails 24403 * ENXIO if fail ddi_get_soft_state 24404 * EINVAL if data pointer is NULL 24405 */ 24406 24407 static int 24408 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24409 { 24410 struct sd_lun *un = NULL; 24411 struct uscsi_cmd *com = NULL; 24412 struct cdrom_subcode *subcode = NULL; 24413 int rval; 24414 size_t buflen; 24415 char cdb[CDB_GROUP5]; 24416 24417 #ifdef _MULTI_DATAMODEL 24418 /* To support ILP32 applications in an LP64 world */ 24419 struct cdrom_subcode32 cdrom_subcode32; 24420 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24421 #endif 24422 if (data == NULL) { 24423 return (EINVAL); 24424 } 24425 24426 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24427 return (ENXIO); 24428 } 24429 24430 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24431 24432 #ifdef _MULTI_DATAMODEL 24433 switch (ddi_model_convert_from(flag & FMODELS)) { 24434 case DDI_MODEL_ILP32: 24435 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24436 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24437 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24438 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24439 return (EFAULT); 24440 } 24441 /* Convert the ILP32 uscsi data from the application to LP64 */ 24442 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24443 break; 24444 case DDI_MODEL_NONE: 24445 if (ddi_copyin(data, subcode, 24446 sizeof (struct cdrom_subcode), flag)) { 24447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24448 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24449 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24450 return (EFAULT); 24451 } 24452 break; 24453 } 24454 #else /* ! 
_MULTI_DATAMODEL */
24455 	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
24456 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24457 		    "sr_read_all_subcodes: ddi_copyin Failed\n");
24458 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24459 		return (EFAULT);
24460 	}
24461 #endif /* _MULTI_DATAMODEL */
24462 
24463 	/*
24464 	 * Since MMC-2 expects max 3 bytes for length, check if the
24465 	 * length input is greater than 3 bytes
24466 	 */
24467 	if ((subcode->cdsc_length & 0xFF000000) != 0) {
24468 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24469 		    "sr_read_all_subcodes: "
24470 		    "cdrom transfer length too large: %d (limit %d)\n",
24471 		    subcode->cdsc_length, 0xFFFFFF);
24472 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24473 		return (EINVAL);
24474 	}
24475 
24476 	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
24477 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24478 	bzero(cdb, CDB_GROUP5);
24479 
24480 	if (un->un_f_mmc_cap == TRUE) {
24481 		cdb[0] = (char)SCMD_READ_CD;
24482 		cdb[2] = (char)0xff;
24483 		cdb[3] = (char)0xff;
24484 		cdb[4] = (char)0xff;
24485 		cdb[5] = (char)0xff;
24486 		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24487 		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24488 		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
24489 		cdb[10] = 1;
24490 	} else {
24491 		/*
24492 		 * Note: A vendor specific command (0xDF) is being used here to
24493 		 * request a read of all subcodes.
24494 		 */
24495 		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
24496 		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
24497 		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24498 		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24499 		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
24500 	}
24501 	com->uscsi_cdb = cdb;
24502 	com->uscsi_cdblen = CDB_GROUP5;
24503 	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
24504 	com->uscsi_buflen = buflen;
24505 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24506 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24507 	    SD_PATH_STANDARD);
24508 	kmem_free(subcode, sizeof (struct cdrom_subcode));
24509 	kmem_free(com, sizeof (*com));
24510 	return (rval);
24511 }
24512 
24513 
24514 /*
24515  * Function: sr_read_subchannel()
24516  *
24517  * Description: This routine is the driver entry point for handling CD-ROM
24518  *		ioctl requests to return the Q sub-channel data of the CD
24519  *		current position block. (CDROMSUBCHNL) The data includes the
24520  *		track number, index number, absolute CD-ROM address (LBA or MSF
24521  *		format per the user), track relative CD-ROM address (LBA or MSF
24522  *		format per the user), control data and audio status.
24523  *
24524  * Arguments: dev - the device 'dev_t'
24525  *	data - pointer to user provided cdrom sub-channel structure
24526  *	flag - this argument is a pass through to ddi_copyxxx()
24527  *		directly from the mode argument of ioctl().
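 *
 *	For reference (illustration only), the 16-byte current-position
 *	response parsed by this routine is laid out roughly as:
 *
 *		byte  1		audio status
 *		byte  5		ADR (high nibble) / CTRL (low nibble)
 *		bytes 6, 7	track number, index number
 *		bytes 8-11	absolute address (LBA or MSF)
 *		bytes 12-15	track relative address (LBA or MSF)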
24528 * 24529 * Return Code: the code returned by sd_send_scsi_cmd() 24530 * EFAULT if ddi_copyxxx() fails 24531 * ENXIO if fail ddi_get_soft_state 24532 * EINVAL if data pointer is NULL 24533 */ 24534 24535 static int 24536 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24537 { 24538 struct sd_lun *un; 24539 struct uscsi_cmd *com; 24540 struct cdrom_subchnl subchanel; 24541 struct cdrom_subchnl *subchnl = &subchanel; 24542 char cdb[CDB_GROUP1]; 24543 caddr_t buffer; 24544 int rval; 24545 24546 if (data == NULL) { 24547 return (EINVAL); 24548 } 24549 24550 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24551 (un->un_state == SD_STATE_OFFLINE)) { 24552 return (ENXIO); 24553 } 24554 24555 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24556 return (EFAULT); 24557 } 24558 24559 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24560 bzero(cdb, CDB_GROUP1); 24561 cdb[0] = SCMD_READ_SUBCHANNEL; 24562 /* Set the MSF bit based on the user requested address format */ 24563 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24564 /* 24565 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24566 * returned 24567 */ 24568 cdb[2] = 0x40; 24569 /* 24570 * Set byte 3 to specify the return data format. A value of 0x01 24571 * indicates that the CD-ROM current position should be returned. 24572 */ 24573 cdb[3] = 0x01; 24574 cdb[8] = 0x10; 24575 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24576 com->uscsi_cdb = cdb; 24577 com->uscsi_cdblen = CDB_GROUP1; 24578 com->uscsi_bufaddr = buffer; 24579 com->uscsi_buflen = 16; 24580 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24581 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24582 SD_PATH_STANDARD); 24583 if (rval != 0) { 24584 kmem_free(buffer, 16); 24585 kmem_free(com, sizeof (*com)); 24586 return (rval); 24587 } 24588 24589 /* Process the returned Q sub-channel data */ 24590 subchnl->cdsc_audiostatus = buffer[1]; 24591 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24592 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24593 subchnl->cdsc_trk = buffer[6]; 24594 subchnl->cdsc_ind = buffer[7]; 24595 if (subchnl->cdsc_format & CDROM_LBA) { 24596 subchnl->cdsc_absaddr.lba = 24597 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24598 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24599 subchnl->cdsc_reladdr.lba = 24600 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24601 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24602 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24603 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24604 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24605 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24606 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24607 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24608 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24609 } else { 24610 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24611 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24612 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24613 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24614 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24615 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24616 } 24617 kmem_free(buffer, 16); 24618 kmem_free(com, sizeof (*com)); 24619 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24620 != 0) { 24621 return (EFAULT); 24622 } 24623 return (rval); 24624 } 24625 24626 24627 /* 24628 * Function: sr_read_tocentry() 24629 * 
24630  * Description: This routine is the driver entry point for handling CD-ROM
24631  *		ioctl requests to read from the Table of Contents (TOC)
24632  *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
24633  *		fields, the starting address (LBA or MSF format per the user)
24634  *		and the data mode if the user specified track is a data track.
24635  *
24636  *		Note: The READ HEADER (0x44) command used in this routine is
24637  *		obsolete per the SCSI MMC spec but still supported in the
24638  *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
24639  *		therefore the command is still implemented in this routine.
24640  *
24641  * Arguments: dev - the device 'dev_t'
24642  *	data - pointer to user provided toc entry structure,
24643  *		specifying the track # and the address format
24644  *		(LBA or MSF).
24645  *	flag - this argument is a pass through to ddi_copyxxx()
24646  *		directly from the mode argument of ioctl().
24647  *
24648  * Return Code: the code returned by sd_send_scsi_cmd()
24649  *		EFAULT if ddi_copyxxx() fails
24650  *		ENXIO if fail ddi_get_soft_state
24651  *		EINVAL if data pointer is NULL
24652  */
24653 
24654 static int
24655 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
24656 {
24657 	struct sd_lun		*un = NULL;
24658 	struct uscsi_cmd	*com;
24659 	struct cdrom_tocentry	toc_entry;
24660 	struct cdrom_tocentry	*entry = &toc_entry;
24661 	caddr_t			buffer;
24662 	int			rval;
24663 	char			cdb[CDB_GROUP1];
24664 
24665 	if (data == NULL) {
24666 		return (EINVAL);
24667 	}
24668 
24669 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24670 	    (un->un_state == SD_STATE_OFFLINE)) {
24671 		return (ENXIO);
24672 	}
24673 
24674 	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
24675 		return (EFAULT);
24676 	}
24677 
24678 	/* Validate the requested track and address format */
24679 	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
24680 		return (EINVAL);
24681 	}
24682 
24683 	if (entry->cdte_track == 0) {
24684 		return (EINVAL);
24685 	}
24686 
24687 	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
24688 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24689 	bzero(cdb, CDB_GROUP1);
24690 
24691 	cdb[0] = SCMD_READ_TOC;
24692 	/* Set the MSF bit based on the user requested address format */
24693 	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
24694 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24695 		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
24696 	} else {
24697 		cdb[6] = entry->cdte_track;
24698 	}
24699 
24700 	/*
24701 	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24702 	 * (4 byte TOC response header + 8 byte track descriptor)
24703 	 */
24704 	cdb[8] = 12;
24705 	com->uscsi_cdb = cdb;
24706 	com->uscsi_cdblen = CDB_GROUP1;
24707 	com->uscsi_bufaddr = buffer;
24708 	com->uscsi_buflen = 0x0C;
24709 	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
24710 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24711 	    SD_PATH_STANDARD);
24712 	if (rval != 0) {
24713 		kmem_free(buffer, 12);
24714 		kmem_free(com, sizeof (*com));
24715 		return (rval);
24716 	}
24717 
24718 	/* Process the toc entry */
24719 	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
24720 	entry->cdte_ctrl = (buffer[5] & 0x0F);
24721 	if (entry->cdte_format & CDROM_LBA) {
24722 		entry->cdte_addr.lba =
24723 		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
24724 		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
24725 	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
24726 		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
24727 		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
24728 		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
24729 		/*
24730 		 * Send a READ TOC command using the LBA address format to get
24731 		 * the LBA for the track requested so it can be used in the
24732 		 * READ HEADER request
24733 		 *
24734 		 * Note: The MSF bit of the READ HEADER command specifies the
24735 		 * output format. The block address specified in that command
24736 		 * must be in LBA format.
24737 		 */
24738 		cdb[1] = 0;
24739 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24740 		    SD_PATH_STANDARD);
24741 		if (rval != 0) {
24742 			kmem_free(buffer, 12);
24743 			kmem_free(com, sizeof (*com));
24744 			return (rval);
24745 		}
24746 	} else {
24747 		entry->cdte_addr.msf.minute = buffer[9];
24748 		entry->cdte_addr.msf.second = buffer[10];
24749 		entry->cdte_addr.msf.frame = buffer[11];
24750 		/*
24751 		 * Send a READ TOC command using the LBA address format to get
24752 		 * the LBA for the track requested so it can be used in the
24753 		 * READ HEADER request
24754 		 *
24755 		 * Note: The MSF bit of the READ HEADER command specifies the
24756 		 * output format. The block address specified in that command
24757 		 * must be in LBA format.
24758 		 */
24759 		cdb[1] = 0;
24760 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24761 		    SD_PATH_STANDARD);
24762 		if (rval != 0) {
24763 			kmem_free(buffer, 12);
24764 			kmem_free(com, sizeof (*com));
24765 			return (rval);
24766 		}
24767 	}
24768 
24769 	/*
24770 	 * Build and send the READ HEADER command to determine the data mode of
24771 	 * the user specified track.
24772 	 */
24773 	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
24774 	    (entry->cdte_track != CDROM_LEADOUT)) {
24775 		bzero(cdb, CDB_GROUP1);
24776 		cdb[0] = SCMD_READ_HEADER;
24777 		cdb[2] = buffer[8];
24778 		cdb[3] = buffer[9];
24779 		cdb[4] = buffer[10];
24780 		cdb[5] = buffer[11];
24781 		cdb[8] = 0x08;
24782 		com->uscsi_buflen = 0x08;
24783 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24784 		    SD_PATH_STANDARD);
24785 		if (rval == 0) {
24786 			entry->cdte_datamode = buffer[0];
24787 		} else {
24788 			/*
24789 			 * The READ HEADER command failed; since it is
24790 			 * obsoleted in one spec, it's better to return
24791 			 * -1 for an invalid track so that we can still
24792 			 * receive the rest of the TOC data.
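			 *
			 * (For reference, illustration only: byte 0 of the
			 * 8-byte READ HEADER response is the CD data mode,
			 * where mode 1 sectors carry 2048 bytes of user data
			 * and mode 2 sectors carry 2336 bytes.)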
24793 			 */
24794 			entry->cdte_datamode = (uchar_t)-1;
24795 		}
24796 	} else {
24797 		entry->cdte_datamode = (uchar_t)-1;
24798 	}
24799 
24800 	kmem_free(buffer, 12);
24801 	kmem_free(com, sizeof (*com));
24802 	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
24803 		return (EFAULT);
24804 
24805 	return (rval);
24806 }
24807 
24808 
24809 /*
24810  * Function: sr_read_tochdr()
24811  *
24812  * Description: This routine is the driver entry point for handling CD-ROM
24813  *		ioctl requests to read the Table of Contents (TOC) header
24814  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
24815  *		and ending track numbers.
24816  *
24817  * Arguments: dev - the device 'dev_t'
24818  *	data - pointer to user provided toc header structure,
24819  *		specifying the starting and ending track numbers.
24820  *	flag - this argument is a pass through to ddi_copyxxx()
24821  *		directly from the mode argument of ioctl().
24822  *
24823  * Return Code: the code returned by sd_send_scsi_cmd()
24824  *		EFAULT if ddi_copyxxx() fails
24825  *		ENXIO if fail ddi_get_soft_state
24826  *		EINVAL if data pointer is NULL
24827  */
24828 
24829 static int
24830 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
24831 {
24832 	struct sd_lun		*un;
24833 	struct uscsi_cmd	*com;
24834 	struct cdrom_tochdr	toc_header;
24835 	struct cdrom_tochdr	*hdr = &toc_header;
24836 	char			cdb[CDB_GROUP1];
24837 	int			rval;
24838 	caddr_t			buffer;
24839 
24840 	if (data == NULL) {
24841 		return (EINVAL);
24842 	}
24843 
24844 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24845 	    (un->un_state == SD_STATE_OFFLINE)) {
24846 		return (ENXIO);
24847 	}
24848 
24849 	buffer = kmem_zalloc(4, KM_SLEEP);
24850 	bzero(cdb, CDB_GROUP1);
24851 	cdb[0] = SCMD_READ_TOC;
24852 	/*
24853 	 * Specifying a track number of 0x00 in the READ TOC command indicates
24854 	 * that the TOC header should be returned
24855 	 */
24856 	cdb[6] = 0x00;
24857 	/*
24858 	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
24859 	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
24860 	 */
24861 	cdb[8] = 0x04;
24862 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24863 	com->uscsi_cdb = cdb;
24864 	com->uscsi_cdblen = CDB_GROUP1;
24865 	com->uscsi_bufaddr = buffer;
24866 	com->uscsi_buflen = 0x04;
24867 	com->uscsi_timeout = 300;
24868 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24869 
24870 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24871 	    SD_PATH_STANDARD);
24872 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24873 		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
24874 		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
24875 	} else {
24876 		hdr->cdth_trk0 = buffer[2];
24877 		hdr->cdth_trk1 = buffer[3];
24878 	}
24879 	kmem_free(buffer, 4);
24880 	kmem_free(com, sizeof (*com));
24881 	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
24882 		return (EFAULT);
24883 	}
24884 	return (rval);
24885 }
24886 
24887 
24888 /*
24889  * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
24890  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
24891  * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
24892  * digital audio and extended architecture digital audio. These modes are
24893  * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
24894  * MMC specs.
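 *
 * For reference (illustration only), the per-sector byte counts these
 * routines size their transfers with are:
 *
 *	mode 1 user data	2048 bytes	(CDROM_BLK_2048)
 *	mode 2 user data	2336 bytes	(CDROM_BLK_2336)
 *	raw audio (CD-DA)	2352 bytes	(CDROM_BLK_2352)
 *	raw + all subcodes	2448 bytes	(CDROM_BLK_2448)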
24895 * 24896 * In addition to support for the various data formats these routines also 24897 * include support for devices that implement only the direct access READ 24898 * commands (0x08, 0x28), devices that implement the READ_CD commands 24899 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24900 * READ CDXA commands (0xD8, 0xDB) 24901 */ 24902 24903 /* 24904 * Function: sr_read_mode1() 24905 * 24906 * Description: This routine is the driver entry point for handling CD-ROM 24907 * ioctl read mode1 requests (CDROMREADMODE1). 24908 * 24909 * Arguments: dev - the device 'dev_t' 24910 * data - pointer to user provided cd read structure specifying 24911 * the lba buffer address and length. 24912 * flag - this argument is a pass through to ddi_copyxxx() 24913 * directly from the mode argument of ioctl(). 24914 * 24915 * Return Code: the code returned by sd_send_scsi_cmd() 24916 * EFAULT if ddi_copyxxx() fails 24917 * ENXIO if fail ddi_get_soft_state 24918 * EINVAL if data pointer is NULL 24919 */ 24920 24921 static int 24922 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24923 { 24924 struct sd_lun *un; 24925 struct cdrom_read mode1_struct; 24926 struct cdrom_read *mode1 = &mode1_struct; 24927 int rval; 24928 #ifdef _MULTI_DATAMODEL 24929 /* To support ILP32 applications in an LP64 world */ 24930 struct cdrom_read32 cdrom_read32; 24931 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24932 #endif /* _MULTI_DATAMODEL */ 24933 24934 if (data == NULL) { 24935 return (EINVAL); 24936 } 24937 24938 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24939 (un->un_state == SD_STATE_OFFLINE)) { 24940 return (ENXIO); 24941 } 24942 24943 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24944 "sd_read_mode1: entry: un:0x%p\n", un); 24945 24946 #ifdef _MULTI_DATAMODEL 24947 switch (ddi_model_convert_from(flag & FMODELS)) { 24948 case DDI_MODEL_ILP32: 24949 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24950 return (EFAULT); 24951 } 24952 /* Convert the ILP32 uscsi data from the application to LP64 */ 24953 cdrom_read32tocdrom_read(cdrd32, mode1); 24954 break; 24955 case DDI_MODEL_NONE: 24956 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24957 return (EFAULT); 24958 } 24959 } 24960 #else /* ! _MULTI_DATAMODEL */ 24961 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24962 return (EFAULT); 24963 } 24964 #endif /* _MULTI_DATAMODEL */ 24965 24966 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24967 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24968 24969 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24970 "sd_read_mode1: exit: un:0x%p\n", un); 24971 24972 return (rval); 24973 } 24974 24975 24976 /* 24977 * Function: sr_read_cd_mode2() 24978 * 24979 * Description: This routine is the driver entry point for handling CD-ROM 24980 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24981 * support the READ CD (0xBE) command or the 1st generation 24982 * READ CD (0xD4) command. 24983 * 24984 * Arguments: dev - the device 'dev_t' 24985 * data - pointer to user provided cd read structure specifying 24986 * the lba buffer address and length. 24987 * flag - this argument is a pass through to ddi_copyxxx() 24988 * directly from the mode argument of ioctl(). 
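 *
 *		For reference (illustration only), the READ CD CDB built
 *		below carries a 4-byte big-endian starting LBA in bytes
 *		2-5, a 3-byte transfer length in blocks in bytes 6-8, and
 *		the expected sector type / filter bits in bytes 1 and 9,
 *		e.g.:
 *
 *			cdb[1] = CDROM_SECTOR_TYPE_MODE2;
 *			cdb[9] = CDROM_READ_CD_USERDATA;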
24989  *
24990  * Return Code: the code returned by sd_send_scsi_cmd()
24991  *		EFAULT if ddi_copyxxx() fails
24992  *		ENXIO if fail ddi_get_soft_state
24993  *		EINVAL if data pointer is NULL
24994  */
24995 
24996 static int
24997 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
24998 {
24999 	struct sd_lun		*un;
25000 	struct uscsi_cmd	*com;
25001 	struct cdrom_read	mode2_struct;
25002 	struct cdrom_read	*mode2 = &mode2_struct;
25003 	uchar_t			cdb[CDB_GROUP5];
25004 	int			nblocks;
25005 	int			rval;
25006 #ifdef _MULTI_DATAMODEL
25007 	/* To support ILP32 applications in an LP64 world */
25008 	struct cdrom_read32	cdrom_read32;
25009 	struct cdrom_read32	*cdrd32 = &cdrom_read32;
25010 #endif /* _MULTI_DATAMODEL */
25011 
25012 	if (data == NULL) {
25013 		return (EINVAL);
25014 	}
25015 
25016 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25017 	    (un->un_state == SD_STATE_OFFLINE)) {
25018 		return (ENXIO);
25019 	}
25020 
25021 #ifdef _MULTI_DATAMODEL
25022 	switch (ddi_model_convert_from(flag & FMODELS)) {
25023 	case DDI_MODEL_ILP32:
25024 		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
25025 			return (EFAULT);
25026 		}
25027 		/* Convert the ILP32 uscsi data from the application to LP64 */
25028 		cdrom_read32tocdrom_read(cdrd32, mode2);
25029 		break;
25030 	case DDI_MODEL_NONE:
25031 		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
25032 			return (EFAULT);
25033 		}
25034 		break;
25035 	}
25036 
25037 #else /* ! _MULTI_DATAMODEL */
25038 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
25039 		return (EFAULT);
25040 	}
25041 #endif /* _MULTI_DATAMODEL */
25042 
25043 	bzero(cdb, sizeof (cdb));
25044 	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
25045 		/* Read command supported by 1st generation atapi drives */
25046 		cdb[0] = SCMD_READ_CDD4;
25047 	} else {
25048 		/* Universal CD Access Command */
25049 		cdb[0] = SCMD_READ_CD;
25050 	}
25051 
25052 	/*
25053 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
25054 	 */
25055 	cdb[1] = CDROM_SECTOR_TYPE_MODE2;
25056 
25057 	/* set the start address */
25058 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
25059 	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
25060 	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
25061 	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
25062 
25063 	/* set the transfer length */
25064 	nblocks = mode2->cdread_buflen / 2336;
25065 	cdb[6] = (uchar_t)(nblocks >> 16);
25066 	cdb[7] = (uchar_t)(nblocks >> 8);
25067 	cdb[8] = (uchar_t)nblocks;
25068 
25069 	/* set the filter bits */
25070 	cdb[9] = CDROM_READ_CD_USERDATA;
25071 
25072 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25073 	com->uscsi_cdb = (caddr_t)cdb;
25074 	com->uscsi_cdblen = sizeof (cdb);
25075 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
25076 	com->uscsi_buflen = mode2->cdread_buflen;
25077 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25078 
25079 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
25080 	    SD_PATH_STANDARD);
25081 	kmem_free(com, sizeof (*com));
25082 	return (rval);
25083 }
25084 
25085 
25086 /*
25087  * Function: sr_read_mode2()
25088  *
25089  * Description: This routine is the driver entry point for handling CD-ROM
25090  *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
25091  *		do not support the READ CD (0xBE) command.
25092  *
25093  * Arguments: dev - the device 'dev_t'
25094  *	data - pointer to user provided cd read structure specifying
25095  *		the lba buffer address and length.
25096  *	flag - this argument is a pass through to ddi_copyxxx()
25097  *		directly from the mode argument of ioctl().
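 *
 *		Note (illustration only): because this path issues a plain
 *		GROUP0 READ after switching the target to 2336-byte
 *		sectors, the 512-byte-based LBA from the caller is
 *		converted to a 2048-byte-based block number with a simple
 *		shift:
 *
 *			lba_2048 = lba_512 >> 2;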
25098 * 25099 * Return Code: the code returned by sd_send_scsi_cmd() 25100 * EFAULT if ddi_copyxxx() fails 25101 * ENXIO if fail ddi_get_soft_state 25102 * EINVAL if data pointer is NULL 25103 * EIO if fail to reset block size 25104 * EAGAIN if commands are in progress in the driver 25105 */ 25106 25107 static int 25108 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25109 { 25110 struct sd_lun *un; 25111 struct cdrom_read mode2_struct; 25112 struct cdrom_read *mode2 = &mode2_struct; 25113 int rval; 25114 uint32_t restore_blksize; 25115 struct uscsi_cmd *com; 25116 uchar_t cdb[CDB_GROUP0]; 25117 int nblocks; 25118 25119 #ifdef _MULTI_DATAMODEL 25120 /* To support ILP32 applications in an LP64 world */ 25121 struct cdrom_read32 cdrom_read32; 25122 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25123 #endif /* _MULTI_DATAMODEL */ 25124 25125 if (data == NULL) { 25126 return (EINVAL); 25127 } 25128 25129 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25130 (un->un_state == SD_STATE_OFFLINE)) { 25131 return (ENXIO); 25132 } 25133 25134 /* 25135 * Because this routine will update the device and driver block size 25136 * being used we want to make sure there are no commands in progress. 25137 * If commands are in progress the user will have to try again. 25138 * 25139 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25140 * in sdioctl to protect commands from sdioctl through to the top of 25141 * sd_uscsi_strategy. See sdioctl for details. 25142 */ 25143 mutex_enter(SD_MUTEX(un)); 25144 if (un->un_ncmds_in_driver != 1) { 25145 mutex_exit(SD_MUTEX(un)); 25146 return (EAGAIN); 25147 } 25148 mutex_exit(SD_MUTEX(un)); 25149 25150 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25151 "sd_read_mode2: entry: un:0x%p\n", un); 25152 25153 #ifdef _MULTI_DATAMODEL 25154 switch (ddi_model_convert_from(flag & FMODELS)) { 25155 case DDI_MODEL_ILP32: 25156 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25157 return (EFAULT); 25158 } 25159 /* Convert the ILP32 uscsi data from the application to LP64 */ 25160 cdrom_read32tocdrom_read(cdrd32, mode2); 25161 break; 25162 case DDI_MODEL_NONE: 25163 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25164 return (EFAULT); 25165 } 25166 break; 25167 } 25168 #else /* ! 
_MULTI_DATAMODEL */
25169 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
25170 		return (EFAULT);
25171 	}
25172 #endif /* _MULTI_DATAMODEL */
25173 
25174 	/* Store the current target block size for restoration later */
25175 	restore_blksize = un->un_tgt_blocksize;
25176 
25177 	/* Change the device and soft state target block size to 2336 */
25178 	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
25179 		rval = EIO;
25180 		goto done;
25181 	}
25182 
25183 
25184 	bzero(cdb, sizeof (cdb));
25185 
25186 	/* set READ operation */
25187 	cdb[0] = SCMD_READ;
25188 
25189 	/* adjust lba for 2kbyte blocks from 512 byte blocks */
25190 	mode2->cdread_lba >>= 2;
25191 
25192 	/* set the start address */
25193 	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
25194 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
25195 	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
25196 
25197 	/* set the transfer length */
25198 	nblocks = mode2->cdread_buflen / 2336;
25199 	cdb[4] = (uchar_t)nblocks & 0xFF;
25200 
25201 	/* build command */
25202 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25203 	com->uscsi_cdb = (caddr_t)cdb;
25204 	com->uscsi_cdblen = sizeof (cdb);
25205 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
25206 	com->uscsi_buflen = mode2->cdread_buflen;
25207 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25208 
25209 	/*
25210 	 * Issue SCSI command with user space address for read buffer.
25211 	 *
25212 	 * This sends the command through main channel in the driver.
25213 	 *
25214 	 * Since this is accessed via an IOCTL call, we go through the
25215 	 * standard path, so that if the device was powered down, then
25216 	 * it would be 'awakened' to handle the command.
25217 	 */
25218 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
25219 	    SD_PATH_STANDARD);
25220 
25221 	kmem_free(com, sizeof (*com));
25222 
25223 	/* Restore the device and soft state target block size */
25224 	if (sr_sector_mode(dev, restore_blksize) != 0) {
25225 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25226 		    "can't do switch back to mode 1\n");
25227 		/*
25228 		 * If sd_send_scsi_READ succeeded we still need to report
25229 		 * an error because we failed to reset the block size
25230 		 */
25231 		if (rval == 0) {
25232 			rval = EIO;
25233 		}
25234 	}
25235 
25236 done:
25237 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
25238 	    "sd_read_mode2: exit: un:0x%p\n", un);
25239 
25240 	return (rval);
25241 }
25242 
25243 
25244 /*
25245  * Function: sr_sector_mode()
25246  *
25247  * Description: This utility function is used by sr_read_mode2 to set the
25248  *		target block size based on the user specified size. This is a
25249  *		legacy implementation based upon a vendor specific mode page.
25250  *
25251  * Arguments: dev - the device 'dev_t'
25252  *	blksize - flag indicating if the block size is being set to
25253  *		2336 or 512.
25254  *
25255  * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
25256  *		sd_send_scsi_MODE_SELECT()
25257  *		ENXIO if fail ddi_get_soft_state
25258  */
25259 
25261 static int
25262 sr_sector_mode(dev_t dev, uint32_t blksize)
25263 {
25264 	struct sd_lun	*un;
25265 	uchar_t		*sense;
25266 	uchar_t		*select;
25267 	int		rval;
25268 
25269 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25270 	    (un->un_state == SD_STATE_OFFLINE)) {
25271 		return (ENXIO);
25272 	}
25273 
25274 	sense = kmem_zalloc(20, KM_SLEEP);
25275 
25276 	/* Note: This is a vendor specific mode page (0x81) */
25277 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81,
25278 	    SD_PATH_STANDARD)) != 0) {
25279 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25280 		    "sr_sector_mode: Mode Sense failed\n");
25281 		kmem_free(sense, 20);
25282 		return (rval);
25283 	}
25284 	select = kmem_zalloc(20, KM_SLEEP);
25285 	select[3] = 0x08;
25286 	select[10] = ((blksize >> 8) & 0xff);
25287 	select[11] = (blksize & 0xff);
25288 	select[12] = 0x01;
25289 	select[13] = 0x06;
25290 	select[14] = sense[14];
25291 	select[15] = sense[15];
25292 	if (blksize == SD_MODE2_BLKSIZE) {
25293 		select[14] |= 0x01;
25294 	}
25295 
25296 	if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20,
25297 	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
25298 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25299 		    "sr_sector_mode: Mode Select failed\n");
25300 	} else {
25301 		/*
25302 		 * Only update the softstate block size if we successfully
25303 		 * changed the device block mode.
25304 		 */
25305 		mutex_enter(SD_MUTEX(un));
25306 		sd_update_block_info(un, blksize, 0);
25307 		mutex_exit(SD_MUTEX(un));
25308 	}
25309 	kmem_free(sense, 20);
25310 	kmem_free(select, 20);
25311 	return (rval);
25312 }
25313 
25314 
25315 /*
25316  * Function: sr_read_cdda()
25317  *
25318  * Description: This routine is the driver entry point for handling CD-ROM
25319  *		ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
25320  *		the target supports CDDA these requests are handled via a vendor
25321  *		specific command (0xD8). If the target does not support CDDA
25322  *		these requests are handled via the READ CD command (0xBE).
25323  *
25324  * Arguments: dev - the device 'dev_t'
25325  *	data - pointer to user provided CD-DA structure specifying
25326  *		the track starting address, transfer length, and
25327  *		subcode options.
25328  *	flag - this argument is a pass through to ddi_copyxxx()
25329  *		directly from the mode argument of ioctl().
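 *
 *	For reference (illustration only), the transfer size is the
 *	requested block count scaled by the per-block size implied by the
 *	subcode option, e.g. for CDROM_DA_SUBQ:
 *
 *		buflen = CDROM_BLK_2368 * cdda->cdda_length;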
25330 * 25331 * Return Code: the code returned by sd_send_scsi_cmd() 25332 * EFAULT if ddi_copyxxx() fails 25333 * ENXIO if fail ddi_get_soft_state 25334 * EINVAL if invalid arguments are provided 25335 * ENOTTY 25336 */ 25337 25338 static int 25339 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25340 { 25341 struct sd_lun *un; 25342 struct uscsi_cmd *com; 25343 struct cdrom_cdda *cdda; 25344 int rval; 25345 size_t buflen; 25346 char cdb[CDB_GROUP5]; 25347 25348 #ifdef _MULTI_DATAMODEL 25349 /* To support ILP32 applications in an LP64 world */ 25350 struct cdrom_cdda32 cdrom_cdda32; 25351 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25352 #endif /* _MULTI_DATAMODEL */ 25353 25354 if (data == NULL) { 25355 return (EINVAL); 25356 } 25357 25358 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25359 return (ENXIO); 25360 } 25361 25362 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25363 25364 #ifdef _MULTI_DATAMODEL 25365 switch (ddi_model_convert_from(flag & FMODELS)) { 25366 case DDI_MODEL_ILP32: 25367 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25368 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25369 "sr_read_cdda: ddi_copyin Failed\n"); 25370 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25371 return (EFAULT); 25372 } 25373 /* Convert the ILP32 uscsi data from the application to LP64 */ 25374 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25375 break; 25376 case DDI_MODEL_NONE: 25377 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25378 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25379 "sr_read_cdda: ddi_copyin Failed\n"); 25380 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25381 return (EFAULT); 25382 } 25383 break; 25384 } 25385 #else /* ! _MULTI_DATAMODEL */ 25386 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25387 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25388 "sr_read_cdda: ddi_copyin Failed\n"); 25389 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25390 return (EFAULT); 25391 } 25392 #endif /* _MULTI_DATAMODEL */ 25393 25394 /* 25395 * Since MMC-2 expects max 3 bytes for length, check if the 25396 * length input is greater than 3 bytes 25397 */ 25398 if ((cdda->cdda_length & 0xFF000000) != 0) { 25399 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25400 "cdrom transfer length too large: %d (limit %d)\n", 25401 cdda->cdda_length, 0xFFFFFF); 25402 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25403 return (EINVAL); 25404 } 25405 25406 switch (cdda->cdda_subcode) { 25407 case CDROM_DA_NO_SUBCODE: 25408 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25409 break; 25410 case CDROM_DA_SUBQ: 25411 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25412 break; 25413 case CDROM_DA_ALL_SUBCODE: 25414 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25415 break; 25416 case CDROM_DA_SUBCODE_ONLY: 25417 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25418 break; 25419 default: 25420 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25421 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25422 cdda->cdda_subcode); 25423 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25424 return (EINVAL); 25425 } 25426 25427 /* Build and send the command */ 25428 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25429 bzero(cdb, CDB_GROUP5); 25430 25431 if (un->un_f_cfg_cdda == TRUE) { 25432 cdb[0] = (char)SCMD_READ_CD; 25433 cdb[1] = 0x04; 25434 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25435 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25436 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25437 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25438 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25439 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25440 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25441 cdb[9] = 0x10; 25442 switch (cdda->cdda_subcode) { 25443 case CDROM_DA_NO_SUBCODE : 25444 cdb[10] = 0x0; 25445 break; 25446 case CDROM_DA_SUBQ : 25447 cdb[10] = 0x2; 25448 break; 25449 case CDROM_DA_ALL_SUBCODE : 25450 cdb[10] = 0x1; 25451 break; 25452 case CDROM_DA_SUBCODE_ONLY : 25453 /* FALLTHROUGH */ 25454 default : 25455 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25456 kmem_free(com, sizeof (*com)); 25457 return (ENOTTY); 25458 } 25459 } else { 25460 cdb[0] = (char)SCMD_READ_CDDA; 25461 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25462 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25463 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25464 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25465 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25466 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25467 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25468 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25469 cdb[10] = cdda->cdda_subcode; 25470 } 25471 25472 com->uscsi_cdb = cdb; 25473 com->uscsi_cdblen = CDB_GROUP5; 25474 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25475 com->uscsi_buflen = buflen; 25476 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25477 25478 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25479 SD_PATH_STANDARD); 25480 25481 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25482 kmem_free(com, sizeof (*com)); 25483 return (rval); 25484 } 25485 25486 25487 /* 25488 * Function: sr_read_cdxa() 25489 * 25490 * Description: This routine is the driver entry point for handling CD-ROM 25491 * ioctl requests to return CD-XA (Extended Architecture) data. 25492 * (CDROMCDXA). 25493 * 25494 * Arguments: dev - the device 'dev_t' 25495 * data - pointer to user provided CD-XA structure specifying 25496 * the data starting address, transfer length, and format 25497 * flag - this argument is a pass through to ddi_copyxxx() 25498 * directly from the mode argument of ioctl(). 25499 * 25500 * Return Code: the code returned by sd_send_scsi_cmd() 25501 * EFAULT if ddi_copyxxx() fails 25502 * ENXIO if fail ddi_get_soft_state 25503 * EINVAL if data pointer is NULL 25504 */ 25505 25506 static int 25507 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25508 { 25509 struct sd_lun *un; 25510 struct uscsi_cmd *com; 25511 struct cdrom_cdxa *cdxa; 25512 int rval; 25513 size_t buflen; 25514 char cdb[CDB_GROUP5]; 25515 uchar_t read_flags; 25516 25517 #ifdef _MULTI_DATAMODEL 25518 /* To support ILP32 applications in an LP64 world */ 25519 struct cdrom_cdxa32 cdrom_cdxa32; 25520 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25521 #endif /* _MULTI_DATAMODEL */ 25522 25523 if (data == NULL) { 25524 return (EINVAL); 25525 } 25526 25527 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25528 return (ENXIO); 25529 } 25530 25531 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25532 25533 #ifdef _MULTI_DATAMODEL 25534 switch (ddi_model_convert_from(flag & FMODELS)) { 25535 case DDI_MODEL_ILP32: 25536 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25537 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25538 return (EFAULT); 25539 } 25540 /* 25541 * Convert the ILP32 uscsi data from the 25542 * application to LP64 for internal use. 
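		 *
		 * (For reference, illustration only: the 32-bit structure
		 * carries the user buffer pointer as a 32-bit value, so
		 * the conversion widens fields such as cdxa32->cdxa_data
		 * into the native cdxa->cdxa_data pointer before the CDB
		 * is built.)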
25543 		 */
25544 		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
25545 		break;
25546 	case DDI_MODEL_NONE:
25547 		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25548 			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25549 			return (EFAULT);
25550 		}
25551 		break;
25552 	}
25553 #else /* ! _MULTI_DATAMODEL */
25554 	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25555 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25556 		return (EFAULT);
25557 	}
25558 #endif /* _MULTI_DATAMODEL */
25559 
25560 	/*
25561 	 * Since MMC-2 expects max 3 bytes for length, check if the
25562 	 * length input is greater than 3 bytes
25563 	 */
25564 	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
25565 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
25566 		    "cdrom transfer length too large: %d (limit %d)\n",
25567 		    cdxa->cdxa_length, 0xFFFFFF);
25568 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25569 		return (EINVAL);
25570 	}
25571 
25572 	switch (cdxa->cdxa_format) {
25573 	case CDROM_XA_DATA:
25574 		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
25575 		read_flags = 0x10;
25576 		break;
25577 	case CDROM_XA_SECTOR_DATA:
25578 		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
25579 		read_flags = 0xf8;
25580 		break;
25581 	case CDROM_XA_DATA_W_ERROR:
25582 		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
25583 		read_flags = 0xfc;
25584 		break;
25585 	default:
25586 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25587 		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
25588 		    cdxa->cdxa_format);
25589 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25590 		return (EINVAL);
25591 	}
25592 
25593 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25594 	bzero(cdb, CDB_GROUP5);
25595 	if (un->un_f_mmc_cap == TRUE) {
25596 		cdb[0] = (char)SCMD_READ_CD;
25597 		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
25598 		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
25599 		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
25600 		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
25601 		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
25602 		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
25603 		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
25604 		cdb[9] = (char)read_flags;
25605 	} else {
25606 		/*
25607 		 * Note: A vendor specific command (0xDB) is being used here to
25608 		 * request a CD-XA read.
25609 */ 25610 cdb[0] = (char)SCMD_READ_CDXA; 25611 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25612 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25613 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25614 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25615 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25616 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25617 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25618 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25619 cdb[10] = cdxa->cdxa_format; 25620 } 25621 com->uscsi_cdb = cdb; 25622 com->uscsi_cdblen = CDB_GROUP5; 25623 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25624 com->uscsi_buflen = buflen; 25625 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25626 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25627 SD_PATH_STANDARD); 25628 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25629 kmem_free(com, sizeof (*com)); 25630 return (rval); 25631 } 25632 25633 25634 /* 25635 * Function: sr_eject() 25636 * 25637 * Description: This routine is the driver entry point for handling CD-ROM 25638 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25639 * 25640 * Arguments: dev - the device 'dev_t' 25641 * 25642 * Return Code: the code returned by sd_send_scsi_cmd() 25643 */ 25644 25645 static int 25646 sr_eject(dev_t dev) 25647 { 25648 struct sd_lun *un; 25649 int rval; 25650 25651 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25652 (un->un_state == SD_STATE_OFFLINE)) { 25653 return (ENXIO); 25654 } 25655 25656 /* 25657 * To prevent race conditions with the eject 25658 * command, keep track of an eject command as 25659 * it progresses. If we are already handling 25660 * an eject command in the driver for the given 25661 * unit and another request to eject is received 25662 * immediately return EAGAIN so we don't lose 25663 * the command if the current eject command fails. 25664 */ 25665 mutex_enter(SD_MUTEX(un)); 25666 if (un->un_f_ejecting == TRUE) { 25667 mutex_exit(SD_MUTEX(un)); 25668 return (EAGAIN); 25669 } 25670 un->un_f_ejecting = TRUE; 25671 mutex_exit(SD_MUTEX(un)); 25672 25673 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25674 SD_PATH_STANDARD)) != 0) { 25675 mutex_enter(SD_MUTEX(un)); 25676 un->un_f_ejecting = FALSE; 25677 mutex_exit(SD_MUTEX(un)); 25678 return (rval); 25679 } 25680 25681 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25682 SD_PATH_STANDARD); 25683 25684 if (rval == 0) { 25685 mutex_enter(SD_MUTEX(un)); 25686 sr_ejected(un); 25687 un->un_mediastate = DKIO_EJECTED; 25688 un->un_f_ejecting = FALSE; 25689 cv_broadcast(&un->un_state_cv); 25690 mutex_exit(SD_MUTEX(un)); 25691 } else { 25692 mutex_enter(SD_MUTEX(un)); 25693 un->un_f_ejecting = FALSE; 25694 mutex_exit(SD_MUTEX(un)); 25695 } 25696 return (rval); 25697 } 25698 25699 25700 /* 25701 * Function: sr_ejected() 25702 * 25703 * Description: This routine updates the soft state structure to invalidate the 25704 * geometry information after the media has been ejected or a 25705 * media eject has been detected. 
25706 * 25707 * Arguments: un - driver soft state (unit) structure 25708 */ 25709 25710 static void 25711 sr_ejected(struct sd_lun *un) 25712 { 25713 struct sd_errstats *stp; 25714 25715 ASSERT(un != NULL); 25716 ASSERT(mutex_owned(SD_MUTEX(un))); 25717 25718 un->un_f_blockcount_is_valid = FALSE; 25719 un->un_f_tgt_blocksize_is_valid = FALSE; 25720 mutex_exit(SD_MUTEX(un)); 25721 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25722 mutex_enter(SD_MUTEX(un)); 25723 25724 if (un->un_errstats != NULL) { 25725 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25726 stp->sd_capacity.value.ui64 = 0; 25727 } 25728 25729 /* remove "capacity-of-device" properties */ 25730 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25731 "device-nblocks"); 25732 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25733 "device-blksize"); 25734 } 25735 25736 25737 /* 25738 * Function: sr_check_wp() 25739 * 25740 * Description: This routine checks the write protection of a removable 25741 * media disk and hotpluggable devices via the write protect bit of 25742 * the Mode Page Header device specific field. Some devices choke 25743 * on an unsupported mode page. To work around this issue, this 25744 * routine uses the 0x3f mode page (request for all pages) for 25745 * all device types. 25746 * 25747 * Arguments: dev - the device 'dev_t' 25748 * 25749 * Return Code: int indicating if the device is write protected (1) or not (0) 25750 * 25751 * Context: Kernel thread. 25752 * 25753 */ 25754 25755 static int 25756 sr_check_wp(dev_t dev) 25757 { 25758 struct sd_lun *un; 25759 uchar_t device_specific; 25760 uchar_t *sense; 25761 int hdrlen; 25762 int rval = FALSE; 25763 25764 /* 25765 * Note: The return codes for this routine should be reworked to 25766 * properly handle the case of a NULL softstate. 25767 */ 25768 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25769 return (FALSE); 25770 } 25771 25772 if (un->un_f_cfg_is_atapi == TRUE) { 25773 /* 25774 * The mode page contents are not required; set the allocation 25775 * length for the mode page header only 25776 */ 25777 hdrlen = MODE_HEADER_LENGTH_GRP2; 25778 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25779 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25780 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25781 goto err_exit; 25782 device_specific = 25783 ((struct mode_header_grp2 *)sense)->device_specific; 25784 } else { 25785 hdrlen = MODE_HEADER_LENGTH; 25786 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25787 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25788 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25789 goto err_exit; 25790 device_specific = 25791 ((struct mode_header *)sense)->device_specific; 25792 } 25793 25794 /* 25795 * Report write protection if the WP bit is set. If the mode sense 25796 * above failed, rval remains FALSE: not all devices understand 25797 * this query, and such devices are assumed to be writable. 25798 */ 25799 if (device_specific & WRITE_PROTECT) { 25800 rval = TRUE; 25801 } 25802 25803 err_exit: 25804 kmem_free(sense, hdrlen); 25805 return (rval); 25806 } 25807 25808 /* 25809 * Function: sr_volume_ctrl() 25810 * 25811 * Description: This routine is the driver entry point for handling CD-ROM 25812 * audio output volume ioctl requests. (CDROMVOLCTRL) 25813 * 25814 * Arguments: dev - the device 'dev_t' 25815 * data - pointer to user audio volume control structure 25816 * flag - this argument is a pass through to ddi_copyxxx() 25817 * directly from the mode argument of ioctl().
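*
* Example (illustrative userland usage, assuming cd_fd is an open
* CD-ROM device file descriptor; channels 2 and 3 are currently
* unsupported by this driver):
*
*	struct cdrom_volctrl vc;
*	vc.channel0 = 0xFF;
*	vc.channel1 = 0xFF;
*	if (ioctl(cd_fd, CDROMVOLCTRL, (caddr_t)&vc) < 0)
*		perror("CDROMVOLCTRL");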
25818 * 25819 * Return Code: the code returned by sd_send_scsi_cmd() 25820 * EFAULT if ddi_copyxxx() fails 25821 * ENXIO if fail ddi_get_soft_state 25822 * EINVAL if data pointer is NULL 25823 * 25824 */ 25825 25826 static int 25827 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25828 { 25829 struct sd_lun *un; 25830 struct cdrom_volctrl volume; 25831 struct cdrom_volctrl *vol = &volume; 25832 uchar_t *sense_page; 25833 uchar_t *select_page; 25834 uchar_t *sense; 25835 uchar_t *select; 25836 int sense_buflen; 25837 int select_buflen; 25838 int rval; 25839 25840 if (data == NULL) { 25841 return (EINVAL); 25842 } 25843 25844 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25845 (un->un_state == SD_STATE_OFFLINE)) { 25846 return (ENXIO); 25847 } 25848 25849 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25850 return (EFAULT); 25851 } 25852 25853 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25854 struct mode_header_grp2 *sense_mhp; 25855 struct mode_header_grp2 *select_mhp; 25856 int bd_len; 25857 25858 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25859 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25860 MODEPAGE_AUDIO_CTRL_LEN; 25861 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25862 select = kmem_zalloc(select_buflen, KM_SLEEP); 25863 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25864 sense_buflen, MODEPAGE_AUDIO_CTRL, 25865 SD_PATH_STANDARD)) != 0) { 25866 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25867 "sr_volume_ctrl: Mode Sense Failed\n"); 25868 kmem_free(sense, sense_buflen); 25869 kmem_free(select, select_buflen); 25870 return (rval); 25871 } 25872 sense_mhp = (struct mode_header_grp2 *)sense; 25873 select_mhp = (struct mode_header_grp2 *)select; 25874 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25875 sense_mhp->bdesc_length_lo; 25876 if (bd_len > MODE_BLK_DESC_LENGTH) { 25877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25878 "sr_volume_ctrl: Mode Sense returned invalid " 25879 "block descriptor length\n"); 25880 kmem_free(sense, sense_buflen); 25881 kmem_free(select, select_buflen); 25882 return (EIO); 25883 } 25884 sense_page = (uchar_t *) 25885 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25886 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25887 select_mhp->length_msb = 0; 25888 select_mhp->length_lsb = 0; 25889 select_mhp->bdesc_length_hi = 0; 25890 select_mhp->bdesc_length_lo = 0; 25891 } else { 25892 struct mode_header *sense_mhp, *select_mhp; 25893 25894 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25895 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25896 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25897 select = kmem_zalloc(select_buflen, KM_SLEEP); 25898 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25899 sense_buflen, MODEPAGE_AUDIO_CTRL, 25900 SD_PATH_STANDARD)) != 0) { 25901 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25902 "sr_volume_ctrl: Mode Sense Failed\n"); 25903 kmem_free(sense, sense_buflen); 25904 kmem_free(select, select_buflen); 25905 return (rval); 25906 } 25907 sense_mhp = (struct mode_header *)sense; 25908 select_mhp = (struct mode_header *)select; 25909 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25910 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25911 "sr_volume_ctrl: Mode Sense returned invalid " 25912 "block descriptor length\n"); 25913 kmem_free(sense, sense_buflen); 25914 kmem_free(select, select_buflen); 25915 return (EIO); 25916 } 25917 sense_page = (uchar_t *) 25918 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 25919 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 25920 select_mhp->length = 0; 25921 select_mhp->bdesc_length = 0; 25922 } 25923 /* 25924 * Note: An audio control data structure could be created and overlayed 25925 * on the following in place of the array indexing method implemented. 25926 */ 25927 25928 /* Build the select data for the user volume data */ 25929 select_page[0] = MODEPAGE_AUDIO_CTRL; 25930 select_page[1] = 0xE; 25931 /* Set the immediate bit */ 25932 select_page[2] = 0x04; 25933 /* Zero out reserved fields */ 25934 select_page[3] = 0x00; 25935 select_page[4] = 0x00; 25936 /* Return sense data for fields not to be modified */ 25937 select_page[5] = sense_page[5]; 25938 select_page[6] = sense_page[6]; 25939 select_page[7] = sense_page[7]; 25940 /* Set the user specified volume levels for channel 0 and 1 */ 25941 select_page[8] = 0x01; 25942 select_page[9] = vol->channel0; 25943 select_page[10] = 0x02; 25944 select_page[11] = vol->channel1; 25945 /* Channel 2 and 3 are currently unsupported so return the sense data */ 25946 select_page[12] = sense_page[12]; 25947 select_page[13] = sense_page[13]; 25948 select_page[14] = sense_page[14]; 25949 select_page[15] = sense_page[15]; 25950 25951 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25952 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 25953 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25954 } else { 25955 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 25956 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25957 } 25958 25959 kmem_free(sense, sense_buflen); 25960 kmem_free(select, select_buflen); 25961 return (rval); 25962 } 25963 25964 25965 /* 25966 * Function: sr_read_sony_session_offset() 25967 * 25968 * Description: This routine is the driver entry point for handling CD-ROM 25969 * ioctl requests for session offset information. (CDROMREADOFFSET) 25970 * The address of the first track in the last session of a 25971 * multi-session CD-ROM is returned 25972 * 25973 * Note: This routine uses a vendor specific key value in the 25974 * command control field without implementing any vendor check here 25975 * or in the ioctl routine. 25976 * 25977 * Arguments: dev - the device 'dev_t' 25978 * data - pointer to an int to hold the requested address 25979 * flag - this argument is a pass through to ddi_copyxxx() 25980 * directly from the mode argument of ioctl(). 25981 * 25982 * Return Code: the code returned by sd_send_scsi_cmd() 25983 * EFAULT if ddi_copyxxx() fails 25984 * ENXIO if fail ddi_get_soft_state 25985 * EINVAL if data pointer is NULL 25986 */ 25987 25988 static int 25989 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 25990 { 25991 struct sd_lun *un; 25992 struct uscsi_cmd *com; 25993 caddr_t buffer; 25994 char cdb[CDB_GROUP1]; 25995 int session_offset = 0; 25996 int rval; 25997 25998 if (data == NULL) { 25999 return (EINVAL); 26000 } 26001 26002 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26003 (un->un_state == SD_STATE_OFFLINE)) { 26004 return (ENXIO); 26005 } 26006 26007 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26008 bzero(cdb, CDB_GROUP1); 26009 cdb[0] = SCMD_READ_TOC; 26010 /* 26011 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26012 * (4 byte TOC response header + 8 byte response data) 26013 */ 26014 cdb[8] = SONY_SESSION_OFFSET_LEN; 26015 /* Byte 9 is the control byte. 
A vendor specific value is used */ 26016 cdb[9] = SONY_SESSION_OFFSET_KEY; 26017 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26018 com->uscsi_cdb = cdb; 26019 com->uscsi_cdblen = CDB_GROUP1; 26020 com->uscsi_bufaddr = buffer; 26021 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 26022 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26023 26024 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26025 SD_PATH_STANDARD); 26026 if (rval != 0) { 26027 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26028 kmem_free(com, sizeof (*com)); 26029 return (rval); 26030 } 26031 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 26032 session_offset = 26033 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26034 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26035 /* 26036 * The returned offset is in current lbasize blocks. Convert 26037 * it to 2k blocks to return to the user 26038 */ 26039 if (un->un_tgt_blocksize == CDROM_BLK_512) { 26040 session_offset >>= 2; 26041 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 26042 session_offset >>= 1; 26043 } 26044 } 26045 26046 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 26047 rval = EFAULT; 26048 } 26049 26050 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 26051 kmem_free(com, sizeof (*com)); 26052 return (rval); 26053 } 26054 26055 26056 /* 26057 * Function: sd_wm_cache_constructor() 26058 * 26059 * Description: Cache Constructor for the wmap cache for the read/modify/write 26060 * devices. 26061 * 26062 * Arguments: wm - A pointer to the sd_w_map to be initialized. 26063 * un - sd_lun structure for the device. 26064 * flags - the km flags passed to the constructor 26065 * 26066 * Return Code: 0 on success. 26067 * -1 on failure. 26068 */ 26069 26070 /*ARGSUSED*/ 26071 static int 26072 sd_wm_cache_constructor(void *wm, void *un, int flags) 26073 { 26074 bzero(wm, sizeof (struct sd_w_map)); 26075 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 26076 return (0); 26077 } 26078 26079 26080 /* 26081 * Function: sd_wm_cache_destructor() 26082 * 26083 * Description: Cache destructor for the wmap cache for the read/modify/write 26084 * devices. 26085 * 26086 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 26087 * un - sd_lun structure for the device. 26088 */ 26089 /*ARGSUSED*/ 26090 static void 26091 sd_wm_cache_destructor(void *wm, void *un) 26092 { 26093 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 26094 } 26095 26096 26097 /* 26098 * Function: sd_range_lock() 26099 * 26100 * Description: Lock the specified range of blocks to ensure that a 26101 * read-modify-write is atomic and that no other I/O writes 26102 * to the same location. The range is specified in terms 26103 * of start and end blocks. Block numbers are the actual 26104 * media block numbers and not system block numbers. 26105 * 26106 * Arguments: un - sd_lun structure for the device. 26107 * startb - The starting block number 26108 * endb - The end block number 26109 * typ - type of i/o - simple/read_modify_write 26110 * 26111 * Return Code: wm - pointer to the wmap structure. 26112 * 26113 * Context: This routine can sleep.
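*
* Example (illustrative sketch of the intended pairing with
* sd_range_unlock(); not a literal excerpt of a caller):
*
*	wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
*	(read, modify and write back blocks [startb, endb])
*	sd_range_unlock(un, wm);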
26114 */ 26115 26116 static struct sd_w_map * 26117 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26118 { 26119 struct sd_w_map *wmp = NULL; 26120 struct sd_w_map *sl_wmp = NULL; 26121 struct sd_w_map *tmp_wmp; 26122 wm_state state = SD_WM_CHK_LIST; 26123 26124 26125 ASSERT(un != NULL); 26126 ASSERT(!mutex_owned(SD_MUTEX(un))); 26127 26128 mutex_enter(SD_MUTEX(un)); 26129 26130 while (state != SD_WM_DONE) { 26131 26132 switch (state) { 26133 case SD_WM_CHK_LIST: 26134 /* 26135 * This is the starting state. Check the wmap list 26136 * to see if the range is currently available. 26137 */ 26138 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26139 /* 26140 * If this is a simple write and no rmw 26141 * i/o is pending then try to lock the 26142 * range as the range should be available. 26143 */ 26144 state = SD_WM_LOCK_RANGE; 26145 } else { 26146 tmp_wmp = sd_get_range(un, startb, endb); 26147 if (tmp_wmp != NULL) { 26148 if ((wmp != NULL) && ONLIST(un, wmp)) { 26149 /* 26150 * Should not keep onlist wmps 26151 * while waiting; this macro 26152 * will also do wmp = NULL. 26153 */ 26154 FREE_ONLIST_WMAP(un, wmp); 26155 } 26156 /* 26157 * sl_wmp is the wmap on which the wait 26158 * is done; since tmp_wmp points 26159 * to the in-use wmap, set sl_wmp to 26160 * tmp_wmp and change the state to sleep 26161 */ 26162 sl_wmp = tmp_wmp; 26163 state = SD_WM_WAIT_MAP; 26164 } else { 26165 state = SD_WM_LOCK_RANGE; 26166 } 26167 26168 } 26169 break; 26170 26171 case SD_WM_LOCK_RANGE: 26172 ASSERT(un->un_wm_cache); 26173 /* 26174 * The range needs to be locked; try to get a wmap. 26175 * First attempt it with KM_NOSLEEP, as we want to avoid 26176 * a sleep if possible: we will have to release the sd 26177 * mutex if we have to sleep. 26178 */ 26179 if (wmp == NULL) 26180 wmp = kmem_cache_alloc(un->un_wm_cache, 26181 KM_NOSLEEP); 26182 if (wmp == NULL) { 26183 mutex_exit(SD_MUTEX(un)); 26184 _NOTE(DATA_READABLE_WITHOUT_LOCK 26185 (sd_lun::un_wm_cache)) 26186 wmp = kmem_cache_alloc(un->un_wm_cache, 26187 KM_SLEEP); 26188 mutex_enter(SD_MUTEX(un)); 26189 /* 26190 * We released the mutex, so recheck and go to 26191 * the check list state. 26192 */ 26193 state = SD_WM_CHK_LIST; 26194 } else { 26195 /* 26196 * We exit out of the state machine since we 26197 * have the wmap. Do the housekeeping first: 26198 * place the wmap on the wmap list if it is not 26199 * on it already and then set the state to done. 26200 */ 26201 wmp->wm_start = startb; 26202 wmp->wm_end = endb; 26203 wmp->wm_flags = typ | SD_WM_BUSY; 26204 if (typ & SD_WTYPE_RMW) { 26205 un->un_rmw_count++; 26206 } 26207 /* 26208 * If not already on the list then link it 26209 */ 26210 if (!ONLIST(un, wmp)) { 26211 wmp->wm_next = un->un_wm; 26212 wmp->wm_prev = NULL; 26213 if (wmp->wm_next) 26214 wmp->wm_next->wm_prev = wmp; 26215 un->un_wm = wmp; 26216 } 26217 state = SD_WM_DONE; 26218 } 26219 break; 26220 26221 case SD_WM_WAIT_MAP: 26222 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26223 /* 26224 * Wait is done on sl_wmp, which is set in the 26225 * check_list state. 26226 */ 26227 sl_wmp->wm_wanted_count++; 26228 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26229 sl_wmp->wm_wanted_count--; 26230 /* 26231 * We can reuse the memory from the completed sl_wmp 26232 * lock range for our new lock, but only if no one is 26233 * waiting for it.
26234 */ 26235 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26236 if (sl_wmp->wm_wanted_count == 0) { 26237 if (wmp != NULL) 26238 CHK_N_FREEWMP(un, wmp); 26239 wmp = sl_wmp; 26240 } 26241 sl_wmp = NULL; 26242 /* 26243 * After waking up, need to recheck for availability of 26244 * range. 26245 */ 26246 state = SD_WM_CHK_LIST; 26247 break; 26248 26249 default: 26250 panic("sd_range_lock: " 26251 "Unknown state %d in sd_range_lock", state); 26252 /*NOTREACHED*/ 26253 } /* switch(state) */ 26254 26255 } /* while(state != SD_WM_DONE) */ 26256 26257 mutex_exit(SD_MUTEX(un)); 26258 26259 ASSERT(wmp != NULL); 26260 26261 return (wmp); 26262 } 26263 26264 26265 /* 26266 * Function: sd_get_range() 26267 * 26268 * Description: Find out if there is any I/O overlapping this one. 26269 * Returns the write map of the 1st such I/O, NULL otherwise. 26270 * 26271 * Arguments: un - sd_lun structure for the device. 26272 * startb - The starting block number 26273 * endb - The end block number 26274 * 26275 * Return Code: wm - pointer to the wmap structure. 26276 */ 26277 26278 static struct sd_w_map * 26279 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26280 { 26281 struct sd_w_map *wmp; 26282 26283 ASSERT(un != NULL); 26284 26285 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26286 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26287 continue; 26288 } 26289 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26290 break; 26291 } 26292 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26293 break; 26294 } 26295 } 26296 26297 return (wmp); 26298 } 26299 26300 26301 /* 26302 * Function: sd_free_inlist_wmap() 26303 * 26304 * Description: Unlink and free a write map struct. 26305 * 26306 * Arguments: un - sd_lun structure for the device. 26307 * wmp - sd_w_map which needs to be unlinked. 26308 */ 26309 26310 static void 26311 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26312 { 26313 ASSERT(un != NULL); 26314 26315 if (un->un_wm == wmp) { 26316 un->un_wm = wmp->wm_next; 26317 } else { 26318 wmp->wm_prev->wm_next = wmp->wm_next; 26319 } 26320 26321 if (wmp->wm_next) { 26322 wmp->wm_next->wm_prev = wmp->wm_prev; 26323 } 26324 26325 wmp->wm_next = wmp->wm_prev = NULL; 26326 26327 kmem_cache_free(un->un_wm_cache, wmp); 26328 } 26329 26330 26331 /* 26332 * Function: sd_range_unlock() 26333 * 26334 * Description: Unlock the range locked by wm. 26335 * Free the write map if nobody else is waiting on it. 26336 * 26337 * Arguments: un - sd_lun structure for the device. 26338 * wm - sd_w_map which needs to be unlocked. 26339 */ 26340 26341 static void 26342 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26343 { 26344 ASSERT(un != NULL); 26345 ASSERT(wm != NULL); 26346 ASSERT(!mutex_owned(SD_MUTEX(un))); 26347 26348 mutex_enter(SD_MUTEX(un)); 26349 26350 if (wm->wm_flags & SD_WTYPE_RMW) { 26351 un->un_rmw_count--; 26352 } 26353 26354 if (wm->wm_wanted_count) { 26355 wm->wm_flags = 0; 26356 /* 26357 * Broadcast that the wmap is available now. 26358 */ 26359 cv_broadcast(&wm->wm_avail); 26360 } else { 26361 /* 26362 * If no one is waiting on the map, it should be freed. 26363 */ 26364 sd_free_inlist_wmap(un, wm); 26365 } 26366 26367 mutex_exit(SD_MUTEX(un)); 26368 } 26369 26370 26371 /* 26372 * Function: sd_read_modify_write_task 26373 * 26374 * Description: Called from a taskq thread to initiate the write phase of 26375 * a read-modify-write request. This is used for targets where 26376 * un->un_sys_blocksize != un->un_tgt_blocksize.
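*
* For example (illustrative numbers): with un_sys_blocksize of 512
* and un_tgt_blocksize of 2048, a 512-byte write must be widened to
* the containing 2048-byte target block; the read phase fetches that
* block, the user data is merged into it, and this routine sends the
* merged block back down the iostart chain as the write phase.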
26377 * 26378 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26379 * 26380 * Context: Called under taskq thread context. 26381 */ 26382 26383 static void 26384 sd_read_modify_write_task(void *arg) 26385 { 26386 struct sd_mapblocksize_info *bsp; 26387 struct buf *bp; 26388 struct sd_xbuf *xp; 26389 struct sd_lun *un; 26390 26391 bp = arg; /* The bp is given in arg */ 26392 ASSERT(bp != NULL); 26393 26394 /* Get the pointer to the layer-private data struct */ 26395 xp = SD_GET_XBUF(bp); 26396 ASSERT(xp != NULL); 26397 bsp = xp->xb_private; 26398 ASSERT(bsp != NULL); 26399 26400 un = SD_GET_UN(bp); 26401 ASSERT(un != NULL); 26402 ASSERT(!mutex_owned(SD_MUTEX(un))); 26403 26404 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26405 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26406 26407 /* 26408 * This is the write phase of a read-modify-write request, called 26409 * under the context of a taskq thread in response to the completion 26410 * of the read portion of the rmw request completing under interrupt 26411 * context. The write request must be sent from here down the iostart 26412 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26413 * we use the layer index saved in the layer-private data area. 26414 */ 26415 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26416 26417 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26418 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26419 } 26420 26421 26422 /* 26423 * Function: sddump_do_read_of_rmw() 26424 * 26425 * Description: This routine will be called from sddump. If sddump is called 26426 * with an I/O which is not aligned on a device blocksize boundary 26427 * then the write has to be converted to a read-modify-write. 26428 * Do the read part here in order to keep sddump simple. 26429 * Note that the sd_mutex is held across the call to this 26430 * routine. 26431 * 26432 * Arguments: un - sd_lun 26433 * blkno - block number in terms of media block size. 26434 * nblk - number of blocks. 26435 * bpp - pointer to pointer to the buf structure. On return 26436 * from this function, *bpp points to the valid buffer 26437 * to which the write has to be done. 26438 * 26439 * Return Code: 0 for success or errno-type return code 26440 */ 26441 26442 static int 26443 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26444 struct buf **bpp) 26445 { 26446 int err; 26447 int i; 26448 int rval; 26449 struct buf *bp; 26450 struct scsi_pkt *pkt = NULL; 26451 uint32_t target_blocksize; 26452 26453 ASSERT(un != NULL); 26454 ASSERT(mutex_owned(SD_MUTEX(un))); 26455 26456 target_blocksize = un->un_tgt_blocksize; 26457 26458 mutex_exit(SD_MUTEX(un)); 26459 26460 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26461 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26462 if (bp == NULL) { 26463 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26464 "no resources for dumping; giving up"); 26465 err = ENOMEM; 26466 goto done; 26467 } 26468 26469 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26470 blkno, nblk); 26471 if (rval != 0) { 26472 scsi_free_consistent_buf(bp); 26473 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26474 "no resources for dumping; giving up"); 26475 err = ENOMEM; 26476 goto done; 26477 } 26478 26479 pkt->pkt_flags |= FLAG_NOINTR; 26480 26481 err = EIO; 26482 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26483 26484 /* 26485 * Scsi_poll returns 0 (success) if the command completes and 26486 * the status block is STATUS_GOOD.
We should only check 26487 * errors if this condition is not true. Even then we should 26488 * send our own request sense packet only if we have a check 26489 * condition and auto request sense has not been performed by 26490 * the hba. 26491 */ 26492 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26493 26494 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26495 err = 0; 26496 break; 26497 } 26498 26499 /* 26500 * Check CMD_DEV_GONE 1st, give up if device is gone, 26501 * no need to read RQS data. 26502 */ 26503 if (pkt->pkt_reason == CMD_DEV_GONE) { 26504 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26505 "Device is gone\n"); 26506 break; 26507 } 26508 26509 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26510 SD_INFO(SD_LOG_DUMP, un, 26511 "sddump: read failed with CHECK, try # %d\n", i); 26512 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26513 (void) sd_send_polled_RQS(un); 26514 } 26515 26516 continue; 26517 } 26518 26519 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26520 int reset_retval = 0; 26521 26522 SD_INFO(SD_LOG_DUMP, un, 26523 "sddump: read failed with BUSY, try # %d\n", i); 26524 26525 if (un->un_f_lun_reset_enabled == TRUE) { 26526 reset_retval = scsi_reset(SD_ADDRESS(un), 26527 RESET_LUN); 26528 } 26529 if (reset_retval == 0) { 26530 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26531 } 26532 (void) sd_send_polled_RQS(un); 26533 26534 } else { 26535 SD_INFO(SD_LOG_DUMP, un, 26536 "sddump: read failed with 0x%x, try # %d\n", 26537 SD_GET_PKT_STATUS(pkt), i); 26538 mutex_enter(SD_MUTEX(un)); 26539 sd_reset_target(un, pkt); 26540 mutex_exit(SD_MUTEX(un)); 26541 } 26542 26543 /* 26544 * If we are not getting anywhere with lun/target resets, 26545 * let's reset the bus. 26546 */ 26547 if (i > SD_NDUMP_RETRIES/2) { 26548 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26549 (void) sd_send_polled_RQS(un); 26550 } 26551 26552 } 26553 scsi_destroy_pkt(pkt); 26554 26555 if (err != 0) { 26556 scsi_free_consistent_buf(bp); 26557 *bpp = NULL; 26558 } else { 26559 *bpp = bp; 26560 } 26561 26562 done: 26563 mutex_enter(SD_MUTEX(un)); 26564 return (err); 26565 } 26566 26567 26568 /* 26569 * Function: sd_failfast_flushq 26570 * 26571 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26572 * in b_flags and move them onto the failfast queue, then kick 26573 * off a thread to return all bp's on the failfast queue to 26574 * their owners with an error set. 26575 * 26576 * Arguments: un - pointer to the soft state struct for the instance. 26577 * 26578 * Context: may execute in interrupt context. 26579 */ 26580 26581 static void 26582 sd_failfast_flushq(struct sd_lun *un) 26583 { 26584 struct buf *bp; 26585 struct buf *next_waitq_bp; 26586 struct buf *prev_waitq_bp = NULL; 26587 26588 ASSERT(un != NULL); 26589 ASSERT(mutex_owned(SD_MUTEX(un))); 26590 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26591 ASSERT(un->un_failfast_bp == NULL); 26592 26593 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26594 "sd_failfast_flushq: entry: un:0x%p\n", un); 26595 26596 /* 26597 * Check if we should flush all bufs when entering failfast state, or 26598 * just those with B_FAILFAST set. 26599 */ 26600 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26601 /* 26602 * Move *all* bp's on the wait queue to the failfast flush 26603 * queue, including those that do NOT have B_FAILFAST set. 
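* The entire waitq is spliced onto the tail of the failfast queue
* in one step; only the per-bp kstat updates below require walking
* the queue.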
26604 */ 26605 if (un->un_failfast_headp == NULL) { 26606 ASSERT(un->un_failfast_tailp == NULL); 26607 un->un_failfast_headp = un->un_waitq_headp; 26608 } else { 26609 ASSERT(un->un_failfast_tailp != NULL); 26610 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26611 } 26612 26613 un->un_failfast_tailp = un->un_waitq_tailp; 26614 26615 /* update kstat for each bp moved out of the waitq */ 26616 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26617 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26618 } 26619 26620 /* empty the waitq */ 26621 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26622 26623 } else { 26624 /* 26625 * Go thru the wait queue, pick off all entries with 26626 * B_FAILFAST set, and move these onto the failfast queue. 26627 */ 26628 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26629 /* 26630 * Save the pointer to the next bp on the wait queue, 26631 * so we get to it on the next iteration of this loop. 26632 */ 26633 next_waitq_bp = bp->av_forw; 26634 26635 /* 26636 * If this bp from the wait queue does NOT have 26637 * B_FAILFAST set, just move on to the next element 26638 * in the wait queue. Note, this is the only place 26639 * where it is correct to set prev_waitq_bp. 26640 */ 26641 if ((bp->b_flags & B_FAILFAST) == 0) { 26642 prev_waitq_bp = bp; 26643 continue; 26644 } 26645 26646 /* 26647 * Remove the bp from the wait queue. 26648 */ 26649 if (bp == un->un_waitq_headp) { 26650 /* The bp is the first element of the waitq. */ 26651 un->un_waitq_headp = next_waitq_bp; 26652 if (un->un_waitq_headp == NULL) { 26653 /* The wait queue is now empty */ 26654 un->un_waitq_tailp = NULL; 26655 } 26656 } else { 26657 /* 26658 * The bp is either somewhere in the middle 26659 * or at the end of the wait queue. 26660 */ 26661 ASSERT(un->un_waitq_headp != NULL); 26662 ASSERT(prev_waitq_bp != NULL); 26663 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26664 == 0); 26665 if (bp == un->un_waitq_tailp) { 26666 /* bp is the last entry on the waitq. */ 26667 ASSERT(next_waitq_bp == NULL); 26668 un->un_waitq_tailp = prev_waitq_bp; 26669 } 26670 prev_waitq_bp->av_forw = next_waitq_bp; 26671 } 26672 bp->av_forw = NULL; 26673 26674 /* 26675 * update kstat since the bp is moved out of 26676 * the waitq 26677 */ 26678 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26679 26680 /* 26681 * Now put the bp onto the failfast queue. 26682 */ 26683 if (un->un_failfast_headp == NULL) { 26684 /* failfast queue is currently empty */ 26685 ASSERT(un->un_failfast_tailp == NULL); 26686 un->un_failfast_headp = 26687 un->un_failfast_tailp = bp; 26688 } else { 26689 /* Add the bp to the end of the failfast q */ 26690 ASSERT(un->un_failfast_tailp != NULL); 26691 ASSERT(un->un_failfast_tailp->b_flags & 26692 B_FAILFAST); 26693 un->un_failfast_tailp->av_forw = bp; 26694 un->un_failfast_tailp = bp; 26695 } 26696 } 26697 } 26698 26699 /* 26700 * Now return all bp's on the failfast queue to their owners. 26701 */ 26702 while ((bp = un->un_failfast_headp) != NULL) { 26703 26704 un->un_failfast_headp = bp->av_forw; 26705 if (un->un_failfast_headp == NULL) { 26706 un->un_failfast_tailp = NULL; 26707 } 26708 26709 /* 26710 * We want to return the bp with a failure error code, but 26711 * we do not want a call to sd_start_cmds() to occur here, 26712 * so use sd_return_failed_command_no_restart() instead of 26713 * sd_return_failed_command(). 26714 */ 26715 sd_return_failed_command_no_restart(un, bp, EIO); 26716 } 26717 26718 /* Flush the xbuf queues if required. 
*/ 26719 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 26720 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 26721 } 26722 26723 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26724 "sd_failfast_flushq: exit: un:0x%p\n", un); 26725 } 26726 26727 26728 /* 26729 * Function: sd_failfast_flushq_callback 26730 * 26731 * Description: Return TRUE if the given bp meets the criteria for failfast 26732 * flushing. Used with ddi_xbuf_flushq(9F). 26733 * 26734 * Arguments: bp - ptr to buf struct to be examined. 26735 * 26736 * Context: Any 26737 */ 26738 26739 static int 26740 sd_failfast_flushq_callback(struct buf *bp) 26741 { 26742 /* 26743 * Return TRUE if (1) we want to flush ALL bufs when the failfast 26744 * state is entered; OR (2) the given bp has B_FAILFAST set. 26745 */ 26746 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 26747 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 26748 } 26749 26750 26751 26752 #if defined(__i386) || defined(__amd64) 26753 /* 26754 * Function: sd_setup_next_xfer 26755 * 26756 * Description: Prepare next I/O operation using DMA_PARTIAL 26757 * 26758 */ 26759 26760 static int 26761 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 26762 struct scsi_pkt *pkt, struct sd_xbuf *xp) 26763 { 26764 ssize_t num_blks_not_xfered; 26765 daddr_t strt_blk_num; 26766 ssize_t bytes_not_xfered; 26767 int rval; 26768 26769 ASSERT(pkt->pkt_resid == 0); 26770 26771 /* 26772 * Calculate next block number and amount to be transferred. 26773 * 26774 * How much data has NOT been transferred to the HBA yet. 26775 */ 26776 bytes_not_xfered = xp->xb_dma_resid; 26777 26778 /* 26779 * Figure out how many blocks have NOT been transferred to the HBA yet. 26780 */ 26781 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 26782 26783 /* 26784 * Set the starting block number to the end of what WAS transferred. 26785 */ 26786 strt_blk_num = xp->xb_blkno + 26787 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 26788 26789 /* 26790 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 26791 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 26792 * the disk mutex here. 26793 */ 26794 rval = sd_setup_next_rw_pkt(un, pkt, bp, 26795 strt_blk_num, num_blks_not_xfered); 26796 26797 if (rval == 0) { 26798 26799 /* 26800 * Success. 26801 * 26802 * Adjust things if there are still more blocks to be 26803 * transferred. 26804 */ 26805 xp->xb_dma_resid = pkt->pkt_resid; 26806 pkt->pkt_resid = 0; 26807 26808 return (1); 26809 } 26810 26811 /* 26812 * There's really only one possible return value from 26813 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 26814 * returns NULL. 26815 */ 26816 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 26817 26818 bp->b_resid = bp->b_bcount; 26819 bp->b_flags |= B_ERROR; 26820 26821 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26822 "Error setting up next portion of DMA transfer\n"); 26823 26824 return (0); 26825 } 26826 #endif 26827 26828 /* 26829 * Function: sd_panic_for_res_conflict 26830 * 26831 * Description: Call panic with a string formatted with "Reservation Conflict" 26832 * and a human readable identifier indicating the SD instance 26833 * that experienced the reservation conflict. 26834 * 26835 * Arguments: un - pointer to the soft state struct for the instance. 26836 * 26837 * Context: may execute in interrupt context.
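*
* The resulting panic string takes the form (device path illustrative):
*
*	Reservation Conflict
*	Disk: /pci@1f,4000/scsi@3/sd@1,0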
26838 */ 26839 26840 #define SD_RESV_CONFLICT_FMT_LEN 40 26841 void 26842 sd_panic_for_res_conflict(struct sd_lun *un) 26843 { 26844 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26845 char path_str[MAXPATHLEN]; 26846 26847 (void) snprintf(panic_str, sizeof (panic_str), 26848 "Reservation Conflict\nDisk: %s", 26849 ddi_pathname(SD_DEVINFO(un), path_str)); 26850 26851 panic(panic_str); 26852 } 26853 26854 /* 26855 * Note: The following sd_faultinjection_ioctl( ) routines implement 26856 * driver support for handling fault injection for error analysis 26857 * causing faults in multiple layers of the driver. 26858 * 26859 */ 26860 26861 #ifdef SD_FAULT_INJECTION 26862 static uint_t sd_fault_injection_on = 0; 26863 26864 /* 26865 * Function: sd_faultinjection_ioctl() 26866 * 26867 * Description: This routine is the driver entry point for handling 26868 * faultinjection ioctls to inject errors into the 26869 * layer model 26870 * 26871 * Arguments: cmd - the ioctl cmd received 26872 * arg - the arguments from user and returns 26873 */ 26874 26875 static void 26876 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 26877 26878 uint_t i; 26879 uint_t rval; 26880 26881 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 26882 26883 mutex_enter(SD_MUTEX(un)); 26884 26885 switch (cmd) { 26886 case SDIOCRUN: 26887 /* Allow pushed faults to be injected */ 26888 SD_INFO(SD_LOG_SDTEST, un, 26889 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 26890 26891 sd_fault_injection_on = 1; 26892 26893 SD_INFO(SD_LOG_IOERR, un, 26894 "sd_faultinjection_ioctl: run finished\n"); 26895 break; 26896 26897 case SDIOCSTART: 26898 /* Start Injection Session */ 26899 SD_INFO(SD_LOG_SDTEST, un, 26900 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 26901 26902 sd_fault_injection_on = 0; 26903 un->sd_injection_mask = 0xFFFFFFFF; 26904 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26905 un->sd_fi_fifo_pkt[i] = NULL; 26906 un->sd_fi_fifo_xb[i] = NULL; 26907 un->sd_fi_fifo_un[i] = NULL; 26908 un->sd_fi_fifo_arq[i] = NULL; 26909 } 26910 un->sd_fi_fifo_start = 0; 26911 un->sd_fi_fifo_end = 0; 26912 26913 mutex_enter(&(un->un_fi_mutex)); 26914 un->sd_fi_log[0] = '\0'; 26915 un->sd_fi_buf_len = 0; 26916 mutex_exit(&(un->un_fi_mutex)); 26917 26918 SD_INFO(SD_LOG_IOERR, un, 26919 "sd_faultinjection_ioctl: start finished\n"); 26920 break; 26921 26922 case SDIOCSTOP: 26923 /* Stop Injection Session */ 26924 SD_INFO(SD_LOG_SDTEST, un, 26925 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 26926 sd_fault_injection_on = 0; 26927 un->sd_injection_mask = 0x0; 26928 26929 /* Empty stray or unuseds structs from fifo */ 26930 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26931 if (un->sd_fi_fifo_pkt[i] != NULL) { 26932 kmem_free(un->sd_fi_fifo_pkt[i], 26933 sizeof (struct sd_fi_pkt)); 26934 } 26935 if (un->sd_fi_fifo_xb[i] != NULL) { 26936 kmem_free(un->sd_fi_fifo_xb[i], 26937 sizeof (struct sd_fi_xb)); 26938 } 26939 if (un->sd_fi_fifo_un[i] != NULL) { 26940 kmem_free(un->sd_fi_fifo_un[i], 26941 sizeof (struct sd_fi_un)); 26942 } 26943 if (un->sd_fi_fifo_arq[i] != NULL) { 26944 kmem_free(un->sd_fi_fifo_arq[i], 26945 sizeof (struct sd_fi_arq)); 26946 } 26947 un->sd_fi_fifo_pkt[i] = NULL; 26948 un->sd_fi_fifo_un[i] = NULL; 26949 un->sd_fi_fifo_xb[i] = NULL; 26950 un->sd_fi_fifo_arq[i] = NULL; 26951 } 26952 un->sd_fi_fifo_start = 0; 26953 un->sd_fi_fifo_end = 0; 26954 26955 SD_INFO(SD_LOG_IOERR, un, 26956 "sd_faultinjection_ioctl: stop finished\n"); 26957 break; 26958 26959 case SDIOCINSERTPKT: 26960 /* Store a 
packet struct to be pushed onto fifo */ 26961 SD_INFO(SD_LOG_SDTEST, un, 26962 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 26963 26964 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26965 26966 sd_fault_injection_on = 0; 26967 26968 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 26969 if (un->sd_fi_fifo_pkt[i] != NULL) { 26970 kmem_free(un->sd_fi_fifo_pkt[i], 26971 sizeof (struct sd_fi_pkt)); 26972 } 26973 if (arg != NULL) { 26974 un->sd_fi_fifo_pkt[i] = 26975 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 26976 if (un->sd_fi_fifo_pkt[i] == NULL) { 26977 /* Alloc failed don't store anything */ 26978 break; 26979 } 26980 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 26981 sizeof (struct sd_fi_pkt), 0); 26982 if (rval == -1) { 26983 kmem_free(un->sd_fi_fifo_pkt[i], 26984 sizeof (struct sd_fi_pkt)); 26985 un->sd_fi_fifo_pkt[i] = NULL; 26986 } 26987 } else { 26988 SD_INFO(SD_LOG_IOERR, un, 26989 "sd_faultinjection_ioctl: pkt null\n"); 26990 } 26991 break; 26992 26993 case SDIOCINSERTXB: 26994 /* Store a xb struct to be pushed onto fifo */ 26995 SD_INFO(SD_LOG_SDTEST, un, 26996 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 26997 26998 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26999 27000 sd_fault_injection_on = 0; 27001 27002 if (un->sd_fi_fifo_xb[i] != NULL) { 27003 kmem_free(un->sd_fi_fifo_xb[i], 27004 sizeof (struct sd_fi_xb)); 27005 un->sd_fi_fifo_xb[i] = NULL; 27006 } 27007 if (arg != NULL) { 27008 un->sd_fi_fifo_xb[i] = 27009 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27010 if (un->sd_fi_fifo_xb[i] == NULL) { 27011 /* Alloc failed don't store anything */ 27012 break; 27013 } 27014 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27015 sizeof (struct sd_fi_xb), 0); 27016 27017 if (rval == -1) { 27018 kmem_free(un->sd_fi_fifo_xb[i], 27019 sizeof (struct sd_fi_xb)); 27020 un->sd_fi_fifo_xb[i] = NULL; 27021 } 27022 } else { 27023 SD_INFO(SD_LOG_IOERR, un, 27024 "sd_faultinjection_ioctl: xb null\n"); 27025 } 27026 break; 27027 27028 case SDIOCINSERTUN: 27029 /* Store a un struct to be pushed onto fifo */ 27030 SD_INFO(SD_LOG_SDTEST, un, 27031 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27032 27033 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27034 27035 sd_fault_injection_on = 0; 27036 27037 if (un->sd_fi_fifo_un[i] != NULL) { 27038 kmem_free(un->sd_fi_fifo_un[i], 27039 sizeof (struct sd_fi_un)); 27040 un->sd_fi_fifo_un[i] = NULL; 27041 } 27042 if (arg != NULL) { 27043 un->sd_fi_fifo_un[i] = 27044 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27045 if (un->sd_fi_fifo_un[i] == NULL) { 27046 /* Alloc failed don't store anything */ 27047 break; 27048 } 27049 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27050 sizeof (struct sd_fi_un), 0); 27051 if (rval == -1) { 27052 kmem_free(un->sd_fi_fifo_un[i], 27053 sizeof (struct sd_fi_un)); 27054 un->sd_fi_fifo_un[i] = NULL; 27055 } 27056 27057 } else { 27058 SD_INFO(SD_LOG_IOERR, un, 27059 "sd_faultinjection_ioctl: un null\n"); 27060 } 27061 27062 break; 27063 27064 case SDIOCINSERTARQ: 27065 /* Store a arq struct to be pushed onto fifo */ 27066 SD_INFO(SD_LOG_SDTEST, un, 27067 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27068 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27069 27070 sd_fault_injection_on = 0; 27071 27072 if (un->sd_fi_fifo_arq[i] != NULL) { 27073 kmem_free(un->sd_fi_fifo_arq[i], 27074 sizeof (struct sd_fi_arq)); 27075 un->sd_fi_fifo_arq[i] = NULL; 27076 } 27077 if (arg != NULL) { 27078 un->sd_fi_fifo_arq[i] = 27079 kmem_alloc(sizeof (struct sd_fi_arq), 
KM_NOSLEEP); 27080 if (un->sd_fi_fifo_arq[i] == NULL) { 27081 /* Alloc failed; don't store anything */ 27082 break; 27083 } 27084 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 27085 sizeof (struct sd_fi_arq), 0); 27086 if (rval == -1) { 27087 kmem_free(un->sd_fi_fifo_arq[i], 27088 sizeof (struct sd_fi_arq)); 27089 un->sd_fi_fifo_arq[i] = NULL; 27090 } 27091 27092 } else { 27093 SD_INFO(SD_LOG_IOERR, un, 27094 "sd_faultinjection_ioctl: arq null\n"); 27095 } 27096 27097 break; 27098 27099 case SDIOCPUSH: 27100 /* Push stored xb, pkt, un, and arq onto fifo */ 27101 sd_fault_injection_on = 0; 27102 27103 if (arg != NULL) { 27104 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 27105 if (rval != -1 && 27106 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27107 un->sd_fi_fifo_end += i; 27108 } 27109 } else { 27110 SD_INFO(SD_LOG_IOERR, un, 27111 "sd_faultinjection_ioctl: push arg null\n"); 27112 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 27113 un->sd_fi_fifo_end++; 27114 } 27115 } 27116 SD_INFO(SD_LOG_IOERR, un, 27117 "sd_faultinjection_ioctl: push to end=%d\n", 27118 un->sd_fi_fifo_end); 27119 break; 27120 27121 case SDIOCRETRIEVE: 27122 /* Return buffer of log from Injection session */ 27123 SD_INFO(SD_LOG_SDTEST, un, 27124 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 27125 27126 sd_fault_injection_on = 0; 27127 27128 mutex_enter(&(un->un_fi_mutex)); 27129 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 27130 un->sd_fi_buf_len+1, 0); 27131 mutex_exit(&(un->un_fi_mutex)); 27132 27133 if (rval == -1) { 27134 /* 27135 * arg is possibly invalid; set 27136 * it to NULL for the return 27137 */ 27138 arg = NULL; 27139 } 27140 break; 27141 } 27142 27143 mutex_exit(SD_MUTEX(un)); 27144 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 27145 " exit\n"); 27146 } 27147 27148 27149 /* 27150 * Function: sd_injection_log() 27151 * 27152 * Description: This routine adds buf to the already existing injection log 27153 * for retrieval via sd_faultinjection_ioctl() for use in fault 27154 * detection and recovery 27155 * 27156 * Arguments: buf - the string to add to the log 27157 */ 27158 27159 static void 27160 sd_injection_log(char *buf, struct sd_lun *un) 27161 { 27162 uint_t len; 27163 27164 ASSERT(un != NULL); 27165 ASSERT(buf != NULL); 27166 27167 mutex_enter(&(un->un_fi_mutex)); 27168 27169 len = min(strlen(buf), 255); 27170 /* Add logged value to Injection log to be returned later */ 27171 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 27172 uint_t offset = strlen((char *)un->sd_fi_log); 27173 char *destp = (char *)un->sd_fi_log + offset; 27174 int i; 27175 for (i = 0; i < len; i++) { 27176 *destp++ = *buf++; 27177 } 27178 un->sd_fi_buf_len += len; 27179 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 27180 } 27181 27182 mutex_exit(&(un->un_fi_mutex)); 27183 } 27184 27185 27186 /* 27187 * Function: sd_faultinjection() 27188 * 27189 * Description: This routine takes the pkt and changes its 27190 * content based on the error injection scenario.
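* Faults are staged by sd_faultinjection_ioctl(): a typical session
* issues SDIOCSTART, one or more SDIOCINSERT* ioctls to stage pkt,
* xb, un and arq structs, SDIOCPUSH to queue them, SDIOCRUN to
* enable injection, and SDIOCRETRIEVE to collect the injection log.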
27191 * 27192 * Arguments: pktp - packet to be changed 27193 */ 27194 27195 static void 27196 sd_faultinjection(struct scsi_pkt *pktp) 27197 { 27198 uint_t i; 27199 struct sd_fi_pkt *fi_pkt; 27200 struct sd_fi_xb *fi_xb; 27201 struct sd_fi_un *fi_un; 27202 struct sd_fi_arq *fi_arq; 27203 struct buf *bp; 27204 struct sd_xbuf *xb; 27205 struct sd_lun *un; 27206 27207 ASSERT(pktp != NULL); 27208 27209 /* pull bp xb and un from pktp */ 27210 bp = (struct buf *)pktp->pkt_private; 27211 xb = SD_GET_XBUF(bp); 27212 un = SD_GET_UN(bp); 27213 27214 ASSERT(un != NULL); 27215 27216 mutex_enter(SD_MUTEX(un)); 27217 27218 SD_TRACE(SD_LOG_SDTEST, un, 27219 "sd_faultinjection: entry Injection from sdintr\n"); 27220 27221 /* if injection is off return */ 27222 if (sd_fault_injection_on == 0 || 27223 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27224 mutex_exit(SD_MUTEX(un)); 27225 return; 27226 } 27227 27228 27229 /* take next set off fifo */ 27230 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27231 27232 fi_pkt = un->sd_fi_fifo_pkt[i]; 27233 fi_xb = un->sd_fi_fifo_xb[i]; 27234 fi_un = un->sd_fi_fifo_un[i]; 27235 fi_arq = un->sd_fi_fifo_arq[i]; 27236 27237 27238 /* set variables accordingly */ 27239 /* set pkt if it was on fifo */ 27240 if (fi_pkt != NULL) { 27241 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27242 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27243 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27244 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27245 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27246 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27247 27248 } 27249 27250 /* set xb if it was on fifo */ 27251 if (fi_xb != NULL) { 27252 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27253 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27254 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27255 SD_CONDSET(xb, xb, xb_victim_retry_count, 27256 "xb_victim_retry_count"); 27257 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27258 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27259 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27260 27261 /* copy in block data from sense */ 27262 if (fi_xb->xb_sense_data[0] != -1) { 27263 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27264 SENSE_LENGTH); 27265 } 27266 27267 /* copy in extended sense codes */ 27268 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27269 "es_code"); 27270 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27271 "es_key"); 27272 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27273 "es_add_code"); 27274 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27275 es_qual_code, "es_qual_code"); 27276 } 27277 27278 /* set un if it was on fifo */ 27279 if (fi_un != NULL) { 27280 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27281 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27282 SD_CONDSET(un, un, un_reset_retry_count, 27283 "un_reset_retry_count"); 27284 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27285 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27286 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27287 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27288 "un_f_allow_bus_device_reset"); 27289 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27290 27291 } 27292 27293 /* copy in auto request sense if it was on fifo */ 27294 if (fi_arq != NULL) { 27295 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27296 } 27297 27298 /* free structs */ 27299 if (un->sd_fi_fifo_pkt[i] != NULL) { 27300 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 27301 } 27302 if (un->sd_fi_fifo_xb[i] != NULL) { 27303 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 27304 } 27305 if (un->sd_fi_fifo_un[i] != NULL) { 27306 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 27307 } 27308 if (un->sd_fi_fifo_arq[i] != NULL) { 27309 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 27310 } 27311 27312 /* 27313 * kmem_free does not guarantee that the pointers are set to NULL. 27314 * Since we use these pointers to determine whether we have set 27315 * values or not, let's confirm they are always 27316 * NULL after the free. 27317 */ 27318 un->sd_fi_fifo_pkt[i] = NULL; 27319 un->sd_fi_fifo_un[i] = NULL; 27320 un->sd_fi_fifo_xb[i] = NULL; 27321 un->sd_fi_fifo_arq[i] = NULL; 27322 27323 un->sd_fi_fifo_start++; 27324 27325 mutex_exit(SD_MUTEX(un)); 27326 27327 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 27328 } 27329 27330 #endif /* SD_FAULT_INJECTION */ 27331 27332 /* 27333 * This routine is invoked in sd_unit_attach(). Before calling it, the 27334 * properties in the conf file should already have been processed, 27335 * including the "hotpluggable" property. 27336 * 27337 * The sd driver distinguishes 3 different types of devices: removable media, 27338 * non-removable media, and hotpluggable. Below the differences are defined: 27339 * 27340 * 1. Device ID 27341 * 27342 * The device ID of a device is used to identify this device. Refer to 27343 * ddi_devid_register(9F). 27344 * 27345 * For a non-removable media disk device which can provide 0x80 or 0x83 27346 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 27347 * device ID is created to identify this device. For other non-removable 27348 * media devices, a default device ID is created only if this device has 27349 * at least 2 alternate cylinders. Otherwise, this device has no devid. 27350 * 27351 * ------------------------------------------------------- 27352 * removable media hotpluggable | Can Have Device ID 27353 * ------------------------------------------------------- 27354 * false false | Yes 27355 * false true | Yes 27356 * true x | No 27357 * ------------------------------------------------------ 27358 * 27359 * 27360 * 2. SCSI group 4 commands 27361 * 27362 * In the SCSI specs, only some commands in the group 4 command set can use 27363 * 8-byte addresses that can be used to access >2TB storage spaces. 27364 * Other commands have no such capability. Without supporting group 4, 27365 * it is impossible to make full use of the storage space of a disk with a 27366 * capacity larger than 2TB. 27367 * 27368 * ----------------------------------------------- 27369 * removable media hotpluggable LP64 | Group 27370 * ----------------------------------------------- 27371 * false false false | 1 27372 * false false true | 4 27373 * false true false | 1 27374 * false true true | 4 27375 * true x x | 5 27376 * ----------------------------------------------- 27377 * 27378 * 27379 * 3. Check for VTOC Label 27380 * 27381 * If a direct-access disk has no EFI label, sd will check if it has a 27382 * valid VTOC label. Now, sd also does that check for removable media 27383 * and hotpluggable devices.
27384 * 27385 * -------------------------------------------------------------- 27386 * Direct-Access removable media hotpluggable | Check Label 27387 * ------------------------------------------------------------- 27388 * false false false | No 27389 * false false true | No 27390 * false true false | Yes 27391 * false true true | Yes 27392 * true x x | Yes 27393 * -------------------------------------------------------------- 27394 * 27395 * 27396 * 4. Building default VTOC label 27397 * 27398 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27399 * If those devices have no valid VTOC label, sd(7d) will attempt to 27400 * create default VTOC for them. Currently sd creates default VTOC label 27401 * for all devices on x86 platform (VTOC_16), but only for removable 27402 * media devices on SPARC (VTOC_8). 27403 * 27404 * ----------------------------------------------------------- 27405 * removable media hotpluggable platform | Default Label 27406 * ----------------------------------------------------------- 27407 * false false sparc | No 27408 * false true x86 | Yes 27409 * false true sparc | Yes 27410 * true x x | Yes 27411 * ---------------------------------------------------------- 27412 * 27413 * 27414 * 5. Supported blocksizes of target devices 27415 * 27416 * Sd supports non-512-byte blocksize for removable media devices only. 27417 * For other devices, only 512-byte blocksize is supported. This may be 27418 * changed in near future because some RAID devices require non-512-byte 27419 * blocksize 27420 * 27421 * ----------------------------------------------------------- 27422 * removable media hotpluggable | non-512-byte blocksize 27423 * ----------------------------------------------------------- 27424 * false false | No 27425 * false true | No 27426 * true x | Yes 27427 * ----------------------------------------------------------- 27428 * 27429 * 27430 * 6. Automatic mount & unmount 27431 * 27432 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27433 * if a device is removable media device. It return 1 for removable media 27434 * devices, and 0 for others. 27435 * 27436 * The automatic mounting subsystem should distinguish between the types 27437 * of devices and apply automounting policies to each. 27438 * 27439 * 27440 * 7. fdisk partition management 27441 * 27442 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27443 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27444 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27445 * fdisk partitions on both x86 and SPARC platform. 27446 * 27447 * ----------------------------------------------------------- 27448 * platform removable media USB/1394 | fdisk supported 27449 * ----------------------------------------------------------- 27450 * x86 X X | true 27451 * ------------------------------------------------------------ 27452 * sparc X X | false 27453 * ------------------------------------------------------------ 27454 * 27455 * 27456 * 8. MBOOT/MBR 27457 * 27458 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27459 * read/write mboot for removable media devices on sparc platform. 
27460 * 27461 * ----------------------------------------------------------- 27462 * platform removable media USB/1394 | mboot supported 27463 * ----------------------------------------------------------- 27464 * x86 X X | true 27465 * ------------------------------------------------------------ 27466 * sparc false false | false 27467 * sparc false true | true 27468 * sparc true false | true 27469 * sparc true true | true 27470 * ------------------------------------------------------------ 27471 * 27472 * 27473 * 9. error handling during opening device 27474 * 27475 * If failed to open a disk device, an errno is returned. For some kinds 27476 * of errors, different errno is returned depending on if this device is 27477 * a removable media device. This brings USB/1394 hard disks in line with 27478 * expected hard disk behavior. It is not expected that this breaks any 27479 * application. 27480 * 27481 * ------------------------------------------------------ 27482 * removable media hotpluggable | errno 27483 * ------------------------------------------------------ 27484 * false false | EIO 27485 * false true | EIO 27486 * true x | ENXIO 27487 * ------------------------------------------------------ 27488 * 27489 * 27490 * 11. ioctls: DKIOCEJECT, CDROMEJECT 27491 * 27492 * These IOCTLs are applicable only to removable media devices. 27493 * 27494 * ----------------------------------------------------------- 27495 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 27496 * ----------------------------------------------------------- 27497 * false false | No 27498 * false true | No 27499 * true x | Yes 27500 * ----------------------------------------------------------- 27501 * 27502 * 27503 * 12. Kstats for partitions 27504 * 27505 * sd creates partition kstat for non-removable media devices. USB and 27506 * Firewire hard disks now have partition kstats 27507 * 27508 * ------------------------------------------------------ 27509 * removable media hotpluggable | kstat 27510 * ------------------------------------------------------ 27511 * false false | Yes 27512 * false true | Yes 27513 * true x | No 27514 * ------------------------------------------------------ 27515 * 27516 * 27517 * 13. Removable media & hotpluggable properties 27518 * 27519 * Sd driver creates a "removable-media" property for removable media 27520 * devices. Parent nexus drivers create a "hotpluggable" property if 27521 * it supports hotplugging. 27522 * 27523 * --------------------------------------------------------------------- 27524 * removable media hotpluggable | "removable-media" " hotpluggable" 27525 * --------------------------------------------------------------------- 27526 * false false | No No 27527 * false true | No Yes 27528 * true false | Yes No 27529 * true true | Yes Yes 27530 * --------------------------------------------------------------------- 27531 * 27532 * 27533 * 14. Power Management 27534 * 27535 * sd only power manages removable media devices or devices that support 27536 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 27537 * 27538 * A parent nexus that supports hotplugging can also set "pm-capable" 27539 * if the disk can be power managed. 
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 * The sd driver creates a "removable-media" property for removable media
 * devices. Parent nexus drivers create a "hotpluggable" property if they
 * support hotplugging.
 *
 *	---------------------------------------------------------------
 *	 removable media  hotpluggable  |  "removable-media"  "hotpluggable"
 *	---------------------------------------------------------------
 *	     false           false      |        No                No
 *	     false           true       |        No                Yes
 *	     true            false      |        Yes               No
 *	     true            true       |        Yes               Yes
 *	---------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 * sd only power manages removable media devices or devices that support
 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 * A parent nexus that supports hotplugging can also set "pm-capable"
 * if the disk can be power managed.
 *
 *	------------------------------------------------------------
 *	 removable media  hotpluggable  pm-capable  |  power manage
 *	------------------------------------------------------------
 *	     false           false        false     |  No
 *	     false           false        true      |  Yes
 *	     false           true         false     |  No
 *	     false           true         true      |  Yes
 *	     true              x            x       |  Yes
 *	------------------------------------------------------------
 *
 * USB and FireWire hard disks can now be power managed independently
 * of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd doesn't permit a fixed disk device with a capacity
 * larger than 1TB to be used in a 32-bit operating system environment.
 * However, sd doesn't apply this check to removable media devices;
 * instead, it assumes that removable media devices cannot have a
 * capacity larger than 1TB. Using such devices on a 32-bit system is
 * therefore only partially supported and can cause unexpected results.
 *
 *	---------------------------------------------------------------
 *	 removable media  USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *	---------------------------------------------------------------
 *	     false          false   |      true        |      No
 *	     false          true    |      true        |      No
 *	     true           false   |      true        |      Yes
 *	     true           true    |      true        |      Yes
 *	---------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 * When a removable media device is opened for writing without the NDELAY
 * flag, sd checks whether the device is writable. An attempt to open a
 * write-protected device for writing without the NDELAY flag is aborted.
 *
 *	------------------------------------------------------------
 *	 removable media  USB/1394  |  WP Check
 *	------------------------------------------------------------
 *	     false          false   |  No
 *	     false          true    |  No
 *	     true           false   |  Yes
 *	     true           true    |  Yes
 *	------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd only prints a syslog
 * message for fixed SCSI disks.
 *
 *	------------------------------------------------------------
 *	 removable media  USB/1394  |  print syslog
 *	------------------------------------------------------------
 *	     false          false   |  Yes
 *	     false          true    |  No
 *	     true           false   |  No
 *	     true           true    |  No
 *	------------------------------------------------------------
 */
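/*
 * For example, the removability query from section 6 and the eject ioctl
 * from section 10 can be exercised from userland. A minimal sketch (the
 * device path and the minimal error handling are illustrative
 * assumptions, not driver code):
 *
 *	int removable = 0;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *
 *	if (fd >= 0) {
 *		if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
 *		    removable != 0)
 *			(void) ioctl(fd, DKIOCEJECT, 0);
 *		(void) close(fd);
 *	}
 */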
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device the medium may be changed after the device has
		 * been opened, so this operation must be supported.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive, in which case
		 * we don't care whether the device is writable. Without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command,
		 * so check whether a device supports it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the media eject ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * Assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Create a zero-length (boolean) "removable-media"
		 * property for removable media devices. Note that the
		 * return value of the property creation is not checked:
		 * failure to create the property should not cause the
		 * attach to fail altogether. This is consistent with
		 * other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data comes
		 * in two formats: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this to 0 in sd.conf to disable partition kstats.
		 * The default is 1, so they are enabled by default. (An
		 * illustrative sd.conf fragment follows.)
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));
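		/*
		 * A minimal sketch of the corresponding sd.conf entry
		 * (assuming standard driver.conf(4) property syntax; the
		 * fragment is illustrative, not shipped configuration):
		 *
		 *	enable-partition-kstats=0;
		 */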

		/*
		 * Check whether the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist, then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make later checks
			 * easier. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Hotpluggable devices have to be watched as well, since
		 * that is the only way for userland applications to detect
		 * hot removal while the device is busy or mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	in system block size, reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun	*un;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	char		*dkl = NULL;
	diskaddr_t	real_addr = start_block;
	diskaddr_t	first_byte, end_block;
	size_t		buffer_size = reqlength;
	int		rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * address to target blocks and remember the byte offset
		 * to the beginning of the label data.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* Round up buffer size to a multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    ((reqlength % un->un_tgt_blocksize) != 0))
			/* The request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}
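	/*
	 * Worked example of the conversion above (values assumed for
	 * illustration): with un_tgt_blocksize == 2048 and a 512-byte
	 * request at system block 3, first_byte == 1536, real_addr == 0,
	 * end_block == 1 and buffer_size == 2048. The request is not
	 * aligned, so a 2048-byte bounce buffer (dkl) is allocated and
	 * the 512 requested bytes are copied to/from offset 1536 in it.
	 */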

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if ((dkl != NULL) && (rval == 0))
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun	*un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);

	default:
		return (ENOTTY);
	}
}
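
/*
 * sd_tg_rdwr() and sd_tg_getinfo() are exported to cmlb through the
 * sd_tgops vector declared earlier in this file. A minimal sketch of
 * that wiring (assuming the cmlb_tg_ops_t layout from <sys/cmlb.h>):
 *
 *	static cmlb_tg_ops_t sd_tgops = {
 *		TG_DK_OPS_VERSION_1,
 *		sd_tg_rdwr,
 *		sd_tg_getinfo
 *	};
 *
 * cmlb then invokes these entry points (passing a path-flag cookie) to
 * read and write labels and to query capacity and geometry on behalf of
 * the driver.
 */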