/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
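
/*
 * For illustration only: a minimal sketch (not the driver's actual lookup
 * code) of how an HBA-reported "interconnect-type" property could be
 * queried with ddi_prop_get_int(9F) and tested against the values named
 * above:
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1);
 *	if ((itype == INTERCONNECT_FIBRE) || (itype == INTERCONNECT_SSA) ||
 *	    (itype == INTERCONNECT_FABRIC)) {
 *		... assume FC (ssd) behaviors ...
 *	}
 */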

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif
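
/*
 * A hypothetical sd.conf fragment, for illustration only (the authoritative
 * format is whatever sd_process_sdconf_file() and sd_chk_vers1_data() below
 * accept), pairing a vid/pid string with a version-1 data property list:
 *
 *	sd-config-list = "ACME    SUPERDISK", "acme-data";
 *	acme-data = 1, 0x1, 32;
 *
 * Here the leading 1 is the data list version, 0x1 is a flags word
 * selecting which tunables the remaining values set, and 32 would then be
 * applied as the throttle. "ACME" and "SUPERDISK" are made-up names.
 */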


/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */
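
/*
 * Note on the sd_tunables initializers below: the members are positional.
 * Judging from the SD_CONF_BSET_* flags they are paired with in
 * sd_disk_table, the order is: throttle, ctype, not-ready retry count,
 * busy retry count, reset retry count, reserve/release time, min throttle,
 * disksort-disabled flag, and lun-reset-enabled flag. See sddef.h for the
 * authoritative definition.
 */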
#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
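
/*
 * For example, the first fibre-platform entry below,
 *
 *	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
 *
 * matches inquiry data whose 8-byte vid field is "SEAGATE " and whose pid
 * begins with "ST34371FC"; the SD_CONF_BSET_THROTTLE flag then causes the
 * throttle value from seagate_properties to be applied to that unit.
 */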
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);


#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
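
/*
 * New_state() records the previous state before switching, using the comma
 * operator so the macro remains a single expression; Restore_state() copies
 * un_last_state through a temporary because New_state() would otherwise
 * overwrite it before it is read.
 */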

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
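
/*
 * Each sd_cdbinfo entry above gives a CDB size (CDB_GROUPn), the matching
 * SCMD_* opcode-group bits, the largest LBA addressable with that CDB size,
 * and the largest block count its transfer-length field can carry: roughly
 * 21-bit LBA/8-bit count for 6-byte Group 0 CDBs, 32/16 for 10-byte Group 1,
 * 32/32 for 12-byte Group 5, and 64/32 for 16-byte Group 4. The packet setup
 * code (see sd_setup_rw_pkt() below) walks this table to pick the smallest
 * CDB that can express a given request.
 */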

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
				ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
				ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
				ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
				ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int	sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void	sd_get_nv_sup(struct sd_lun *un);
static dev_t	sd_make_device(dev_info_t *devi);

static void	sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int	sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int	sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int	sd_ready_and_valid(struct sd_lun *un);

static void	sdmin(struct buf *bp);
static int	sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int	sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int	sdstrategy(struct buf *bp);
static int	sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void	sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void	sdintr(struct scsi_pkt *pktp);
static void	sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int	sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void	sd_bioclone_free(struct buf *bp);
static void	sd_shadow_buf_free(struct buf *bp);

static void	sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);
static void	sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void	sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void	sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void	sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
		int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void	sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void	sd_send_request_sense_command(struct sd_lun *un,
	struct buf *bp, struct scsi_pkt *pktp);
static void	sd_start_retry_command(void *arg);
static void	sd_start_direct_priority_command(void *arg);
static void	sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void	sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void	sd_return_command(struct sd_lun *un, struct buf *bp);
static void	sd_sync_with_callback(struct sd_lun *un);
static int	sdrunout(caddr_t arg);

static void	sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void	sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void	sd_restore_throttle(void *arg);

static void	sd_init_cdb_limits(struct sd_lun *un);

static void	sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void	sd_pkt_status_check_condition(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_handle_auto_request_sense(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int	sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, size_t actual_len);
static void	sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void	sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_illegal_request(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_aborted_command(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_default(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void	sd_pkt_reason_cmd_incomplete(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tag_reject(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void	sd_start_stop_unit_callback(void *arg);
static void	sd_start_stop_unit_task(void *arg);

static void	sd_taskq_create(void);
static void	sd_taskq_delete(void);
static void	sd_media_change_task(void *arg);

static int	sd_handle_mchange(struct sd_lun *un);
static int	sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag,
	int path_flag);
static int	sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int	sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un,
	uint64_t *capp, uint32_t *lbap, int path_flag);
static int	sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int	sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int	sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int	sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int	sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int	sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
	struct dk_callback *dkc);
static int	sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int	sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, int path_flag);
static int	sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int	sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int	sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int	sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd,
	void *bufaddr, size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)

static int	sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int	sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void	sd_free_rqs(struct sd_lun *un);

static void	sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);
static void	sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int	sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int	sd_check_mhd(dev_t dev, int interval);
static int	sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void	sd_mhd_watch_incomplete(struct sd_lun *un,
	struct scsi_pkt *pkt);
static char	*sd_sname(uchar_t status);
static void	sd_mhd_resvd_recover(void *arg);
static void	sd_resv_reclaim_thread();
static int	sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int	sd_reserve_release(dev_t dev, int cmd);
static void	sd_rmv_resv_reclaim_req(dev_t dev);
static void	sd_mhd_reset_notify_cb(caddr_t arg);
static int	sd_persistent_reservation_in_read_keys(struct sd_lun *un,
	mhioc_inkeys_t *usrp, int flag);
static int	sd_persistent_reservation_in_read_resv(struct sd_lun *un,
	mhioc_inresvs_t *usrp, int flag);
static int	sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int	sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
static int	sd_mhdioc_release(dev_t dev);
static int	sd_mhdioc_register_devid(dev_t dev);
static int	sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int	sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);

/*
 * SCSI removable prototypes
 */
static int	sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int	sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int	sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data,
	int flag);
static int	sr_pause_resume(dev_t dev, int mode);
static int	sr_play_msf(dev_t dev, caddr_t data, int flag);
static int	sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int	sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int	sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int	sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int	sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int	sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int	sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int	sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int	sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int	sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int	sr_sector_mode(dev_t dev, uint32_t blksize);
static int	sr_eject(dev_t dev);
static void	sr_ejected(register struct sd_lun *un);
static int	sr_check_wp(dev_t dev);
static int	sd_check_media(dev_t dev, enum dkio_state state);
static int	sd_media_watch_cb(caddr_t arg,
	struct scsi_watch_result *resultp);
static void	sd_delayed_cv_broadcast(void *arg);
static int	sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int	sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);

static int	sd_log_page_supported(struct sd_lun *un, int log_page);

/*
 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
 */
static void	sd_check_for_writable_cd(struct sd_lun *un, int path_flag);
static int	sd_wm_cache_constructor(void *wm, void *un, int flags);
static void	sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
	daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
	daddr_t endb);
static void	sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void	sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void	sd_read_modify_write_task(void * arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
	struct buf **bpp);


/*
 * Function prototypes for failfast support.
 */
static void	sd_failfast_flushq(struct sd_lun *un);
static int	sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void	sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for partial DMA support
 */
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pkt, struct sd_xbuf *xp);


/* Function prototypes for cmlb */
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
	diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg,
	void *tg_cookie);

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define	SD_FAILFAST_INACTIVE		0
#define	SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define	SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define	SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1524 */ 1525 static void sd_failfast_flushq(struct sd_lun *un); 1526 static int sd_failfast_flushq_callback(struct buf *bp); 1527 1528 /* 1529 * Function prototypes to check for lsi devices 1530 */ 1531 static void sd_is_lsi(struct sd_lun *un); 1532 1533 /* 1534 * Function prototypes for partial DMA support 1535 */ 1536 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1537 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1538 1539 1540 /* Function prototypes for cmlb */ 1541 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1542 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1543 1544 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1545 1546 /* 1547 * Constants for failfast support: 1548 * 1549 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1550 * failfast processing being performed. 1551 * 1552 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1553 * failfast processing on all bufs with B_FAILFAST set. 1554 */ 1555 1556 #define SD_FAILFAST_INACTIVE 0 1557 #define SD_FAILFAST_ACTIVE 1 1558 1559 /* 1560 * Bitmask to control behavior of buf(9S) flushes when a transition to 1561 * the failfast state occurs. Optional bits include: 1562 * 1563 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1564 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1565 * be flushed. 1566 * 1567 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1568 * driver, in addition to the regular wait queue. This includes the xbuf 1569 * queues. When clear, only the driver's wait queue will be flushed. 1570 */ 1571 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1572 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1573 1574 /* 1575 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1576 * to flush all queues within the driver. 1577 */ 1578 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1579 1580 1581 /* 1582 * SD Testing Fault Injection 1583 */ 1584 #ifdef SD_FAULT_INJECTION 1585 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1586 static void sd_faultinjection(struct scsi_pkt *pktp); 1587 static void sd_injection_log(char *buf, struct sd_lun *un); 1588 #endif 1589 1590 /* 1591 * Device driver ops vector 1592 */ 1593 static struct cb_ops sd_cb_ops = { 1594 sdopen, /* open */ 1595 sdclose, /* close */ 1596 sdstrategy, /* strategy */ 1597 nodev, /* print */ 1598 sddump, /* dump */ 1599 sdread, /* read */ 1600 sdwrite, /* write */ 1601 sdioctl, /* ioctl */ 1602 nodev, /* devmap */ 1603 nodev, /* mmap */ 1604 nodev, /* segmap */ 1605 nochpoll, /* poll */ 1606 sd_prop_op, /* cb_prop_op */ 1607 0, /* streamtab */ 1608 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1609 CB_REV, /* cb_rev */ 1610 sdaread, /* async I/O read entry point */ 1611 sdawrite /* async I/O write entry point */ 1612 }; 1613 1614 static struct dev_ops sd_ops = { 1615 DEVO_REV, /* devo_rev, */ 1616 0, /* refcnt */ 1617 sdinfo, /* info */ 1618 nulldev, /* identify */ 1619 sdprobe, /* probe */ 1620 sdattach, /* attach */ 1621 sddetach, /* detach */ 1622 nodev, /* reset */ 1623 &sd_cb_ops, /* driver operations */ 1624 NULL, /* bus operations */ 1625 sdpower /* power */ 1626 }; 1627 1628 1629 /* 1630 * This is the loadable module wrapper. 1631 */ 1632 #include <sys/modctl.h> 1633 1634 static struct modldrv modldrv = { 1635 &mod_driverops, /* Type of module. 
This one is a driver */
1636     SD_MODULE_NAME,     /* Module name. */
1637     &sd_ops             /* driver ops */
1638 };
1639 
1640 
1641 static struct modlinkage modlinkage = {
1642     MODREV_1,
1643     &modldrv,
1644     NULL
1645 };
1646 
1647 static cmlb_tg_ops_t sd_tgops = {
1648     TG_DK_OPS_VERSION_1,
1649     sd_tg_rdwr,
1650     sd_tg_getinfo
1651 };
1652 
1653 static struct scsi_asq_key_strings sd_additional_codes[] = {
1654     0x81, 0, "Logical Unit is Reserved",
1655     0x85, 0, "Audio Address Not Valid",
1656     0xb6, 0, "Media Load Mechanism Failed",
1657     0xB9, 0, "Audio Play Operation Aborted",
1658     0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1659     0x53, 2, "Medium removal prevented",
1660     0x6f, 0, "Authentication failed during key exchange",
1661     0x6f, 1, "Key not present",
1662     0x6f, 2, "Key not established",
1663     0x6f, 3, "Read without proper authentication",
1664     0x6f, 4, "Mismatched region to this logical unit",
1665     0x6f, 5, "Region reset count error",
1666     0xffff, 0x0, NULL
1667 };
1668 
1669 
1670 /*
1671  * Struct for passing printing information for sense data messages
1672  */
1673 struct sd_sense_info {
1674     int ssi_severity;
1675     int ssi_pfa_flag;
1676 };
1677 
1678 /*
1679  * Table of function pointers for iostart-side routines. Separate "chains"
1680  * of layered function calls are formed by placing the function pointers
1681  * sequentially in the desired order. Functions are called according to an
1682  * incrementing table index ordering. The last function in each chain must
1683  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1684  * in the sd_iodone_chain[] array.
1685  *
1686  * Note: It may seem more natural to organize both the iostart and iodone
1687  * functions together, into an array of structures (or some similar
1688  * organization) with a common index, rather than two separate arrays which
1689  * must be maintained in synchronization. The purpose of this division is
1690  * to achieve improved performance: individual arrays allow for more
1691  * effective cache line utilization on certain platforms.
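 *
 * As an illustrative example using the indexes in the table below, a
 * buf(9S) IO on a plain disk target with power management enabled
 * enters the chain at sd_mapblockaddr_iostart (Index: 0); each routine
 * then hands the buf to the next layer via SD_NEXT_IOSTART(), invoking
 * sd_pm_iostart (Index: 1) and finally sd_core_iostart (Index: 2).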
1692 */ 1693 1694 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1695 1696 1697 static sd_chain_t sd_iostart_chain[] = { 1698 1699 /* Chain for buf IO for disk drive targets (PM enabled) */ 1700 sd_mapblockaddr_iostart, /* Index: 0 */ 1701 sd_pm_iostart, /* Index: 1 */ 1702 sd_core_iostart, /* Index: 2 */ 1703 1704 /* Chain for buf IO for disk drive targets (PM disabled) */ 1705 sd_mapblockaddr_iostart, /* Index: 3 */ 1706 sd_core_iostart, /* Index: 4 */ 1707 1708 /* Chain for buf IO for removable-media targets (PM enabled) */ 1709 sd_mapblockaddr_iostart, /* Index: 5 */ 1710 sd_mapblocksize_iostart, /* Index: 6 */ 1711 sd_pm_iostart, /* Index: 7 */ 1712 sd_core_iostart, /* Index: 8 */ 1713 1714 /* Chain for buf IO for removable-media targets (PM disabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 9 */ 1716 sd_mapblocksize_iostart, /* Index: 10 */ 1717 sd_core_iostart, /* Index: 11 */ 1718 1719 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1720 sd_mapblockaddr_iostart, /* Index: 12 */ 1721 sd_checksum_iostart, /* Index: 13 */ 1722 sd_pm_iostart, /* Index: 14 */ 1723 sd_core_iostart, /* Index: 15 */ 1724 1725 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1726 sd_mapblockaddr_iostart, /* Index: 16 */ 1727 sd_checksum_iostart, /* Index: 17 */ 1728 sd_core_iostart, /* Index: 18 */ 1729 1730 /* Chain for USCSI commands (all targets) */ 1731 sd_pm_iostart, /* Index: 19 */ 1732 sd_core_iostart, /* Index: 20 */ 1733 1734 /* Chain for checksumming USCSI commands (all targets) */ 1735 sd_checksum_uscsi_iostart, /* Index: 21 */ 1736 sd_pm_iostart, /* Index: 22 */ 1737 sd_core_iostart, /* Index: 23 */ 1738 1739 /* Chain for "direct" USCSI commands (all targets) */ 1740 sd_core_iostart, /* Index: 24 */ 1741 1742 /* Chain for "direct priority" USCSI commands (all targets) */ 1743 sd_core_iostart, /* Index: 25 */ 1744 }; 1745 1746 /* 1747 * Macros to locate the first function of each iostart chain in the 1748 * sd_iostart_chain[] array. These are located by the index in the array. 1749 */ 1750 #define SD_CHAIN_DISK_IOSTART 0 1751 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1752 #define SD_CHAIN_RMMEDIA_IOSTART 5 1753 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1754 #define SD_CHAIN_CHKSUM_IOSTART 12 1755 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1756 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1757 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1758 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1759 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1760 1761 1762 /* 1763 * Table of function pointers for the iodone-side routines for the driver- 1764 * internal layering mechanism. The calling sequence for iodone routines 1765 * uses a decrementing table index, so the last routine called in a chain 1766 * must be at the lowest array index location for that chain. The last 1767 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1768 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1769 * of the functions in an iodone side chain must correspond to the ordering 1770 * of the iostart routines for that chain. Note that there is no iodone 1771 * side routine that corresponds to sd_core_iostart(), so there is no 1772 * entry in the table for this. 
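 *
 * Continuing the example above, completion for the plain disk chain
 * (PM enabled) begins at sd_pm_iodone (Index: 2) and proceeds via
 * SD_NEXT_IODONE() through sd_mapblockaddr_iodone (Index: 1) down to
 * sd_buf_iodone (Index: 0).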
1773 */
1774 
1775 static sd_chain_t sd_iodone_chain[] = {
1776 
1777     /* Chain for buf IO for disk drive targets (PM enabled) */
1778     sd_buf_iodone,              /* Index: 0 */
1779     sd_mapblockaddr_iodone,     /* Index: 1 */
1780     sd_pm_iodone,               /* Index: 2 */
1781 
1782     /* Chain for buf IO for disk drive targets (PM disabled) */
1783     sd_buf_iodone,              /* Index: 3 */
1784     sd_mapblockaddr_iodone,     /* Index: 4 */
1785 
1786     /* Chain for buf IO for removable-media targets (PM enabled) */
1787     sd_buf_iodone,              /* Index: 5 */
1788     sd_mapblockaddr_iodone,     /* Index: 6 */
1789     sd_mapblocksize_iodone,     /* Index: 7 */
1790     sd_pm_iodone,               /* Index: 8 */
1791 
1792     /* Chain for buf IO for removable-media targets (PM disabled) */
1793     sd_buf_iodone,              /* Index: 9 */
1794     sd_mapblockaddr_iodone,     /* Index: 10 */
1795     sd_mapblocksize_iodone,     /* Index: 11 */
1796 
1797     /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1798     sd_buf_iodone,              /* Index: 12 */
1799     sd_mapblockaddr_iodone,     /* Index: 13 */
1800     sd_checksum_iodone,         /* Index: 14 */
1801     sd_pm_iodone,               /* Index: 15 */
1802 
1803     /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1804     sd_buf_iodone,              /* Index: 16 */
1805     sd_mapblockaddr_iodone,     /* Index: 17 */
1806     sd_checksum_iodone,         /* Index: 18 */
1807 
1808     /* Chain for USCSI commands (non-checksum targets) */
1809     sd_uscsi_iodone,            /* Index: 19 */
1810     sd_pm_iodone,               /* Index: 20 */
1811 
1812     /* Chain for USCSI commands (checksum targets) */
1813     sd_uscsi_iodone,            /* Index: 21 */
1814     sd_checksum_uscsi_iodone,   /* Index: 22 */
1815     sd_pm_iodone,               /* Index: 23 */
1816 
1817     /* Chain for "direct" USCSI commands (all targets) */
1818     sd_uscsi_iodone,            /* Index: 24 */
1819 
1820     /* Chain for "direct priority" USCSI commands (all targets) */
1821     sd_uscsi_iodone,            /* Index: 25 */
1822 };
1823 
1824 
1825 /*
1826  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1827  * each iodone-side chain. These are located by the array index, but as the
1828  * iodone side functions are called in a decrementing-index order, the
1829  * highest index number in each chain must be specified (as these correspond
1830  * to the first function in the iodone chain that will be called by the core
1831  * at IO completion time).
1832  */
1833 
1834 #define SD_CHAIN_DISK_IODONE            2
1835 #define SD_CHAIN_DISK_IODONE_NO_PM      4
1836 #define SD_CHAIN_RMMEDIA_IODONE         8
1837 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM   11
1838 #define SD_CHAIN_CHKSUM_IODONE          15
1839 #define SD_CHAIN_CHKSUM_IODONE_NO_PM    18
1840 #define SD_CHAIN_USCSI_CMD_IODONE       20
1841 #define SD_CHAIN_USCSI_CHKSUM_IODONE    23
1842 #define SD_CHAIN_DIRECT_CMD_IODONE      24
1843 #define SD_CHAIN_PRIORITY_CMD_IODONE    25
1844 
1845 
1846 
1847 
1848 /*
1849  * Array to map a layering chain index to the appropriate initpkt routine.
1850  * The redundant entries are present so that the index used for accessing
1851  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1852  * with this table as well.
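 *
 * Note that the mapping is uniform within a chain class: every buf IO
 * chain entry resolves to sd_initpkt_for_buf and every USCSI chain
 * entry resolves to sd_initpkt_for_uscsi; only the indexing differs.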
1853 */
1854 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1855 
1856 static sd_initpkt_t sd_initpkt_map[] = {
1857 
1858     /* Chain for buf IO for disk drive targets (PM enabled) */
1859     sd_initpkt_for_buf,     /* Index: 0 */
1860     sd_initpkt_for_buf,     /* Index: 1 */
1861     sd_initpkt_for_buf,     /* Index: 2 */
1862 
1863     /* Chain for buf IO for disk drive targets (PM disabled) */
1864     sd_initpkt_for_buf,     /* Index: 3 */
1865     sd_initpkt_for_buf,     /* Index: 4 */
1866 
1867     /* Chain for buf IO for removable-media targets (PM enabled) */
1868     sd_initpkt_for_buf,     /* Index: 5 */
1869     sd_initpkt_for_buf,     /* Index: 6 */
1870     sd_initpkt_for_buf,     /* Index: 7 */
1871     sd_initpkt_for_buf,     /* Index: 8 */
1872 
1873     /* Chain for buf IO for removable-media targets (PM disabled) */
1874     sd_initpkt_for_buf,     /* Index: 9 */
1875     sd_initpkt_for_buf,     /* Index: 10 */
1876     sd_initpkt_for_buf,     /* Index: 11 */
1877 
1878     /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1879     sd_initpkt_for_buf,     /* Index: 12 */
1880     sd_initpkt_for_buf,     /* Index: 13 */
1881     sd_initpkt_for_buf,     /* Index: 14 */
1882     sd_initpkt_for_buf,     /* Index: 15 */
1883 
1884     /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1885     sd_initpkt_for_buf,     /* Index: 16 */
1886     sd_initpkt_for_buf,     /* Index: 17 */
1887     sd_initpkt_for_buf,     /* Index: 18 */
1888 
1889     /* Chain for USCSI commands (non-checksum targets) */
1890     sd_initpkt_for_uscsi,   /* Index: 19 */
1891     sd_initpkt_for_uscsi,   /* Index: 20 */
1892 
1893     /* Chain for USCSI commands (checksum targets) */
1894     sd_initpkt_for_uscsi,   /* Index: 21 */
1895     sd_initpkt_for_uscsi,   /* Index: 22 */
1896     sd_initpkt_for_uscsi,   /* Index: 23 */
1897 
1898     /* Chain for "direct" USCSI commands (all targets) */
1899     sd_initpkt_for_uscsi,   /* Index: 24 */
1900 
1901     /* Chain for "direct priority" USCSI commands (all targets) */
1902     sd_initpkt_for_uscsi,   /* Index: 25 */
1903 
1904 };
1905 
1906 
1907 /*
1908  * Array to map a layering chain index to the appropriate destroypkt routine.
1909  * The redundant entries are present so that the index used for accessing
1910  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1911  * with this table as well.
1912 */
1913 typedef void (*sd_destroypkt_t)(struct buf *);
1914 
1915 static sd_destroypkt_t sd_destroypkt_map[] = {
1916 
1917     /* Chain for buf IO for disk drive targets (PM enabled) */
1918     sd_destroypkt_for_buf,      /* Index: 0 */
1919     sd_destroypkt_for_buf,      /* Index: 1 */
1920     sd_destroypkt_for_buf,      /* Index: 2 */
1921 
1922     /* Chain for buf IO for disk drive targets (PM disabled) */
1923     sd_destroypkt_for_buf,      /* Index: 3 */
1924     sd_destroypkt_for_buf,      /* Index: 4 */
1925 
1926     /* Chain for buf IO for removable-media targets (PM enabled) */
1927     sd_destroypkt_for_buf,      /* Index: 5 */
1928     sd_destroypkt_for_buf,      /* Index: 6 */
1929     sd_destroypkt_for_buf,      /* Index: 7 */
1930     sd_destroypkt_for_buf,      /* Index: 8 */
1931 
1932     /* Chain for buf IO for removable-media targets (PM disabled) */
1933     sd_destroypkt_for_buf,      /* Index: 9 */
1934     sd_destroypkt_for_buf,      /* Index: 10 */
1935     sd_destroypkt_for_buf,      /* Index: 11 */
1936 
1937     /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1938     sd_destroypkt_for_buf,      /* Index: 12 */
1939     sd_destroypkt_for_buf,      /* Index: 13 */
1940     sd_destroypkt_for_buf,      /* Index: 14 */
1941     sd_destroypkt_for_buf,      /* Index: 15 */
1942 
1943     /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1944     sd_destroypkt_for_buf,      /* Index: 16 */
1945     sd_destroypkt_for_buf,      /* Index: 17 */
1946     sd_destroypkt_for_buf,      /* Index: 18 */
1947 
1948     /* Chain for USCSI commands (non-checksum targets) */
1949     sd_destroypkt_for_uscsi,    /* Index: 19 */
1950     sd_destroypkt_for_uscsi,    /* Index: 20 */
1951 
1952     /* Chain for USCSI commands (checksum targets) */
1953     sd_destroypkt_for_uscsi,    /* Index: 21 */
1954     sd_destroypkt_for_uscsi,    /* Index: 22 */
1955     sd_destroypkt_for_uscsi,    /* Index: 23 */
1956 
1957     /* Chain for "direct" USCSI commands (all targets) */
1958     sd_destroypkt_for_uscsi,    /* Index: 24 */
1959 
1960     /* Chain for "direct priority" USCSI commands (all targets) */
1961     sd_destroypkt_for_uscsi,    /* Index: 25 */
1962 
1963 };
1964 
1965 
1966 
1967 /*
1968  * Array to map a layering chain index to the appropriate chain "type".
1969  * The chain type indicates a specific property/usage of the chain.
1970  * The redundant entries are present so that the index used for accessing
1971  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1972  * with this table as well.
1973 */
1974 
1975 #define SD_CHAIN_NULL               0   /* for the special RQS cmd */
1976 #define SD_CHAIN_BUFIO              1   /* regular buf IO */
1977 #define SD_CHAIN_USCSI              2   /* regular USCSI commands */
1978 #define SD_CHAIN_DIRECT             3   /* uscsi, w/ bypass power mgt */
1979 #define SD_CHAIN_DIRECT_PRIORITY    4   /* uscsi, w/ bypass power mgt */
1980                                         /* (for error recovery) */
1981 
1982 static int sd_chain_type_map[] = {
1983 
1984     /* Chain for buf IO for disk drive targets (PM enabled) */
1985     SD_CHAIN_BUFIO,             /* Index: 0 */
1986     SD_CHAIN_BUFIO,             /* Index: 1 */
1987     SD_CHAIN_BUFIO,             /* Index: 2 */
1988 
1989     /* Chain for buf IO for disk drive targets (PM disabled) */
1990     SD_CHAIN_BUFIO,             /* Index: 3 */
1991     SD_CHAIN_BUFIO,             /* Index: 4 */
1992 
1993     /* Chain for buf IO for removable-media targets (PM enabled) */
1994     SD_CHAIN_BUFIO,             /* Index: 5 */
1995     SD_CHAIN_BUFIO,             /* Index: 6 */
1996     SD_CHAIN_BUFIO,             /* Index: 7 */
1997     SD_CHAIN_BUFIO,             /* Index: 8 */
1998 
1999     /* Chain for buf IO for removable-media targets (PM disabled) */
2000     SD_CHAIN_BUFIO,             /* Index: 9 */
2001     SD_CHAIN_BUFIO,             /* Index: 10 */
2002     SD_CHAIN_BUFIO,             /* Index: 11 */
2003 
2004     /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2005     SD_CHAIN_BUFIO,             /* Index: 12 */
2006     SD_CHAIN_BUFIO,             /* Index: 13 */
2007     SD_CHAIN_BUFIO,             /* Index: 14 */
2008     SD_CHAIN_BUFIO,             /* Index: 15 */
2009 
2010     /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2011     SD_CHAIN_BUFIO,             /* Index: 16 */
2012     SD_CHAIN_BUFIO,             /* Index: 17 */
2013     SD_CHAIN_BUFIO,             /* Index: 18 */
2014 
2015     /* Chain for USCSI commands (non-checksum targets) */
2016     SD_CHAIN_USCSI,             /* Index: 19 */
2017     SD_CHAIN_USCSI,             /* Index: 20 */
2018 
2019     /* Chain for USCSI commands (checksum targets) */
2020     SD_CHAIN_USCSI,             /* Index: 21 */
2021     SD_CHAIN_USCSI,             /* Index: 22 */
2022     SD_CHAIN_USCSI,             /* Index: 23 */
2023 
2024     /* Chain for "direct" USCSI commands (all targets) */
2025     SD_CHAIN_DIRECT,            /* Index: 24 */
2026 
2027     /* Chain for "direct priority" USCSI commands (all targets) */
2028     SD_CHAIN_DIRECT_PRIORITY,   /* Index: 25 */
2029 };
2030 
2031 
2032 /* Macro to return TRUE if the IO has come from a buf(9S) IO chain. */
2033 #define SD_IS_BUFIO(xp) \
2034     (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2035 
2036 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2037 #define SD_IS_DIRECT_PRIORITY(xp) \
2038     (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2039 
2040 
2041 
2042 /*
2043  * Struct, array, and macros to map a specific chain to the appropriate
2044  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2045  *
2046  * The sd_chain_index_map[] array is used at attach time to set the various
2047  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2048  * chain to be used with the instance. This allows different instances to use
2049  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2050  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2051  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2052  * dynamically and without the use of locking; and (2) a layer to update the
2053  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2054  * to allow for deferred processing of an IO within the same chain from a
2055  * different execution context.
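 *
 * For example, an instance whose un_buf_chain_type is set to
 * SD_CHAIN_INFO_DISK uses the map entry that pairs
 * SD_CHAIN_DISK_IOSTART (0) with SD_CHAIN_DISK_IODONE (2), so its buf
 * IOs enter the iostart chain at index 0 and begin completion
 * processing at iodone index 2.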
2056 */ 2057 2058 struct sd_chain_index { 2059 int sci_iostart_index; 2060 int sci_iodone_index; 2061 }; 2062 2063 static struct sd_chain_index sd_chain_index_map[] = { 2064 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2065 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2066 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2067 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2068 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2069 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2070 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2071 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2072 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2073 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2074 }; 2075 2076 2077 /* 2078 * The following are indexes into the sd_chain_index_map[] array. 2079 */ 2080 2081 /* un->un_buf_chain_type must be set to one of these */ 2082 #define SD_CHAIN_INFO_DISK 0 2083 #define SD_CHAIN_INFO_DISK_NO_PM 1 2084 #define SD_CHAIN_INFO_RMMEDIA 2 2085 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2086 #define SD_CHAIN_INFO_CHKSUM 4 2087 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2088 2089 /* un->un_uscsi_chain_type must be set to one of these */ 2090 #define SD_CHAIN_INFO_USCSI_CMD 6 2091 /* USCSI with PM disabled is the same as DIRECT */ 2092 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2093 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2094 2095 /* un->un_direct_chain_type must be set to one of these */ 2096 #define SD_CHAIN_INFO_DIRECT_CMD 8 2097 2098 /* un->un_priority_chain_type must be set to one of these */ 2099 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2100 2101 /* size for devid inquiries */ 2102 #define MAX_INQUIRY_SIZE 0xF0 2103 2104 /* 2105 * Macros used by functions to pass a given buf(9S) struct along to the 2106 * next function in the layering chain for further processing. 2107 * 2108 * In the following macros, passing more than three arguments to the called 2109 * routines causes the optimizer for the SPARC compiler to stop doing tail 2110 * call elimination which results in significant performance degradation. 2111 */ 2112 #define SD_BEGIN_IOSTART(index, un, bp) \ 2113 ((*(sd_iostart_chain[index]))(index, un, bp)) 2114 2115 #define SD_BEGIN_IODONE(index, un, bp) \ 2116 ((*(sd_iodone_chain[index]))(index, un, bp)) 2117 2118 #define SD_NEXT_IOSTART(index, un, bp) \ 2119 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2120 2121 #define SD_NEXT_IODONE(index, un, bp) \ 2122 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2123 2124 /* 2125 * Function: _init 2126 * 2127 * Description: This is the driver _init(9E) entry point. 2128 * 2129 * Return Code: Returns the value from mod_install(9F) or 2130 * ddi_soft_state_init(9F) as appropriate. 2131 * 2132 * Context: Called when driver module loaded. 
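 *
 * Note: If mod_install(9F) fails, all of the global state set up here
 * (mutexes, condition variables, the probe cache, the target/lun
 * chain, the taskqs, and the soft state anchor) is torn down before
 * the error is returned; see the cleanup path in the body below.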
2133 */
2134 
2135 int
2136 _init(void)
2137 {
2138     int err;
2139 
2140     /* establish driver name from module name */
2141     sd_label = mod_modname(&modlinkage);
2142 
2143     err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2144         SD_MAXUNIT);
2145 
2146     if (err != 0) {
2147         return (err);
2148     }
2149 
2150     mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2151     mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2152     mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2153 
2154     mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2155     cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2156     cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2157 
2158     /*
2159      * It is OK to initialize here even for fibre devices.
2160      */
2161     sd_scsi_probe_cache_init();
2162 
2163     sd_scsi_target_lun_init();
2164 
2165     /*
2166      * Creating the taskq before mod_install() ensures that all callers
2167      * (threads) that enter the module after a successful mod_install()
2168      * encounter a valid taskq.
2169      */
2170     sd_taskq_create();
2171 
2172     err = mod_install(&modlinkage);
2173     if (err != 0) {
2174         /* delete taskq if install fails */
2175         sd_taskq_delete();
2176 
2177         mutex_destroy(&sd_detach_mutex);
2178         mutex_destroy(&sd_log_mutex);
2179         mutex_destroy(&sd_label_mutex);
2180 
2181         mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2182         cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2183         cv_destroy(&sd_tr.srq_inprocess_cv);
2184 
2185         sd_scsi_probe_cache_fini();
2186 
2187         sd_scsi_target_lun_fini();
2188 
2189         ddi_soft_state_fini(&sd_state);
2190         return (err);
2191     }
2192 
2193     return (err);
2194 }
2195 
2196 
2197 /*
2198  * Function: _fini
2199  *
2200  * Description: This is the driver _fini(9E) entry point.
2201  *
2202  * Return Code: Returns the value from mod_remove(9F).
2203  *
2204  * Context: Called when driver module is unloaded.
2205  */
2206 
2207 int
2208 _fini(void)
2209 {
2210     int err;
2211 
2212     if ((err = mod_remove(&modlinkage)) != 0) {
2213         return (err);
2214     }
2215 
2216     sd_taskq_delete();
2217 
2218     mutex_destroy(&sd_detach_mutex);
2219     mutex_destroy(&sd_log_mutex);
2220     mutex_destroy(&sd_label_mutex);
2221     mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2222 
2223     sd_scsi_probe_cache_fini();
2224 
2225     sd_scsi_target_lun_fini();
2226 
2227     cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2228     cv_destroy(&sd_tr.srq_inprocess_cv);
2229 
2230     ddi_soft_state_fini(&sd_state);
2231 
2232     return (err);
2233 }
2234 
2235 
2236 /*
2237  * Function: _info
2238  *
2239  * Description: This is the driver _info(9E) entry point.
2240  *
2241  * Arguments: modinfop - pointer to the driver modinfo structure
2242  *
2243  * Return Code: Returns the value from mod_info(9F).
2244  *
2245  * Context: Kernel thread context
2246  */
2247 
2248 int
2249 _info(struct modinfo *modinfop)
2250 {
2251     return (mod_info(&modlinkage, modinfop));
2252 }
2253 
2254 
2255 /*
2256  * The following routines implement the driver message logging facility.
2257  * They provide component- and level-based debug output filtering.
2258  * Output may also be restricted to messages for a single instance by
2259  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2260  * to NULL, then messages for all instances are printed.
2261  *
2262  * These routines have been cloned from each other due to the language
2263  * constraints of macros and variable argument list processing.
2264  */
2265 
2266 
2267 /*
2268  * Function: sd_log_err
2269  *
2270  * Description: This routine is called by the SD_ERROR macro for debug
2271  * logging of error conditions.
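 *
 * A representative call, as issued from sd_enable_descr_sense()
 * below, is:
 *
 *     SD_ERROR(SD_LOG_COMMON, un,
 *         "sd_enable_descr_sense: mode sense ctrl page failed\n");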
2272  *
2273  * Arguments: comp - driver component being logged
2274  *            un   - pointer to driver soft state (unit) structure
2275  *            fmt  - error string and format to be logged
2276  */
2277 
2278 static void
2279 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2280 {
2281     va_list ap;
2282     dev_info_t *dev;
2283 
2284     ASSERT(un != NULL);
2285     dev = SD_DEVINFO(un);
2286     ASSERT(dev != NULL);
2287 
2288     /*
2289      * Filter messages based on the global component and level masks.
2290      * Also print if un matches the value of sd_debug_un, or if
2291      * sd_debug_un is set to NULL.
2292      */
2293     if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2294         ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2295         mutex_enter(&sd_log_mutex);
2296         va_start(ap, fmt);
2297         (void) vsprintf(sd_log_buf, fmt, ap);
2298         va_end(ap);
2299         scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2300         mutex_exit(&sd_log_mutex);
2301     }
2302 #ifdef SD_FAULT_INJECTION
2303     _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2304     if (un->sd_injection_mask & comp) {
2305         mutex_enter(&sd_log_mutex);
2306         va_start(ap, fmt);
2307         (void) vsprintf(sd_log_buf, fmt, ap);
2308         va_end(ap);
2309         sd_injection_log(sd_log_buf, un);
2310         mutex_exit(&sd_log_mutex);
2311     }
2312 #endif
2313 }
2314 
2315 
2316 /*
2317  * Function: sd_log_info
2318  *
2319  * Description: This routine is called by the SD_INFO macro for debug
2320  * logging of general purpose informational conditions.
2321  *
2322  * Arguments: component - driver component being logged
2323  *            un        - pointer to driver soft state (unit) structure
2324  *            fmt       - info string and format to be logged
2325  */
2326 
2327 static void
2328 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2329 {
2330     va_list ap;
2331     dev_info_t *dev;
2332 
2333     ASSERT(un != NULL);
2334     dev = SD_DEVINFO(un);
2335     ASSERT(dev != NULL);
2336 
2337     /*
2338      * Filter messages based on the global component and level masks.
2339      * Also print if un matches the value of sd_debug_un, or if
2340      * sd_debug_un is set to NULL.
2341      */
2342     if ((sd_component_mask & component) &&
2343         (sd_level_mask & SD_LOGMASK_INFO) &&
2344         ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2345         mutex_enter(&sd_log_mutex);
2346         va_start(ap, fmt);
2347         (void) vsprintf(sd_log_buf, fmt, ap);
2348         va_end(ap);
2349         scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2350         mutex_exit(&sd_log_mutex);
2351     }
2352 #ifdef SD_FAULT_INJECTION
2353     _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2354     if (un->sd_injection_mask & component) {
2355         mutex_enter(&sd_log_mutex);
2356         va_start(ap, fmt);
2357         (void) vsprintf(sd_log_buf, fmt, ap);
2358         va_end(ap);
2359         sd_injection_log(sd_log_buf, un);
2360         mutex_exit(&sd_log_mutex);
2361     }
2362 #endif
2363 }
2364 
2365 
2366 /*
2367  * Function: sd_log_trace
2368  *
2369  * Description: This routine is called by the SD_TRACE macro for debug
2370  * logging of trace conditions (i.e. function entry/exit).
2371  *
2372  * Arguments: component - driver component being logged
2373  *            un        - pointer to driver soft state (unit) structure
2374  *            fmt       - trace string and format to be logged
2375  */
2376 
2377 static void
2378 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2379 {
2380     va_list ap;
2381     dev_info_t *dev;
2382 
2383     ASSERT(un != NULL);
2384     dev = SD_DEVINFO(un);
2385     ASSERT(dev != NULL);
2386 
2387     /*
2388      * Filter messages based on the global component and level masks.
2389      * Also print if un matches the value of sd_debug_un, or if
2390      * sd_debug_un is set to NULL.
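 * For example, setting sd_debug_un (e.g. from a debugger) to the soft
 * state pointer of a single instance restricts this trace output to
 * that instance alone.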
2391 */ 2392 if ((sd_component_mask & component) && 2393 (sd_level_mask & SD_LOGMASK_TRACE) && 2394 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2395 mutex_enter(&sd_log_mutex); 2396 va_start(ap, fmt); 2397 (void) vsprintf(sd_log_buf, fmt, ap); 2398 va_end(ap); 2399 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2400 mutex_exit(&sd_log_mutex); 2401 } 2402 #ifdef SD_FAULT_INJECTION 2403 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2404 if (un->sd_injection_mask & component) { 2405 mutex_enter(&sd_log_mutex); 2406 va_start(ap, fmt); 2407 (void) vsprintf(sd_log_buf, fmt, ap); 2408 va_end(ap); 2409 sd_injection_log(sd_log_buf, un); 2410 mutex_exit(&sd_log_mutex); 2411 } 2412 #endif 2413 } 2414 2415 2416 /* 2417 * Function: sdprobe 2418 * 2419 * Description: This is the driver probe(9e) entry point function. 2420 * 2421 * Arguments: devi - opaque device info handle 2422 * 2423 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2424 * DDI_PROBE_FAILURE: If the probe failed. 2425 * DDI_PROBE_PARTIAL: If the instance is not present now, 2426 * but may be present in the future. 2427 */ 2428 2429 static int 2430 sdprobe(dev_info_t *devi) 2431 { 2432 struct scsi_device *devp; 2433 int rval; 2434 int instance; 2435 2436 /* 2437 * if it wasn't for pln, sdprobe could actually be nulldev 2438 * in the "__fibre" case. 2439 */ 2440 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2441 return (DDI_PROBE_DONTCARE); 2442 } 2443 2444 devp = ddi_get_driver_private(devi); 2445 2446 if (devp == NULL) { 2447 /* Ooops... nexus driver is mis-configured... */ 2448 return (DDI_PROBE_FAILURE); 2449 } 2450 2451 instance = ddi_get_instance(devi); 2452 2453 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2454 return (DDI_PROBE_PARTIAL); 2455 } 2456 2457 /* 2458 * Call the SCSA utility probe routine to see if we actually 2459 * have a target at this SCSI nexus. 2460 */ 2461 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2462 case SCSIPROBE_EXISTS: 2463 switch (devp->sd_inq->inq_dtype) { 2464 case DTYPE_DIRECT: 2465 rval = DDI_PROBE_SUCCESS; 2466 break; 2467 case DTYPE_RODIRECT: 2468 /* CDs etc. Can be removable media */ 2469 rval = DDI_PROBE_SUCCESS; 2470 break; 2471 case DTYPE_OPTICAL: 2472 /* 2473 * Rewritable optical driver HP115AA 2474 * Can also be removable media 2475 */ 2476 2477 /* 2478 * Do not attempt to bind to DTYPE_OPTICAL if 2479 * pre solaris 9 sparc sd behavior is required 2480 * 2481 * If first time through and sd_dtype_optical_bind 2482 * has not been set in /etc/system check properties 2483 */ 2484 2485 if (sd_dtype_optical_bind < 0) { 2486 sd_dtype_optical_bind = ddi_prop_get_int 2487 (DDI_DEV_T_ANY, devi, 0, 2488 "optical-device-bind", 1); 2489 } 2490 2491 if (sd_dtype_optical_bind == 0) { 2492 rval = DDI_PROBE_FAILURE; 2493 } else { 2494 rval = DDI_PROBE_SUCCESS; 2495 } 2496 break; 2497 2498 case DTYPE_NOTPRESENT: 2499 default: 2500 rval = DDI_PROBE_FAILURE; 2501 break; 2502 } 2503 break; 2504 default: 2505 rval = DDI_PROBE_PARTIAL; 2506 break; 2507 } 2508 2509 /* 2510 * This routine checks for resource allocation prior to freeing, 2511 * so it will take care of the "smart probing" case where a 2512 * scsi_probe() may or may not have been issued and will *not* 2513 * free previously-freed resources. 2514 */ 2515 scsi_unprobe(devp); 2516 return (rval); 2517 } 2518 2519 2520 /* 2521 * Function: sdinfo 2522 * 2523 * Description: This is the driver getinfo(9e) entry point function. 
2524  * Given the device number, return the devinfo pointer from
2525  * the scsi_device structure or the instance number
2526  * associated with the dev_t.
2527  *
2528  * Arguments: dip - pointer to device info structure
2529  *            infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2530  *                      DDI_INFO_DEVT2INSTANCE)
2531  *            arg - driver dev_t
2532  *            result - pointer through which the devinfo pointer or
2533  *                     instance number is returned
2534  * Return Code: DDI_SUCCESS
2535  *              DDI_FAILURE
2536  */
2537 /* ARGSUSED */
2538 static int
2539 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2540 {
2541     struct sd_lun *un;
2542     dev_t dev;
2543     int instance;
2544     int error;
2545 
2546     switch (infocmd) {
2547     case DDI_INFO_DEVT2DEVINFO:
2548         dev = (dev_t)arg;
2549         instance = SDUNIT(dev);
2550         if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2551             return (DDI_FAILURE);
2552         }
2553         *result = (void *) SD_DEVINFO(un);
2554         error = DDI_SUCCESS;
2555         break;
2556     case DDI_INFO_DEVT2INSTANCE:
2557         dev = (dev_t)arg;
2558         instance = SDUNIT(dev);
2559         *result = (void *)(uintptr_t)instance;
2560         error = DDI_SUCCESS;
2561         break;
2562     default:
2563         error = DDI_FAILURE;
2564     }
2565     return (error);
2566 }
2567 
2568 /*
2569  * Function: sd_prop_op
2570  *
2571  * Description: This is the driver prop_op(9e) entry point function.
2572  *              Return the number of blocks for the partition in question
2573  *              or forward the request to the property facilities.
2574  *
2575  * Arguments: dev - device number
2576  *            dip - pointer to device info structure
2577  *            prop_op - property operator
2578  *            mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2579  *            name - pointer to property name
2580  *            valuep - pointer or address of the user buffer
2581  *            lengthp - property length
2582  *
2583  * Return Code: DDI_PROP_SUCCESS
2584  *              DDI_PROP_NOT_FOUND
2585  *              DDI_PROP_UNDEFINED
2586  *              DDI_PROP_NO_MEMORY
2587  *              DDI_PROP_BUF_TOO_SMALL
2588  */
2589 
2590 static int
2591 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2592     char *name, caddr_t valuep, int *lengthp)
2593 {
2594     int instance = ddi_get_instance(dip);
2595     struct sd_lun *un;
2596     uint64_t nblocks64;
2597     uint_t dblk;
2598 
2599     /*
2600      * Our dynamic properties are all device-specific and size-oriented.
2601      * Requests issued under conditions where size is valid are passed
2602      * to ddi_prop_op_nblocks_blksize() with the size information; otherwise
2603      * the request is passed to ddi_prop_op(). Size depends on valid geometry.
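 * For example, a lookup of the dynamic "Nblocks" property on a slice
 * with a valid label would presumably be satisfied here from the
 * cmlb_partinfo() data below, scaled to target blocks, rather than
 * being passed up to ddi_prop_op().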
2604 */ 2605 un = ddi_get_soft_state(sd_state, instance); 2606 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2607 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2608 name, valuep, lengthp)); 2609 } else if (!SD_IS_VALID_LABEL(un)) { 2610 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2611 valuep, lengthp)); 2612 } 2613 2614 /* get nblocks value */ 2615 ASSERT(!mutex_owned(SD_MUTEX(un))); 2616 2617 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2618 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2619 2620 /* report size in target size blocks */ 2621 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2622 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2623 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2624 } 2625 2626 /* 2627 * The following functions are for smart probing: 2628 * sd_scsi_probe_cache_init() 2629 * sd_scsi_probe_cache_fini() 2630 * sd_scsi_clear_probe_cache() 2631 * sd_scsi_probe_with_cache() 2632 */ 2633 2634 /* 2635 * Function: sd_scsi_probe_cache_init 2636 * 2637 * Description: Initializes the probe response cache mutex and head pointer. 2638 * 2639 * Context: Kernel thread context 2640 */ 2641 2642 static void 2643 sd_scsi_probe_cache_init(void) 2644 { 2645 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2646 sd_scsi_probe_cache_head = NULL; 2647 } 2648 2649 2650 /* 2651 * Function: sd_scsi_probe_cache_fini 2652 * 2653 * Description: Frees all resources associated with the probe response cache. 2654 * 2655 * Context: Kernel thread context 2656 */ 2657 2658 static void 2659 sd_scsi_probe_cache_fini(void) 2660 { 2661 struct sd_scsi_probe_cache *cp; 2662 struct sd_scsi_probe_cache *ncp; 2663 2664 /* Clean up our smart probing linked list */ 2665 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2666 ncp = cp->next; 2667 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2668 } 2669 sd_scsi_probe_cache_head = NULL; 2670 mutex_destroy(&sd_scsi_probe_cache_mutex); 2671 } 2672 2673 2674 /* 2675 * Function: sd_scsi_clear_probe_cache 2676 * 2677 * Description: This routine clears the probe response cache. This is 2678 * done when open() returns ENXIO so that when deferred 2679 * attach is attempted (possibly after a device has been 2680 * turned on) we will retry the probe. Since we don't know 2681 * which target we failed to open, we just clear the 2682 * entire cache. 2683 * 2684 * Context: Kernel thread context 2685 */ 2686 2687 static void 2688 sd_scsi_clear_probe_cache(void) 2689 { 2690 struct sd_scsi_probe_cache *cp; 2691 int i; 2692 2693 mutex_enter(&sd_scsi_probe_cache_mutex); 2694 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2695 /* 2696 * Reset all entries to SCSIPROBE_EXISTS. This will 2697 * force probing to be performed the next time 2698 * sd_scsi_probe_with_cache is called. 2699 */ 2700 for (i = 0; i < NTARGETS_WIDE; i++) { 2701 cp->cache[i] = SCSIPROBE_EXISTS; 2702 } 2703 } 2704 mutex_exit(&sd_scsi_probe_cache_mutex); 2705 } 2706 2707 2708 /* 2709 * Function: sd_scsi_probe_with_cache 2710 * 2711 * Description: This routine implements support for a scsi device probe 2712 * with cache. The driver maintains a cache of the target 2713 * responses to scsi probes. If we get no response from a 2714 * target during a probe inquiry, we remember that, and we 2715 * avoid additional calls to scsi_probe on non-zero LUNs 2716 * on the same target until the cache is cleared. 
By doing 2717 * so we avoid the 1/4 sec selection timeout for nonzero 2718 * LUNs. lun0 of a target is always probed. 2719 * 2720 * Arguments: devp - Pointer to a scsi_device(9S) structure 2721 * waitfunc - indicates what the allocator routines should 2722 * do when resources are not available. This value 2723 * is passed on to scsi_probe() when that routine 2724 * is called. 2725 * 2726 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2727 * otherwise the value returned by scsi_probe(9F). 2728 * 2729 * Context: Kernel thread context 2730 */ 2731 2732 static int 2733 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2734 { 2735 struct sd_scsi_probe_cache *cp; 2736 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2737 int lun, tgt; 2738 2739 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2740 SCSI_ADDR_PROP_LUN, 0); 2741 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2742 SCSI_ADDR_PROP_TARGET, -1); 2743 2744 /* Make sure caching enabled and target in range */ 2745 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2746 /* do it the old way (no cache) */ 2747 return (scsi_probe(devp, waitfn)); 2748 } 2749 2750 mutex_enter(&sd_scsi_probe_cache_mutex); 2751 2752 /* Find the cache for this scsi bus instance */ 2753 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2754 if (cp->pdip == pdip) { 2755 break; 2756 } 2757 } 2758 2759 /* If we can't find a cache for this pdip, create one */ 2760 if (cp == NULL) { 2761 int i; 2762 2763 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2764 KM_SLEEP); 2765 cp->pdip = pdip; 2766 cp->next = sd_scsi_probe_cache_head; 2767 sd_scsi_probe_cache_head = cp; 2768 for (i = 0; i < NTARGETS_WIDE; i++) { 2769 cp->cache[i] = SCSIPROBE_EXISTS; 2770 } 2771 } 2772 2773 mutex_exit(&sd_scsi_probe_cache_mutex); 2774 2775 /* Recompute the cache for this target if LUN zero */ 2776 if (lun == 0) { 2777 cp->cache[tgt] = SCSIPROBE_EXISTS; 2778 } 2779 2780 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2781 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2782 return (SCSIPROBE_NORESP); 2783 } 2784 2785 /* Do the actual probe; save & return the result */ 2786 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2787 } 2788 2789 2790 /* 2791 * Function: sd_scsi_target_lun_init 2792 * 2793 * Description: Initializes the attached lun chain mutex and head pointer. 2794 * 2795 * Context: Kernel thread context 2796 */ 2797 2798 static void 2799 sd_scsi_target_lun_init(void) 2800 { 2801 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2802 sd_scsi_target_lun_head = NULL; 2803 } 2804 2805 2806 /* 2807 * Function: sd_scsi_target_lun_fini 2808 * 2809 * Description: Frees all resources associated with the attached lun 2810 * chain 2811 * 2812 * Context: Kernel thread context 2813 */ 2814 2815 static void 2816 sd_scsi_target_lun_fini(void) 2817 { 2818 struct sd_scsi_hba_tgt_lun *cp; 2819 struct sd_scsi_hba_tgt_lun *ncp; 2820 2821 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2822 ncp = cp->next; 2823 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2824 } 2825 sd_scsi_target_lun_head = NULL; 2826 mutex_destroy(&sd_scsi_target_lun_mutex); 2827 } 2828 2829 2830 /* 2831 * Function: sd_scsi_get_target_lun_count 2832 * 2833 * Description: This routine will check in the attached lun chain to see 2834 * how many luns are attached on the required SCSI controller 2835 * and target. 
Currently, some capabilities, such as tagged queueing,
2836  * are supported on a per-target basis by the HBA, so all luns in a
2837  * target have the same capabilities. Based on this assumption,
2838  * sd should only set these capabilities once per target. This
2839  * function is called when sd needs to decide how many luns
2840  * are already attached on a target.
2841  *
2842  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2843  *                  controller device.
2844  *            target - The target ID on the controller's SCSI bus.
2845  *
2846  * Return Code: The number of luns attached on the required target and
2847  *              controller.
2848  *              -1 if target ID is not in parallel SCSI scope or the given
2849  *              dip is not in the chain.
2850  *
2851  * Context: Kernel thread context
2852  */
2853 
2854 static int
2855 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2856 {
2857     struct sd_scsi_hba_tgt_lun *cp;
2858 
2859     if ((target < 0) || (target >= NTARGETS_WIDE)) {
2860         return (-1);
2861     }
2862 
2863     mutex_enter(&sd_scsi_target_lun_mutex);
2864 
2865     for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2866         if (cp->pdip == dip) {
2867             break;
2868         }
2869     }
2870 
2871     mutex_exit(&sd_scsi_target_lun_mutex);
2872 
2873     if (cp == NULL) {
2874         return (-1);
2875     }
2876 
2877     return (cp->nlun[target]);
2878 }
2879 
2880 
2881 /*
2882  * Function: sd_scsi_update_lun_on_target
2883  *
2884  * Description: This routine is used to update the attached lun chain when a
2885  *              lun is attached or detached on a target.
2886  *
2887  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2888  *                  controller device.
2889  *            target - The target ID on the controller's SCSI bus.
2890  *            flag - Indicates whether the lun is being attached or detached.
2891  *
2892  * Context: Kernel thread context
2893  */
2894 
2895 static void
2896 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2897 {
2898     struct sd_scsi_hba_tgt_lun *cp;
2899 
2900     mutex_enter(&sd_scsi_target_lun_mutex);
2901 
2902     for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2903         if (cp->pdip == dip) {
2904             break;
2905         }
2906     }
2907 
2908     if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2909         cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2910             KM_SLEEP);
2911         cp->pdip = dip;
2912         cp->next = sd_scsi_target_lun_head;
2913         sd_scsi_target_lun_head = cp;
2914     }
2915 
2916     mutex_exit(&sd_scsi_target_lun_mutex);
2917 
2918     if (cp != NULL) {
2919         if (flag == SD_SCSI_LUN_ATTACH) {
2920             cp->nlun[target]++;
2921         } else {
2922             cp->nlun[target]--;
2923         }
2924     }
2925 }
2926 
2927 
2928 /*
2929  * Function: sd_spin_up_unit
2930  *
2931  * Description: Issues the following commands to spin up the device:
2932  *              START STOP UNIT, and INQUIRY.
2933  *
2934  * Arguments: un - driver soft state (unit) structure
2935  *
2936  * Return Code: 0 - success
2937  *              EIO - failure
2938  *              EACCES - reservation conflict
2939  *
2940  * Context: Kernel thread context
2941  */
2942 
2943 static int
2944 sd_spin_up_unit(struct sd_lun *un)
2945 {
2946     size_t resid = 0;
2947     int has_conflict = FALSE;
2948     uchar_t *bufaddr;
2949 
2950     ASSERT(un != NULL);
2951 
2952     /*
2953      * Send a throwaway START UNIT command.
2954      *
2955      * If we fail on this, we don't care presently what precisely
2956      * is wrong. EMC's arrays will also fail this with a check
2957      * condition (0x2/0x4/0x3) if the device is "inactive," but
2958      * we don't want to fail the attach because it may become
2959      * "active" later.
2960      */
2961     if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2962         == EACCES)
2963         has_conflict = TRUE;
2964 
2965     /*
2966      * Send another INQUIRY command to the target. This is necessary for
2967      * non-removable media direct access devices because their INQUIRY data
2968      * may not be fully qualified until they are spun up (perhaps via the
2969      * START command above). Note: This seems to be needed for some
2970      * legacy devices only. The INQUIRY command should succeed even if a
2971      * Reservation Conflict is present.
2972      */
2973     bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2974     if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2975         kmem_free(bufaddr, SUN_INQSIZE);
2976         return (EIO);
2977     }
2978 
2979     /*
2980      * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2981      * Note that this routine does not return a failure here even if the
2982      * INQUIRY command did not return any data. This is a legacy behavior.
2983      */
2984     if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2985         bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2986     }
2987 
2988     kmem_free(bufaddr, SUN_INQSIZE);
2989 
2990     /* If we hit a reservation conflict above, tell the caller. */
2991     if (has_conflict == TRUE) {
2992         return (EACCES);
2993     }
2994 
2995     return (0);
2996 }
2997 
2998 #ifdef _LP64
2999 /*
3000  * Function: sd_enable_descr_sense
3001  *
3002  * Description: This routine attempts to select descriptor sense format
3003  *              using the Control mode page. Devices that support 64 bit
3004  *              LBAs (for >2TB luns) should also implement descriptor
3005  *              sense data, so we will call this function whenever we see
3006  *              a lun larger than 2TB. If for some reason the device
3007  *              supports 64 bit LBAs but doesn't support descriptor sense,
3008  *              presumably the mode select will fail. Everything will
3009  *              continue to work normally except that we will not get
3010  *              complete sense data for commands that fail with an LBA
3011  *              larger than 32 bits.
3012  *
3013  * Arguments: un - driver soft state (unit) structure
3014  *
3015  * Context: Kernel thread context only
3016  */
3017 
3018 static void
3019 sd_enable_descr_sense(struct sd_lun *un)
3020 {
3021     uchar_t *header;
3022     struct mode_control_scsi3 *ctrl_bufp;
3023     size_t buflen;
3024     size_t bd_len;
3025 
3026     /*
3027      * Read MODE SENSE page 0xA, Control Mode Page
3028      */
3029     buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3030         sizeof (struct mode_control_scsi3);
3031     header = kmem_zalloc(buflen, KM_SLEEP);
3032     if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3033         MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3034         SD_ERROR(SD_LOG_COMMON, un,
3035             "sd_enable_descr_sense: mode sense ctrl page failed\n");
3036         goto eds_exit;
3037     }
3038 
3039     /*
3040      * Determine size of Block Descriptors in order to locate
3041      * the mode page data. ATAPI devices return 0, SCSI devices
3042      * should return MODE_BLK_DESC_LENGTH.
3043      */
3044     bd_len = ((struct mode_header *)header)->bdesc_length;
3045 
3046     /* Clear the mode data length field for MODE SELECT */
3047     ((struct mode_header *)header)->length = 0;
3048 
3049     ctrl_bufp = (struct mode_control_scsi3 *)
3050         (header + MODE_HEADER_LENGTH + bd_len);
3051 
3052     /*
3053      * If the page length is smaller than the expected value,
3054      * the target device doesn't support D_SENSE. Bail out here.
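 * (The mode page length field counts only the bytes that follow it,
 * so the expected minimum is the size of struct mode_control_scsi3
 * less its two header bytes; hence the "- 2" in the comparison below.)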
3055 */ 3056 if (ctrl_bufp->mode_page.length < 3057 sizeof (struct mode_control_scsi3) - 2) { 3058 SD_ERROR(SD_LOG_COMMON, un, 3059 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3060 goto eds_exit; 3061 } 3062 3063 /* 3064 * Clear PS bit for MODE SELECT 3065 */ 3066 ctrl_bufp->mode_page.ps = 0; 3067 3068 /* 3069 * Set D_SENSE to enable descriptor sense format. 3070 */ 3071 ctrl_bufp->d_sense = 1; 3072 3073 /* 3074 * Use MODE SELECT to commit the change to the D_SENSE bit 3075 */ 3076 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3077 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3078 SD_INFO(SD_LOG_COMMON, un, 3079 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3080 goto eds_exit; 3081 } 3082 3083 eds_exit: 3084 kmem_free(header, buflen); 3085 } 3086 3087 /* 3088 * Function: sd_reenable_dsense_task 3089 * 3090 * Description: Re-enable descriptor sense after device or bus reset 3091 * 3092 * Context: Executes in a taskq() thread context 3093 */ 3094 static void 3095 sd_reenable_dsense_task(void *arg) 3096 { 3097 struct sd_lun *un = arg; 3098 3099 ASSERT(un != NULL); 3100 sd_enable_descr_sense(un); 3101 } 3102 #endif /* _LP64 */ 3103 3104 /* 3105 * Function: sd_set_mmc_caps 3106 * 3107 * Description: This routine determines if the device is MMC compliant and if 3108 * the device supports CDDA via a mode sense of the CDVD 3109 * capabilities mode page. Also checks if the device is a 3110 * dvdram writable device. 3111 * 3112 * Arguments: un - driver soft state (unit) structure 3113 * 3114 * Context: Kernel thread context only 3115 */ 3116 3117 static void 3118 sd_set_mmc_caps(struct sd_lun *un) 3119 { 3120 struct mode_header_grp2 *sense_mhp; 3121 uchar_t *sense_page; 3122 caddr_t buf; 3123 int bd_len; 3124 int status; 3125 struct uscsi_cmd com; 3126 int rtn; 3127 uchar_t *out_data_rw, *out_data_hd; 3128 uchar_t *rqbuf_rw, *rqbuf_hd; 3129 3130 ASSERT(un != NULL); 3131 3132 /* 3133 * The flags which will be set in this function are - mmc compliant, 3134 * dvdram writable device, cdda support. Initialize them to FALSE 3135 * and if a capability is detected - it will be set to TRUE. 3136 */ 3137 un->un_f_mmc_cap = FALSE; 3138 un->un_f_dvdram_writable_device = FALSE; 3139 un->un_f_cfg_cdda = FALSE; 3140 3141 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3142 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3143 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3144 3145 if (status != 0) { 3146 /* command failed; just return */ 3147 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3148 return; 3149 } 3150 /* 3151 * If the mode sense request for the CDROM CAPABILITIES 3152 * page (0x2A) succeeds the device is assumed to be MMC. 3153 */ 3154 un->un_f_mmc_cap = TRUE; 3155 3156 /* Get to the page data */ 3157 sense_mhp = (struct mode_header_grp2 *)buf; 3158 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3159 sense_mhp->bdesc_length_lo; 3160 if (bd_len > MODE_BLK_DESC_LENGTH) { 3161 /* 3162 * We did not get back the expected block descriptor 3163 * length so we cannot determine if the device supports 3164 * CDDA. However, we still indicate the device is MMC 3165 * according to the successful response to the page 3166 * 0x2A mode sense request. 
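 * (A bd_len larger than MODE_BLK_DESC_LENGTH means the response is
 * not laid out as this code expects, so the capabilities page data
 * cannot be located reliably.)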
3167 */ 3168 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3169 "sd_set_mmc_caps: Mode Sense returned " 3170 "invalid block descriptor length\n"); 3171 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3172 return; 3173 } 3174 3175 /* See if read CDDA is supported */ 3176 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3177 bd_len); 3178 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3179 3180 /* See if writing DVD RAM is supported. */ 3181 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3182 if (un->un_f_dvdram_writable_device == TRUE) { 3183 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3184 return; 3185 } 3186 3187 /* 3188 * If the device presents DVD or CD capabilities in the mode 3189 * page, we can return here since a RRD will not have 3190 * these capabilities. 3191 */ 3192 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3193 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3194 return; 3195 } 3196 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3197 3198 /* 3199 * If un->un_f_dvdram_writable_device is still FALSE, 3200 * check for a Removable Rigid Disk (RRD). A RRD 3201 * device is identified by the features RANDOM_WRITABLE and 3202 * HARDWARE_DEFECT_MANAGEMENT. 3203 */ 3204 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3205 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3206 3207 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3208 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3209 RANDOM_WRITABLE, SD_PATH_STANDARD); 3210 if (rtn != 0) { 3211 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3212 kmem_free(rqbuf_rw, SENSE_LENGTH); 3213 return; 3214 } 3215 3216 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3217 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3218 3219 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3220 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3221 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3222 if (rtn == 0) { 3223 /* 3224 * We have good information, check for random writable 3225 * and hardware defect features. 3226 */ 3227 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3228 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3229 un->un_f_dvdram_writable_device = TRUE; 3230 } 3231 } 3232 3233 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3234 kmem_free(rqbuf_rw, SENSE_LENGTH); 3235 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3236 kmem_free(rqbuf_hd, SENSE_LENGTH); 3237 } 3238 3239 /* 3240 * Function: sd_check_for_writable_cd 3241 * 3242 * Description: This routine determines if the media in the device is 3243 * writable or not. It uses the get configuration command (0x46) 3244 * to determine if the media is writable 3245 * 3246 * Arguments: un - driver soft state (unit) structure 3247 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3248 * chain and the normal command waitq, or 3249 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3250 * "direct" chain and bypass the normal command 3251 * waitq. 3252 * 3253 * Context: Never called at interrupt context. 
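 *
 * Note: The caller must hold SD_MUTEX(un) on entry; the mutex is
 * dropped and reacquired around each command issued here (see the
 * mutex_exit()/mutex_enter() pairs in the body).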
3254 */ 3255 3256 static void 3257 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3258 { 3259 struct uscsi_cmd com; 3260 uchar_t *out_data; 3261 uchar_t *rqbuf; 3262 int rtn; 3263 uchar_t *out_data_rw, *out_data_hd; 3264 uchar_t *rqbuf_rw, *rqbuf_hd; 3265 struct mode_header_grp2 *sense_mhp; 3266 uchar_t *sense_page; 3267 caddr_t buf; 3268 int bd_len; 3269 int status; 3270 3271 ASSERT(un != NULL); 3272 ASSERT(mutex_owned(SD_MUTEX(un))); 3273 3274 /* 3275 * Initialize the writable media to false, if configuration info. 3276 * tells us otherwise then only we will set it. 3277 */ 3278 un->un_f_mmc_writable_media = FALSE; 3279 mutex_exit(SD_MUTEX(un)); 3280 3281 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3282 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3283 3284 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3285 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3286 3287 mutex_enter(SD_MUTEX(un)); 3288 if (rtn == 0) { 3289 /* 3290 * We have good information, check for writable DVD. 3291 */ 3292 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3293 un->un_f_mmc_writable_media = TRUE; 3294 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3295 kmem_free(rqbuf, SENSE_LENGTH); 3296 return; 3297 } 3298 } 3299 3300 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3301 kmem_free(rqbuf, SENSE_LENGTH); 3302 3303 /* 3304 * Determine if this is a RRD type device. 3305 */ 3306 mutex_exit(SD_MUTEX(un)); 3307 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3308 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3309 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3310 mutex_enter(SD_MUTEX(un)); 3311 if (status != 0) { 3312 /* command failed; just return */ 3313 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3314 return; 3315 } 3316 3317 /* Get to the page data */ 3318 sense_mhp = (struct mode_header_grp2 *)buf; 3319 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3320 if (bd_len > MODE_BLK_DESC_LENGTH) { 3321 /* 3322 * We did not get back the expected block descriptor length so 3323 * we cannot check the mode page. 3324 */ 3325 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3326 "sd_check_for_writable_cd: Mode Sense returned " 3327 "invalid block descriptor length\n"); 3328 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3329 return; 3330 } 3331 3332 /* 3333 * If the device presents DVD or CD capabilities in the mode 3334 * page, we can return here since a RRD device will not have 3335 * these capabilities. 3336 */ 3337 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3338 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3339 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3340 return; 3341 } 3342 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3343 3344 /* 3345 * If un->un_f_mmc_writable_media is still FALSE, 3346 * check for RRD type media. A RRD device is identified 3347 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
3348 */ 3349 mutex_exit(SD_MUTEX(un)); 3350 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3351 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3352 3353 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3354 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3355 RANDOM_WRITABLE, path_flag); 3356 if (rtn != 0) { 3357 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3358 kmem_free(rqbuf_rw, SENSE_LENGTH); 3359 mutex_enter(SD_MUTEX(un)); 3360 return; 3361 } 3362 3363 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3364 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3365 3366 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3367 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3368 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3369 mutex_enter(SD_MUTEX(un)); 3370 if (rtn == 0) { 3371 /* 3372 * We have good information, check for random writable 3373 * and hardware defect features as current. 3374 */ 3375 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3376 (out_data_rw[10] & 0x1) && 3377 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3378 (out_data_hd[10] & 0x1)) { 3379 un->un_f_mmc_writable_media = TRUE; 3380 } 3381 } 3382 3383 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3384 kmem_free(rqbuf_rw, SENSE_LENGTH); 3385 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3386 kmem_free(rqbuf_hd, SENSE_LENGTH); 3387 } 3388 3389 /* 3390 * Function: sd_read_unit_properties 3391 * 3392 * Description: The following implements a property lookup mechanism. 3393 * Properties for particular disks (keyed on vendor, model 3394 * and rev numbers) are sought in the sd.conf file via 3395 * sd_process_sdconf_file(), and if not found there, are 3396 * looked for in a list hardcoded in this driver via 3397 * sd_process_sdconf_table() Once located the properties 3398 * are used to update the driver unit structure. 3399 * 3400 * Arguments: un - driver soft state (unit) structure 3401 */ 3402 3403 static void 3404 sd_read_unit_properties(struct sd_lun *un) 3405 { 3406 /* 3407 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3408 * the "sd-config-list" property (from the sd.conf file) or if 3409 * there was not a match for the inquiry vid/pid. If this event 3410 * occurs the static driver configuration table is searched for 3411 * a match. 3412 */ 3413 ASSERT(un != NULL); 3414 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3415 sd_process_sdconf_table(un); 3416 } 3417 3418 /* check for LSI device */ 3419 sd_is_lsi(un); 3420 3421 3422 } 3423 3424 3425 /* 3426 * Function: sd_process_sdconf_file 3427 * 3428 * Description: Use ddi_getlongprop to obtain the properties from the 3429 * driver's config file (ie, sd.conf) and update the driver 3430 * soft state structure accordingly. 3431 * 3432 * Arguments: un - driver soft state (unit) structure 3433 * 3434 * Return Code: SD_SUCCESS - The properties were successfully set according 3435 * to the driver configuration file. 3436 * SD_FAILURE - The driver config list was not obtained or 3437 * there was no vid/pid match. This indicates that 3438 * the static config table should be used. 
3439 * 3440 * The config file has a property, "sd-config-list", which consists of 3441 * one or more duplets as follows: 3442 * 3443 * sd-config-list= 3444 * <duplet>, 3445 * [<duplet>,] 3446 * [<duplet>]; 3447 * 3448 * The structure of each duplet is as follows: 3449 * 3450 * <duplet>:= <vid+pid>,<data-property-name_list> 3451 * 3452 * The first entry of the duplet is the device ID string (the concatenated 3453 * vid & pid; not to be confused with a device_id). This is defined in 3454 * the same way as in the sd_disk_table. 3455 * 3456 * The second part of the duplet is a string that identifies a 3457 * data-property-name-list. The data-property-name-list is defined as 3458 * follows: 3459 * 3460 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3461 * 3462 * The syntax of <data-property-name> depends on the <version> field. 3463 * 3464 * If version = SD_CONF_VERSION_1 we have the following syntax: 3465 * 3466 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3467 * 3468 * where the prop0 value will be used to set prop0 if bit0 set in the 3469 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3470 * 3471 */ 3472 3473 static int 3474 sd_process_sdconf_file(struct sd_lun *un) 3475 { 3476 char *config_list = NULL; 3477 int config_list_len; 3478 int len; 3479 int dupletlen = 0; 3480 char *vidptr; 3481 int vidlen; 3482 char *dnlist_ptr; 3483 char *dataname_ptr; 3484 int dnlist_len; 3485 int dataname_len; 3486 int *data_list; 3487 int data_list_len; 3488 int rval = SD_FAILURE; 3489 int i; 3490 3491 ASSERT(un != NULL); 3492 3493 /* Obtain the configuration list associated with the .conf file */ 3494 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3495 sd_config_list, (caddr_t)&config_list, &config_list_len) 3496 != DDI_PROP_SUCCESS) { 3497 return (SD_FAILURE); 3498 } 3499 3500 /* 3501 * Compare vids in each duplet to the inquiry vid - if a match is 3502 * made, get the data value and update the soft state structure 3503 * accordingly. 3504 * 3505 * Note: This algorithm is complex and difficult to maintain. It should 3506 * be replaced with a more robust implementation. 3507 */ 3508 for (len = config_list_len, vidptr = config_list; len > 0; 3509 vidptr += dupletlen, len -= dupletlen) { 3510 /* 3511 * Note: The assumption here is that each vid entry is on 3512 * a unique line from its associated duplet. 3513 */ 3514 vidlen = dupletlen = (int)strlen(vidptr); 3515 if ((vidlen == 0) || 3516 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3517 dupletlen++; 3518 continue; 3519 } 3520 3521 /* 3522 * dnlist contains 1 or more blank separated 3523 * data-property-name entries 3524 */ 3525 dnlist_ptr = vidptr + vidlen + 1; 3526 dnlist_len = (int)strlen(dnlist_ptr); 3527 dupletlen += dnlist_len + 2; 3528 3529 /* 3530 * Set a pointer for the first data-property-name 3531 * entry in the list 3532 */ 3533 dataname_ptr = dnlist_ptr; 3534 dataname_len = 0; 3535 3536 /* 3537 * Loop through all data-property-name entries in the 3538 * data-property-name-list setting the properties for each. 3539 */ 3540 while (dataname_len < dnlist_len) { 3541 int version; 3542 3543 /* 3544 * Determine the length of the current 3545 * data-property-name entry by indexing until a 3546 * blank or NULL is encountered. When the space is 3547 * encountered reset it to a NULL for compliance 3548 * with ddi_getlongprop(). 
3549 */ 3550 for (i = 0; ((dataname_ptr[i] != ' ') && 3551 (dataname_ptr[i] != '\0')); i++) { 3552 ; 3553 } 3554 3555 dataname_len += i; 3556 /* If not null terminated, Make it so */ 3557 if (dataname_ptr[i] == ' ') { 3558 dataname_ptr[i] = '\0'; 3559 } 3560 dataname_len++; 3561 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3562 "sd_process_sdconf_file: disk:%s, data:%s\n", 3563 vidptr, dataname_ptr); 3564 3565 /* Get the data list */ 3566 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3567 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3568 != DDI_PROP_SUCCESS) { 3569 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3570 "sd_process_sdconf_file: data property (%s)" 3571 " has no value\n", dataname_ptr); 3572 dataname_ptr = dnlist_ptr + dataname_len; 3573 continue; 3574 } 3575 3576 version = data_list[0]; 3577 3578 if (version == SD_CONF_VERSION_1) { 3579 sd_tunables values; 3580 3581 /* Set the properties */ 3582 if (sd_chk_vers1_data(un, data_list[1], 3583 &data_list[2], data_list_len, dataname_ptr) 3584 == SD_SUCCESS) { 3585 sd_get_tunables_from_conf(un, 3586 data_list[1], &data_list[2], 3587 &values); 3588 sd_set_vers1_properties(un, 3589 data_list[1], &values); 3590 rval = SD_SUCCESS; 3591 } else { 3592 rval = SD_FAILURE; 3593 } 3594 } else { 3595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3596 "data property %s version 0x%x is invalid.", 3597 dataname_ptr, version); 3598 rval = SD_FAILURE; 3599 } 3600 kmem_free(data_list, data_list_len); 3601 dataname_ptr = dnlist_ptr + dataname_len; 3602 } 3603 } 3604 3605 /* free up the memory allocated by ddi_getlongprop */ 3606 if (config_list) { 3607 kmem_free(config_list, config_list_len); 3608 } 3609 3610 return (rval); 3611 } 3612 3613 /* 3614 * Function: sd_get_tunables_from_conf() 3615 * 3616 * 3617 * This function reads the data list from the sd.conf file and pulls 3618 * the values that can have numeric values as arguments and places 3619 * the values in the appropriate sd_tunables member. 3620 * Since the order of the data list members varies across platforms 3621 * This function reads them from the data list in a platform specific 3622 * order and places them into the correct sd_tunable member that is 3623 * consistent across all platforms. 
3624 */ 3625 static void 3626 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3627 sd_tunables *values) 3628 { 3629 int i; 3630 int mask; 3631 3632 bzero(values, sizeof (sd_tunables)); 3633 3634 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3635 3636 mask = 1 << i; 3637 if (mask > flags) { 3638 break; 3639 } 3640 3641 switch (mask & flags) { 3642 case 0: /* This mask bit not set in flags */ 3643 continue; 3644 case SD_CONF_BSET_THROTTLE: 3645 values->sdt_throttle = data_list[i]; 3646 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3647 "sd_get_tunables_from_conf: throttle = %d\n", 3648 values->sdt_throttle); 3649 break; 3650 case SD_CONF_BSET_CTYPE: 3651 values->sdt_ctype = data_list[i]; 3652 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3653 "sd_get_tunables_from_conf: ctype = %d\n", 3654 values->sdt_ctype); 3655 break; 3656 case SD_CONF_BSET_NRR_COUNT: 3657 values->sdt_not_rdy_retries = data_list[i]; 3658 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3659 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3660 values->sdt_not_rdy_retries); 3661 break; 3662 case SD_CONF_BSET_BSY_RETRY_COUNT: 3663 values->sdt_busy_retries = data_list[i]; 3664 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3665 "sd_get_tunables_from_conf: busy_retries = %d\n", 3666 values->sdt_busy_retries); 3667 break; 3668 case SD_CONF_BSET_RST_RETRIES: 3669 values->sdt_reset_retries = data_list[i]; 3670 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3671 "sd_get_tunables_from_conf: reset_retries = %d\n", 3672 values->sdt_reset_retries); 3673 break; 3674 case SD_CONF_BSET_RSV_REL_TIME: 3675 values->sdt_reserv_rel_time = data_list[i]; 3676 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3677 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3678 values->sdt_reserv_rel_time); 3679 break; 3680 case SD_CONF_BSET_MIN_THROTTLE: 3681 values->sdt_min_throttle = data_list[i]; 3682 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3683 "sd_get_tunables_from_conf: min_throttle = %d\n", 3684 values->sdt_min_throttle); 3685 break; 3686 case SD_CONF_BSET_DISKSORT_DISABLED: 3687 values->sdt_disk_sort_dis = data_list[i]; 3688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3689 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3690 values->sdt_disk_sort_dis); 3691 break; 3692 case SD_CONF_BSET_LUN_RESET_ENABLED: 3693 values->sdt_lun_reset_enable = data_list[i]; 3694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3695 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3696 "\n", values->sdt_lun_reset_enable); 3697 break; 3698 case SD_CONF_BSET_CACHE_IS_NV: 3699 values->sdt_suppress_cache_flush = data_list[i]; 3700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3701 "sd_get_tunables_from_conf: \ 3702 suppress_cache_flush = %d" 3703 "\n", values->sdt_suppress_cache_flush); 3704 break; 3705 } 3706 } 3707 } 3708 3709 /* 3710 * Function: sd_process_sdconf_table 3711 * 3712 * Description: Search the static configuration table for a match on the 3713 * inquiry vid/pid and update the driver soft state structure 3714 * according to the table property values for the device. 
3715 * 3716 * The form of a configuration table entry is: 3717 * <vid+pid>,<flags>,<property-data> 3718 * "SEAGATE ST42400N",1,0x40000, 3719 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3720 * 3721 * Arguments: un - driver soft state (unit) structure 3722 */ 3723 3724 static void 3725 sd_process_sdconf_table(struct sd_lun *un) 3726 { 3727 char *id = NULL; 3728 int table_index; 3729 int idlen; 3730 3731 ASSERT(un != NULL); 3732 for (table_index = 0; table_index < sd_disk_table_size; 3733 table_index++) { 3734 id = sd_disk_table[table_index].device_id; 3735 idlen = strlen(id); 3736 if (idlen == 0) { 3737 continue; 3738 } 3739 3740 /* 3741 * The static configuration table currently does not 3742 * implement version 10 properties. Additionally, 3743 * multiple data-property-name entries are not 3744 * implemented in the static configuration table. 3745 */ 3746 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3747 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3748 "sd_process_sdconf_table: disk %s\n", id); 3749 sd_set_vers1_properties(un, 3750 sd_disk_table[table_index].flags, 3751 sd_disk_table[table_index].properties); 3752 break; 3753 } 3754 } 3755 } 3756 3757 3758 /* 3759 * Function: sd_sdconf_id_match 3760 * 3761 * Description: This local function implements a case sensitive vid/pid 3762 * comparison as well as the boundary cases of wild card and 3763 * multiple blanks. 3764 * 3765 * Note: An implicit assumption made here is that the scsi 3766 * inquiry structure will always keep the vid, pid and 3767 * revision strings in consecutive sequence, so they can be 3768 * read as a single string. If this assumption is not the 3769 * case, a separate string, to be used for the check, needs 3770 * to be built with these strings concatenated. 3771 * 3772 * Arguments: un - driver soft state (unit) structure 3773 * id - table or config file vid/pid 3774 * idlen - length of the vid/pid (bytes) 3775 * 3776 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3777 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3778 */ 3779 3780 static int 3781 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3782 { 3783 struct scsi_inquiry *sd_inq; 3784 int rval = SD_SUCCESS; 3785 3786 ASSERT(un != NULL); 3787 sd_inq = un->un_sd->sd_inq; 3788 ASSERT(id != NULL); 3789 3790 /* 3791 * We use the inq_vid as a pointer to a buffer containing the 3792 * vid and pid and use the entire vid/pid length of the table 3793 * entry for the comparison. This works because the inq_pid 3794 * data member follows inq_vid in the scsi_inquiry structure. 3795 */ 3796 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3797 /* 3798 * The user id string is compared to the inquiry vid/pid 3799 * using a case insensitive comparison and ignoring 3800 * multiple spaces. 3801 */ 3802 rval = sd_blank_cmp(un, id, idlen); 3803 if (rval != SD_SUCCESS) { 3804 /* 3805 * User id strings that start and end with a "*" 3806 * are a special case. These do not have a 3807 * specific vendor, and the product string can 3808 * appear anywhere in the 16 byte PID portion of 3809 * the inquiry data. This is a simple strstr() 3810 * type search for the user id in the inquiry data. 
3811 */ 3812 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3813 char *pidptr = &id[1]; 3814 int i; 3815 int j; 3816 int pidstrlen = idlen - 2; 3817 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3818 pidstrlen; 3819 3820 if (j < 0) { 3821 return (SD_FAILURE); 3822 } 3823 for (i = 0; i < j; i++) { 3824 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3825 pidptr, pidstrlen) == 0) { 3826 rval = SD_SUCCESS; 3827 break; 3828 } 3829 } 3830 } 3831 } 3832 } 3833 return (rval); 3834 } 3835 3836 3837 /* 3838 * Function: sd_blank_cmp 3839 * 3840 * Description: If the id string starts and ends with a space, treat 3841 * multiple consecutive spaces as equivalent to a single 3842 * space. For example, this causes a sd_disk_table entry 3843 * of " NEC CDROM " to match a device's id string of 3844 * "NEC CDROM". 3845 * 3846 * Note: The success exit condition for this routine is if 3847 * the pointer to the table entry is '\0' and the cnt of 3848 * the inquiry length is zero. This will happen if the inquiry 3849 * string returned by the device is padded with spaces to be 3850 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3851 * SCSI spec states that the inquiry string is to be padded with 3852 * spaces. 3853 * 3854 * Arguments: un - driver soft state (unit) structure 3855 * id - table or config file vid/pid 3856 * idlen - length of the vid/pid (bytes) 3857 * 3858 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3859 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3860 */ 3861 3862 static int 3863 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3864 { 3865 char *p1; 3866 char *p2; 3867 int cnt; 3868 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3869 sizeof (SD_INQUIRY(un)->inq_pid); 3870 3871 ASSERT(un != NULL); 3872 p2 = un->un_sd->sd_inq->inq_vid; 3873 ASSERT(id != NULL); 3874 p1 = id; 3875 3876 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3877 /* 3878 * Note: string p1 is terminated by a NUL but string p2 3879 * isn't. The end of p2 is determined by cnt. 3880 */ 3881 for (;;) { 3882 /* skip over any extra blanks in both strings */ 3883 while ((*p1 != '\0') && (*p1 == ' ')) { 3884 p1++; 3885 } 3886 while ((cnt != 0) && (*p2 == ' ')) { 3887 p2++; 3888 cnt--; 3889 } 3890 3891 /* compare the two strings */ 3892 if ((cnt == 0) || 3893 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3894 break; 3895 } 3896 while ((cnt > 0) && 3897 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3898 p1++; 3899 p2++; 3900 cnt--; 3901 } 3902 } 3903 } 3904 3905 /* return SD_SUCCESS if both strings match */ 3906 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3907 } 3908 3909 3910 /* 3911 * Function: sd_chk_vers1_data 3912 * 3913 * Description: Verify the version 1 device properties provided by the 3914 * user via the configuration file 3915 * 3916 * Arguments: un - driver soft state (unit) structure 3917 * flags - integer mask indicating properties to be set 3918 * prop_list - integer list of property values 3919 * list_len - length of user provided data 3920 * 3921 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3922 * SD_FAILURE - Indicates the user provided data is invalid 3923 */ 3924 3925 static int 3926 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3927 int list_len, char *dataname_ptr) 3928 { 3929 int i; 3930 int mask = 1; 3931 int index = 0; 3932 3933 ASSERT(un != NULL); 3934 3935 /* Check for a NULL property name and list */ 3936 if (dataname_ptr == NULL) { 3937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3938 "sd_chk_vers1_data: NULL data property name."); 3939 return (SD_FAILURE); 3940 } 3941 if (prop_list == NULL) { 3942 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3943 "sd_chk_vers1_data: %s NULL data property list.", 3944 dataname_ptr); 3945 return (SD_FAILURE); 3946 } 3947 3948 /* Display a warning if undefined bits are set in the flags */ 3949 if (flags & ~SD_CONF_BIT_MASK) { 3950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3951 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3952 "Properties not set.", 3953 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3954 return (SD_FAILURE); 3955 } 3956 3957 /* 3958 * Verify the length of the list by identifying the highest bit set 3959 * in the flags and validating that the property list has a length 3960 * up to the index of this bit. 3961 */ 3962 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3963 if (flags & mask) { 3964 index++; 3965 } 3966 mask = 1 << i; 3967 } 3968 if ((list_len / sizeof (int)) < (index + 2)) { 3969 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3970 "sd_chk_vers1_data: " 3971 "Data property list %s size is incorrect. " 3972 "Properties not set.", dataname_ptr); 3973 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3974 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3975 return (SD_FAILURE); 3976 } 3977 return (SD_SUCCESS); 3978 } 3979 3980 3981 /* 3982 * Function: sd_set_vers1_properties 3983 * 3984 * Description: Set version 1 device properties based on a property list 3985 * retrieved from the driver configuration file or static 3986 * configuration table. Version 1 properties have the format: 3987 * 3988 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3989 * 3990 * where the prop0 value will be used to set prop0 if bit0 3991 * is set in the flags 3992 * 3993 * Arguments: un - driver soft state (unit) structure 3994 * flags - integer mask indicating properties to be set 3995 * prop_list - integer list of property values 3996 */ 3997 3998 static void 3999 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4000 { 4001 ASSERT(un != NULL); 4002 4003 /* 4004 * Set the flag to indicate cache is to be disabled. An attempt 4005 * to disable the cache via sd_cache_control() will be made 4006 * later during attach once the basic initialization is complete. 
4007 */ 4008 if (flags & SD_CONF_BSET_NOCACHE) { 4009 un->un_f_opt_disable_cache = TRUE; 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4011 "sd_set_vers1_properties: caching disabled flag set\n"); 4012 } 4013 4014 /* CD-specific configuration parameters */ 4015 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4016 un->un_f_cfg_playmsf_bcd = TRUE; 4017 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4018 "sd_set_vers1_properties: playmsf_bcd set\n"); 4019 } 4020 if (flags & SD_CONF_BSET_READSUB_BCD) { 4021 un->un_f_cfg_readsub_bcd = TRUE; 4022 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4023 "sd_set_vers1_properties: readsub_bcd set\n"); 4024 } 4025 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4026 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4028 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4029 } 4030 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4031 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4032 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4033 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4034 } 4035 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4036 un->un_f_cfg_no_read_header = TRUE; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_set_vers1_properties: no_read_header set\n"); 4039 } 4040 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4041 un->un_f_cfg_read_cd_xd4 = TRUE; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4044 } 4045 4046 /* Support for devices which do not have valid/unique serial numbers */ 4047 if (flags & SD_CONF_BSET_FAB_DEVID) { 4048 un->un_f_opt_fab_devid = TRUE; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_set_vers1_properties: fab_devid bit set\n"); 4051 } 4052 4053 /* Support for user throttle configuration */ 4054 if (flags & SD_CONF_BSET_THROTTLE) { 4055 ASSERT(prop_list != NULL); 4056 un->un_saved_throttle = un->un_throttle = 4057 prop_list->sdt_throttle; 4058 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4059 "sd_set_vers1_properties: throttle set to %d\n", 4060 prop_list->sdt_throttle); 4061 } 4062 4063 /* Set the per disk retry count according to the conf file or table. 
*/ 4064 if (flags & SD_CONF_BSET_NRR_COUNT) { 4065 ASSERT(prop_list != NULL); 4066 if (prop_list->sdt_not_rdy_retries) { 4067 un->un_notready_retry_count = 4068 prop_list->sdt_not_rdy_retries; 4069 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4070 "sd_set_vers1_properties: not ready retry count" 4071 " set to %d\n", un->un_notready_retry_count); 4072 } 4073 } 4074 4075 /* The controller type is reported for generic disk driver ioctls */ 4076 if (flags & SD_CONF_BSET_CTYPE) { 4077 ASSERT(prop_list != NULL); 4078 switch (prop_list->sdt_ctype) { 4079 case CTYPE_CDROM: 4080 un->un_ctype = prop_list->sdt_ctype; 4081 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4082 "sd_set_vers1_properties: ctype set to " 4083 "CTYPE_CDROM\n"); 4084 break; 4085 case CTYPE_CCS: 4086 un->un_ctype = prop_list->sdt_ctype; 4087 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4088 "sd_set_vers1_properties: ctype set to " 4089 "CTYPE_CCS\n"); 4090 break; 4091 case CTYPE_ROD: /* RW optical */ 4092 un->un_ctype = prop_list->sdt_ctype; 4093 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4094 "sd_set_vers1_properties: ctype set to " 4095 "CTYPE_ROD\n"); 4096 break; 4097 default: 4098 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4099 "sd_set_vers1_properties: Could not set " 4100 "invalid ctype value (%d)", 4101 prop_list->sdt_ctype); 4102 } 4103 } 4104 4105 /* Purple failover timeout */ 4106 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4107 ASSERT(prop_list != NULL); 4108 un->un_busy_retry_count = 4109 prop_list->sdt_busy_retries; 4110 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4111 "sd_set_vers1_properties: " 4112 "busy retry count set to %d\n", 4113 un->un_busy_retry_count); 4114 } 4115 4116 /* Purple reset retry count */ 4117 if (flags & SD_CONF_BSET_RST_RETRIES) { 4118 ASSERT(prop_list != NULL); 4119 un->un_reset_retry_count = 4120 prop_list->sdt_reset_retries; 4121 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4122 "sd_set_vers1_properties: " 4123 "reset retry count set to %d\n", 4124 un->un_reset_retry_count); 4125 } 4126 4127 /* Purple reservation release timeout */ 4128 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4129 ASSERT(prop_list != NULL); 4130 un->un_reserve_release_time = 4131 prop_list->sdt_reserv_rel_time; 4132 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4133 "sd_set_vers1_properties: " 4134 "reservation release timeout set to %d\n", 4135 un->un_reserve_release_time); 4136 } 4137 4138 /* 4139 * Driver flag telling the driver to verify that no commands are pending 4140 * for a device before issuing a Test Unit Ready. This is a workaround 4141 * for a firmware bug in some Seagate eliteI drives. 4142 */ 4143 if (flags & SD_CONF_BSET_TUR_CHECK) { 4144 un->un_f_cfg_tur_check = TRUE; 4145 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4146 "sd_set_vers1_properties: tur queue check set\n"); 4147 } 4148 4149 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4150 un->un_min_throttle = prop_list->sdt_min_throttle; 4151 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4152 "sd_set_vers1_properties: min throttle set to %d\n", 4153 un->un_min_throttle); 4154 } 4155 4156 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4157 un->un_f_disksort_disabled = 4158 (prop_list->sdt_disk_sort_dis != 0) ? 4159 TRUE : FALSE; 4160 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4161 "sd_set_vers1_properties: disksort disabled " 4162 "flag set to %d\n", 4163 prop_list->sdt_disk_sort_dis); 4164 } 4165 4166 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4167 un->un_f_lun_reset_enabled = 4168 (prop_list->sdt_lun_reset_enable != 0) ? 
4169 TRUE : FALSE; 4170 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4171 "sd_set_vers1_properties: lun reset enabled " 4172 "flag set to %d\n", 4173 prop_list->sdt_lun_reset_enable); 4174 } 4175 4176 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4177 un->un_f_suppress_cache_flush = 4178 (prop_list->sdt_suppress_cache_flush != 0) ? 4179 TRUE : FALSE; 4180 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4181 "sd_set_vers1_properties: suppress_cache_flush " 4182 "flag set to %d\n", 4183 prop_list->sdt_suppress_cache_flush); 4184 } 4185 4186 /* 4187 * Validate the throttle values. 4188 * If any of the numbers are invalid, set everything to defaults. 4189 */ 4190 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4191 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4192 (un->un_min_throttle > un->un_throttle)) { 4193 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4194 un->un_min_throttle = sd_min_throttle; 4195 } 4196 } 4197 4198 /* 4199 * Function: sd_is_lsi() 4200 * 4201 * Description: Check for lsi devices, step through the static device 4202 * table to match vid/pid. 4203 * 4204 * Args: un - ptr to sd_lun 4205 * 4206 * Notes: When creating new LSI property, need to add the new LSI property 4207 * to this function. 4208 */ 4209 static void 4210 sd_is_lsi(struct sd_lun *un) 4211 { 4212 char *id = NULL; 4213 int table_index; 4214 int idlen; 4215 void *prop; 4216 4217 ASSERT(un != NULL); 4218 for (table_index = 0; table_index < sd_disk_table_size; 4219 table_index++) { 4220 id = sd_disk_table[table_index].device_id; 4221 idlen = strlen(id); 4222 if (idlen == 0) { 4223 continue; 4224 } 4225 4226 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4227 prop = sd_disk_table[table_index].properties; 4228 if (prop == &lsi_properties || 4229 prop == &lsi_oem_properties || 4230 prop == &lsi_properties_scsi || 4231 prop == &symbios_properties) { 4232 un->un_f_cfg_is_lsi = TRUE; 4233 } 4234 break; 4235 } 4236 } 4237 } 4238 4239 /* 4240 * Function: sd_get_physical_geometry 4241 * 4242 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4243 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4244 * target, and use this information to initialize the physical 4245 * geometry cache specified by pgeom_p. 4246 * 4247 * MODE SENSE is an optional command, so failure in this case 4248 * does not necessarily denote an error. We want to use the 4249 * MODE SENSE commands to derive the physical geometry of the 4250 * device, but if either command fails, the logical geometry is 4251 * used as the fallback for disk label geometry in cmlb. 4252 * 4253 * This requires that un->un_blockcount and un->un_tgt_blocksize 4254 * have already been initialized for the current target and 4255 * that the current values be passed as args so that we don't 4256 * end up ever trying to use -1 as a valid value. This could 4257 * happen if either value is reset while we're not holding 4258 * the mutex. 4259 * 4260 * Arguments: un - driver soft state (unit) structure 4261 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4262 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4263 * to use the USCSI "direct" chain and bypass the normal 4264 * command waitq. 4265 * 4266 * Context: Kernel thread only (can sleep). 
4267 */ 4268 4269 static int 4270 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4271 diskaddr_t capacity, int lbasize, int path_flag) 4272 { 4273 struct mode_format *page3p; 4274 struct mode_geometry *page4p; 4275 struct mode_header *headerp; 4276 int sector_size; 4277 int nsect; 4278 int nhead; 4279 int ncyl; 4280 int intrlv; 4281 int spc; 4282 diskaddr_t modesense_capacity; 4283 int rpm; 4284 int bd_len; 4285 int mode_header_length; 4286 uchar_t *p3bufp; 4287 uchar_t *p4bufp; 4288 int cdbsize; 4289 int ret = EIO; 4290 4291 ASSERT(un != NULL); 4292 4293 if (lbasize == 0) { 4294 if (ISCD(un)) { 4295 lbasize = 2048; 4296 } else { 4297 lbasize = un->un_sys_blocksize; 4298 } 4299 } 4300 pgeom_p->g_secsize = (unsigned short)lbasize; 4301 4302 /* 4303 * If the unit is a cd/dvd drive MODE SENSE page three 4304 * and MODE SENSE page four are reserved (see SBC spec 4305 * and MMC spec). To prevent soft errors just return 4306 * using the default LBA size. 4307 */ 4308 if (ISCD(un)) 4309 return (ret); 4310 4311 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4312 4313 /* 4314 * Retrieve MODE SENSE page 3 - Format Device Page 4315 */ 4316 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4317 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4318 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4319 != 0) { 4320 SD_ERROR(SD_LOG_COMMON, un, 4321 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4322 goto page3_exit; 4323 } 4324 4325 /* 4326 * Determine size of Block Descriptors in order to locate the mode 4327 * page data. ATAPI devices return 0, SCSI devices should return 4328 * MODE_BLK_DESC_LENGTH. 4329 */ 4330 headerp = (struct mode_header *)p3bufp; 4331 if (un->un_f_cfg_is_atapi == TRUE) { 4332 struct mode_header_grp2 *mhp = 4333 (struct mode_header_grp2 *)headerp; 4334 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4335 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4336 } else { 4337 mode_header_length = MODE_HEADER_LENGTH; 4338 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4339 } 4340 4341 if (bd_len > MODE_BLK_DESC_LENGTH) { 4342 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4343 "received unexpected bd_len of %d, page3\n", bd_len); 4344 goto page3_exit; 4345 } 4346 4347 page3p = (struct mode_format *) 4348 ((caddr_t)headerp + mode_header_length + bd_len); 4349 4350 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4351 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4352 "mode sense pg3 code mismatch %d\n", 4353 page3p->mode_page.code); 4354 goto page3_exit; 4355 } 4356 4357 /* 4358 * Use this physical geometry data only if BOTH MODE SENSE commands 4359 * complete successfully; otherwise, revert to the logical geometry. 4360 * So, we need to save everything in temporary variables. 
4361 */ 4362 sector_size = BE_16(page3p->data_bytes_sect); 4363 4364 /* 4365 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4366 */ 4367 if (sector_size == 0) { 4368 sector_size = un->un_sys_blocksize; 4369 } else { 4370 sector_size &= ~(un->un_sys_blocksize - 1); 4371 } 4372 4373 nsect = BE_16(page3p->sect_track); 4374 intrlv = BE_16(page3p->interleave); 4375 4376 SD_INFO(SD_LOG_COMMON, un, 4377 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4378 SD_INFO(SD_LOG_COMMON, un, 4379 " mode page: %d; nsect: %d; sector size: %d;\n", 4380 page3p->mode_page.code, nsect, sector_size); 4381 SD_INFO(SD_LOG_COMMON, un, 4382 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4383 BE_16(page3p->track_skew), 4384 BE_16(page3p->cylinder_skew)); 4385 4386 4387 /* 4388 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4389 */ 4390 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4391 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4392 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4393 != 0) { 4394 SD_ERROR(SD_LOG_COMMON, un, 4395 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4396 goto page4_exit; 4397 } 4398 4399 /* 4400 * Determine size of Block Descriptors in order to locate the mode 4401 * page data. ATAPI devices return 0, SCSI devices should return 4402 * MODE_BLK_DESC_LENGTH. 4403 */ 4404 headerp = (struct mode_header *)p4bufp; 4405 if (un->un_f_cfg_is_atapi == TRUE) { 4406 struct mode_header_grp2 *mhp = 4407 (struct mode_header_grp2 *)headerp; 4408 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4409 } else { 4410 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4411 } 4412 4413 if (bd_len > MODE_BLK_DESC_LENGTH) { 4414 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4415 "received unexpected bd_len of %d, page4\n", bd_len); 4416 goto page4_exit; 4417 } 4418 4419 page4p = (struct mode_geometry *) 4420 ((caddr_t)headerp + mode_header_length + bd_len); 4421 4422 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4423 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4424 "mode sense pg4 code mismatch %d\n", 4425 page4p->mode_page.code); 4426 goto page4_exit; 4427 } 4428 4429 /* 4430 * Stash the data now, after we know that both commands completed. 4431 */ 4432 4433 4434 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4435 spc = nhead * nsect; 4436 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4437 rpm = BE_16(page4p->rpm); 4438 4439 modesense_capacity = spc * ncyl; 4440 4441 SD_INFO(SD_LOG_COMMON, un, 4442 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4443 SD_INFO(SD_LOG_COMMON, un, 4444 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4447 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4448 (void *)pgeom_p, capacity); 4449 4450 /* 4451 * Compensate if the drive's geometry is not rectangular, i.e., 4452 * the product of C * H * S returned by MODE SENSE >= that returned 4453 * by read capacity. This is an idiosyncrasy of the original x86 4454 * disk subsystem. 
4455 */ 4456 if (modesense_capacity >= capacity) { 4457 SD_INFO(SD_LOG_COMMON, un, 4458 "sd_get_physical_geometry: adjusting acyl; " 4459 "old: %d; new: %d\n", pgeom_p->g_acyl, 4460 (modesense_capacity - capacity + spc - 1) / spc); 4461 if (sector_size != 0) { 4462 /* 1243403: NEC D38x7 drives don't support sec size */ 4463 pgeom_p->g_secsize = (unsigned short)sector_size; 4464 } 4465 pgeom_p->g_nsect = (unsigned short)nsect; 4466 pgeom_p->g_nhead = (unsigned short)nhead; 4467 pgeom_p->g_capacity = capacity; 4468 pgeom_p->g_acyl = 4469 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4470 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4471 } 4472 4473 pgeom_p->g_rpm = (unsigned short)rpm; 4474 pgeom_p->g_intrlv = (unsigned short)intrlv; 4475 ret = 0; 4476 4477 SD_INFO(SD_LOG_COMMON, un, 4478 "sd_get_physical_geometry: mode sense geometry:\n"); 4479 SD_INFO(SD_LOG_COMMON, un, 4480 " nsect: %d; sector size: %d; interlv: %d\n", 4481 nsect, sector_size, intrlv); 4482 SD_INFO(SD_LOG_COMMON, un, 4483 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4484 nhead, ncyl, rpm, modesense_capacity); 4485 SD_INFO(SD_LOG_COMMON, un, 4486 "sd_get_physical_geometry: (cached)\n"); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4489 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4490 pgeom_p->g_nhead, pgeom_p->g_nsect); 4491 SD_INFO(SD_LOG_COMMON, un, 4492 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4493 pgeom_p->g_secsize, pgeom_p->g_capacity, 4494 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4495 4496 page4_exit: 4497 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4498 page3_exit: 4499 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4500 4501 return (ret); 4502 } 4503 4504 /* 4505 * Function: sd_get_virtual_geometry 4506 * 4507 * Description: Ask the controller to tell us about the target device. 4508 * 4509 * Arguments: un - pointer to softstate 4510 * capacity - disk capacity in #blocks 4511 * lbasize - disk block size in bytes 4512 * 4513 * Context: Kernel thread only 4514 */ 4515 4516 static int 4517 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4518 diskaddr_t capacity, int lbasize) 4519 { 4520 uint_t geombuf; 4521 int spc; 4522 4523 ASSERT(un != NULL); 4524 4525 /* Set sector size, and total number of sectors */ 4526 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4527 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4528 4529 /* Let the HBA tell us its geometry */ 4530 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4531 4532 /* A value of -1 indicates an undefined "geometry" property */ 4533 if (geombuf == (-1)) { 4534 return (EINVAL); 4535 } 4536 4537 /* Initialize the logical geometry cache. */ 4538 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4539 lgeom_p->g_nsect = geombuf & 0xffff; 4540 lgeom_p->g_secsize = un->un_sys_blocksize; 4541 4542 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4543 4544 /* 4545 * Note: The driver originally converted the capacity value from 4546 * target blocks to system blocks. However, the capacity value passed 4547 * to this routine is already in terms of system blocks (this scaling 4548 * is done when the READ CAPACITY command is issued and processed). 4549 * This 'error' may have gone undetected because the usage of g_ncyl 4550 * (which is based upon g_capacity) is very limited within the driver 4551 */ 4552 lgeom_p->g_capacity = capacity; 4553 4554 /* 4555 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The
 * hba may return zero values if the device has been removed.
 */
	if (spc == 0) {
		lgeom_p->g_ncyl = 0;
	} else {
		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
	}
	lgeom_p->g_acyl = 0;

	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
	return (0);
}

/*
 * Function: sd_update_block_info
 *
 * Description: Update the soft state with a new target sector size and/or
 *		block count, and refresh the derived device capacity
 *		properties.
 *
 * Arguments: un: unit struct.
 *	      lbasize: new target sector size
 *	      capacity: new target capacity, i.e., block count
 *
 * Context: Kernel thread context
 */

static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	uint_t		dblk;

	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
	}

	if (capacity != 0) {
		un->un_blockcount = capacity;
		un->un_f_blockcount_is_valid = TRUE;
	}

	/*
	 * Update device capacity properties.
	 *
	 *   'device-nblocks'	number of blocks in target's units
	 *   'device-blksize'	data bearing size of target's block
	 *
	 * NOTE: math is complicated by the fact that un_tgt_blocksize may
	 * not be a power of two for checksumming disks with 520/528 byte
	 * sectors.
	 */
	if (un->un_f_tgt_blocksize_is_valid &&
	    un->un_f_blockcount_is_valid &&
	    un->un_sys_blocksize) {
		dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
		(void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
		    "device-nblocks", un->un_blockcount / dblk);
		/*
		 * To save memory, only define "device-blksize" when its
		 * value is different from the default DEV_BSIZE value.
		 */
		if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
			(void) ddi_prop_update_int(DDI_DEV_T_NONE,
			    SD_DEVINFO(un), "device-blksize",
			    un->un_sys_blocksize * dblk);
	}
}


/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments: devi - the system's dev_info_t for the device.
 *	      un - driver soft state (unit) structure
 *	      reservation_flag - indicates if a reservation conflict
 *		occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
{
	int		rval		= 0;
	uchar_t		*inq80		= NULL;
	size_t		inq80_len	= MAX_INQUIRY_SIZE;
	size_t		inq80_resid	= 0;
	uchar_t		*inq83		= NULL;
	size_t		inq83_len	= MAX_INQUIRY_SIZE;
	size_t		inq83_resid	= 0;
	int		dlen, len;
	char		*sn;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * If transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		return; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.
 * These drives manage their devids by storing them in the last two
 * available sectors on the drive and have them fabricated by the ddi
 * layer by calling ddi_devid_init and passing the DEVID_FAB flag.
 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(un) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict. Fabricate a new devid.
			 */
			(void) sd_create_devid(un);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
	 * 0x83 is available, that is the best choice. Our next choice is
	 * 0x80. If neither is available, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do a quick verify of the data
				 * returned and define the property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
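					 * For example, a (hypothetical) page
					 * 0x80 response of 00 80 00 04 '  A7'
					 * carries length 4 at byte 3; after
					 * termination and blank-skipping, the
					 * property is set to "A7".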
4736 */ 4737 sn = (char *)&inq80[4]; 4738 sn[len] = 0; 4739 while (*sn && (*sn == ' ')) 4740 sn++; 4741 if (*sn) { 4742 (void) ddi_prop_update_string( 4743 DDI_DEV_T_NONE, 4744 SD_DEVINFO(un), 4745 INQUIRY_SERIAL_NO, sn); 4746 } 4747 } 4748 } 4749 mutex_enter(SD_MUTEX(un)); 4750 } 4751 4752 /* collect page 83 data if available */ 4753 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4754 mutex_exit(SD_MUTEX(un)); 4755 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4756 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4757 0x01, 0x83, &inq83_resid); 4758 4759 if (rval != 0) { 4760 kmem_free(inq83, inq83_len); 4761 inq83 = NULL; 4762 inq83_len = 0; 4763 } 4764 mutex_enter(SD_MUTEX(un)); 4765 } 4766 } 4767 4768 /* encode best devid possible based on data available */ 4769 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4770 (char *)ddi_driver_name(SD_DEVINFO(un)), 4771 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4772 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4773 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4774 4775 /* devid successfully encoded, register devid */ 4776 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4777 4778 } else { 4779 /* 4780 * Unable to encode a devid based on data available. 4781 * This is not a Sun qualified disk. Older Sun disk 4782 * drives that have the SD_FAB_DEVID property 4783 * set in the disk_table and non Sun qualified 4784 * disks are treated in the same manner. These 4785 * drives manage the devid's by storing them in 4786 * last 2 available sectors on the drive and 4787 * have them fabricated by the ddi layer by 4788 * calling ddi_devid_init and passing the 4789 * DEVID_FAB flag. 4790 * Create a fabricate devid only if there's no 4791 * fabricate devid existed. 4792 */ 4793 if (sd_get_devid(un) == EINVAL) { 4794 (void) sd_create_devid(un); 4795 } 4796 un->un_f_opt_fab_devid = TRUE; 4797 4798 /* Register the devid if it exists */ 4799 if (un->un_devid != NULL) { 4800 (void) ddi_devid_register(SD_DEVINFO(un), 4801 un->un_devid); 4802 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4803 "sd_register_devid: devid fabricated using " 4804 "ddi framework\n"); 4805 } 4806 } 4807 4808 /* clean up resources */ 4809 if (inq80 != NULL) { 4810 kmem_free(inq80, inq80_len); 4811 } 4812 if (inq83 != NULL) { 4813 kmem_free(inq83, inq83_len); 4814 } 4815 } 4816 4817 4818 4819 /* 4820 * Function: sd_get_devid 4821 * 4822 * Description: This routine will return 0 if a valid device id has been 4823 * obtained from the target and stored in the soft state. If a 4824 * valid device id has not been previously read and stored, a 4825 * read attempt will be made. 
4826 * 4827 * Arguments: un - driver soft state (unit) structure 4828 * 4829 * Return Code: 0 if we successfully get the device id 4830 * 4831 * Context: Kernel Thread 4832 */ 4833 4834 static int 4835 sd_get_devid(struct sd_lun *un) 4836 { 4837 struct dk_devid *dkdevid; 4838 ddi_devid_t tmpid; 4839 uint_t *ip; 4840 size_t sz; 4841 diskaddr_t blk; 4842 int status; 4843 int chksum; 4844 int i; 4845 size_t buffer_size; 4846 4847 ASSERT(un != NULL); 4848 ASSERT(mutex_owned(SD_MUTEX(un))); 4849 4850 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4851 un); 4852 4853 if (un->un_devid != NULL) { 4854 return (0); 4855 } 4856 4857 mutex_exit(SD_MUTEX(un)); 4858 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4859 (void *)SD_PATH_DIRECT) != 0) { 4860 mutex_enter(SD_MUTEX(un)); 4861 return (EINVAL); 4862 } 4863 4864 /* 4865 * Read and verify device id, stored in the reserved cylinders at the 4866 * end of the disk. Backup label is on the odd sectors of the last 4867 * track of the last cylinder. Device id will be on track of the next 4868 * to last cylinder. 4869 */ 4870 mutex_enter(SD_MUTEX(un)); 4871 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4872 mutex_exit(SD_MUTEX(un)); 4873 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4874 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4875 SD_PATH_DIRECT); 4876 if (status != 0) { 4877 goto error; 4878 } 4879 4880 /* Validate the revision */ 4881 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4882 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4883 status = EINVAL; 4884 goto error; 4885 } 4886 4887 /* Calculate the checksum */ 4888 chksum = 0; 4889 ip = (uint_t *)dkdevid; 4890 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4891 i++) { 4892 chksum ^= ip[i]; 4893 } 4894 4895 /* Compare the checksums */ 4896 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4897 status = EINVAL; 4898 goto error; 4899 } 4900 4901 /* Validate the device id */ 4902 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4903 status = EINVAL; 4904 goto error; 4905 } 4906 4907 /* 4908 * Store the device id in the driver soft state 4909 */ 4910 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4911 tmpid = kmem_alloc(sz, KM_SLEEP); 4912 4913 mutex_enter(SD_MUTEX(un)); 4914 4915 un->un_devid = tmpid; 4916 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4917 4918 kmem_free(dkdevid, buffer_size); 4919 4920 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4921 4922 return (status); 4923 error: 4924 mutex_enter(SD_MUTEX(un)); 4925 kmem_free(dkdevid, buffer_size); 4926 return (status); 4927 } 4928 4929 4930 /* 4931 * Function: sd_create_devid 4932 * 4933 * Description: This routine will fabricate the device id and write it 4934 * to the disk. 
4935 * 4936 * Arguments: un - driver soft state (unit) structure 4937 * 4938 * Return Code: value of the fabricated device id 4939 * 4940 * Context: Kernel Thread 4941 */ 4942 4943 static ddi_devid_t 4944 sd_create_devid(struct sd_lun *un) 4945 { 4946 ASSERT(un != NULL); 4947 4948 /* Fabricate the devid */ 4949 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4950 == DDI_FAILURE) { 4951 return (NULL); 4952 } 4953 4954 /* Write the devid to disk */ 4955 if (sd_write_deviceid(un) != 0) { 4956 ddi_devid_free(un->un_devid); 4957 un->un_devid = NULL; 4958 } 4959 4960 return (un->un_devid); 4961 } 4962 4963 4964 /* 4965 * Function: sd_write_deviceid 4966 * 4967 * Description: This routine will write the device id to the disk 4968 * reserved sector. 4969 * 4970 * Arguments: un - driver soft state (unit) structure 4971 * 4972 * Return Code: EINVAL 4973 * value returned by sd_send_scsi_cmd 4974 * 4975 * Context: Kernel Thread 4976 */ 4977 4978 static int 4979 sd_write_deviceid(struct sd_lun *un) 4980 { 4981 struct dk_devid *dkdevid; 4982 diskaddr_t blk; 4983 uint_t *ip, chksum; 4984 int status; 4985 int i; 4986 4987 ASSERT(mutex_owned(SD_MUTEX(un))); 4988 4989 mutex_exit(SD_MUTEX(un)); 4990 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4991 (void *)SD_PATH_DIRECT) != 0) { 4992 mutex_enter(SD_MUTEX(un)); 4993 return (-1); 4994 } 4995 4996 4997 /* Allocate the buffer */ 4998 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4999 5000 /* Fill in the revision */ 5001 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5002 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5003 5004 /* Copy in the device id */ 5005 mutex_enter(SD_MUTEX(un)); 5006 bcopy(un->un_devid, &dkdevid->dkd_devid, 5007 ddi_devid_sizeof(un->un_devid)); 5008 mutex_exit(SD_MUTEX(un)); 5009 5010 /* Calculate the checksum */ 5011 chksum = 0; 5012 ip = (uint_t *)dkdevid; 5013 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5014 i++) { 5015 chksum ^= ip[i]; 5016 } 5017 5018 /* Fill-in checksum */ 5019 DKD_FORMCHKSUM(chksum, dkdevid); 5020 5021 /* Write the reserved sector */ 5022 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5023 SD_PATH_DIRECT); 5024 5025 kmem_free(dkdevid, un->un_sys_blocksize); 5026 5027 mutex_enter(SD_MUTEX(un)); 5028 return (status); 5029 } 5030 5031 5032 /* 5033 * Function: sd_check_vpd_page_support 5034 * 5035 * Description: This routine sends an inquiry command with the EVPD bit set and 5036 * a page code of 0x00 to the device. It is used to determine which 5037 * vital product pages are availible to find the devid. We are 5038 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5039 * device does not support that command. 5040 * 5041 * Arguments: un - driver soft state (unit) structure 5042 * 5043 * Return Code: 0 - success 5044 * 1 - check condition 5045 * 5046 * Context: This routine can sleep. 5047 */ 5048 5049 static int 5050 sd_check_vpd_page_support(struct sd_lun *un) 5051 { 5052 uchar_t *page_list = NULL; 5053 uchar_t page_length = 0xff; /* Use max possible length */ 5054 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5055 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5056 int rval = 0; 5057 int counter; 5058 5059 ASSERT(un != NULL); 5060 ASSERT(mutex_owned(SD_MUTEX(un))); 5061 5062 mutex_exit(SD_MUTEX(un)); 5063 5064 /* 5065 * We'll set the page length to the maximum to save figuring it out 5066 * with an additional call. 
5067 */ 5068 page_list = kmem_zalloc(page_length, KM_SLEEP); 5069 5070 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5071 page_code, NULL); 5072 5073 mutex_enter(SD_MUTEX(un)); 5074 5075 /* 5076 * Now we must validate that the device accepted the command, as some 5077 * drives do not support it. If the drive does support it, we will 5078 * return 0, and the supported pages will be in un_vpd_page_mask. If 5079 * not, we return -1. 5080 */ 5081 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5082 /* Loop to find one of the 2 pages we need */ 5083 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5084 5085 /* 5086 * Pages are returned in ascending order, and 0x83 is what we 5087 * are hoping for. 5088 */ 5089 while ((page_list[counter] <= 0x86) && 5090 (counter <= (page_list[VPD_PAGE_LENGTH] + 5091 VPD_HEAD_OFFSET))) { 5092 /* 5093 * Add 3 because page_list[3] is the number of 5094 * pages minus 3 5095 */ 5096 5097 switch (page_list[counter]) { 5098 case 0x00: 5099 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5100 break; 5101 case 0x80: 5102 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5103 break; 5104 case 0x81: 5105 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5106 break; 5107 case 0x82: 5108 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5109 break; 5110 case 0x83: 5111 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5112 break; 5113 case 0x86: 5114 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5115 break; 5116 } 5117 counter++; 5118 } 5119 5120 } else { 5121 rval = -1; 5122 5123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5124 "sd_check_vpd_page_support: This drive does not implement " 5125 "VPD pages.\n"); 5126 } 5127 5128 kmem_free(page_list, page_length); 5129 5130 return (rval); 5131 } 5132 5133 5134 /* 5135 * Function: sd_setup_pm 5136 * 5137 * Description: Initialize Power Management on the device 5138 * 5139 * Context: Kernel Thread 5140 */ 5141 5142 static void 5143 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5144 { 5145 uint_t log_page_size; 5146 uchar_t *log_page_data; 5147 int rval; 5148 5149 /* 5150 * Since we are called from attach, holding a mutex for 5151 * un is unnecessary. Because some of the routines called 5152 * from here require SD_MUTEX to not be held, assert this 5153 * right up front. 5154 */ 5155 ASSERT(!mutex_owned(SD_MUTEX(un))); 5156 /* 5157 * Since the sd device does not have the 'reg' property, 5158 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5159 * The following code is to tell cpr that this device 5160 * DOES need to be suspended and resumed. 5161 */ 5162 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5163 "pm-hardware-state", "needs-suspend-resume"); 5164 5165 /* 5166 * This complies with the new power management framework 5167 * for certain desktop machines. Create the pm_components 5168 * property as a string array property. 5169 */ 5170 if (un->un_f_pm_supported) { 5171 /* 5172 * not all devices have a motor, try it first. 5173 * some devices may return ILLEGAL REQUEST, some 5174 * will hang 5175 * The following START_STOP_UNIT is used to check if target 5176 * device has a motor. 
5177 */ 5178 un->un_f_start_stop_supported = TRUE; 5179 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5180 SD_PATH_DIRECT) != 0) { 5181 un->un_f_start_stop_supported = FALSE; 5182 } 5183 5184 /* 5185 * create pm properties anyways otherwise the parent can't 5186 * go to sleep 5187 */ 5188 (void) sd_create_pm_components(devi, un); 5189 un->un_f_pm_is_enabled = TRUE; 5190 return; 5191 } 5192 5193 if (!un->un_f_log_sense_supported) { 5194 un->un_power_level = SD_SPINDLE_ON; 5195 un->un_f_pm_is_enabled = FALSE; 5196 return; 5197 } 5198 5199 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5200 5201 #ifdef SDDEBUG 5202 if (sd_force_pm_supported) { 5203 /* Force a successful result */ 5204 rval = 1; 5205 } 5206 #endif 5207 5208 /* 5209 * If the start-stop cycle counter log page is not supported 5210 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5211 * then we should not create the pm_components property. 5212 */ 5213 if (rval == -1) { 5214 /* 5215 * Error. 5216 * Reading log sense failed, most likely this is 5217 * an older drive that does not support log sense. 5218 * If this fails auto-pm is not supported. 5219 */ 5220 un->un_power_level = SD_SPINDLE_ON; 5221 un->un_f_pm_is_enabled = FALSE; 5222 5223 } else if (rval == 0) { 5224 /* 5225 * Page not found. 5226 * The start stop cycle counter is implemented as page 5227 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5228 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5229 */ 5230 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5231 /* 5232 * Page found, use this one. 5233 */ 5234 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5235 un->un_f_pm_is_enabled = TRUE; 5236 } else { 5237 /* 5238 * Error or page not found. 5239 * auto-pm is not supported for this device. 5240 */ 5241 un->un_power_level = SD_SPINDLE_ON; 5242 un->un_f_pm_is_enabled = FALSE; 5243 } 5244 } else { 5245 /* 5246 * Page found, use it. 5247 */ 5248 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5249 un->un_f_pm_is_enabled = TRUE; 5250 } 5251 5252 5253 if (un->un_f_pm_is_enabled == TRUE) { 5254 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5255 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5256 5257 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5258 log_page_size, un->un_start_stop_cycle_page, 5259 0x01, 0, SD_PATH_DIRECT); 5260 #ifdef SDDEBUG 5261 if (sd_force_pm_supported) { 5262 /* Force a successful result */ 5263 rval = 0; 5264 } 5265 #endif 5266 5267 /* 5268 * If the Log sense for Page( Start/stop cycle counter page) 5269 * succeeds, then power managment is supported and we can 5270 * enable auto-pm. 5271 */ 5272 if (rval == 0) { 5273 (void) sd_create_pm_components(devi, un); 5274 } else { 5275 un->un_power_level = SD_SPINDLE_ON; 5276 un->un_f_pm_is_enabled = FALSE; 5277 } 5278 5279 kmem_free(log_page_data, log_page_size); 5280 } 5281 } 5282 5283 5284 /* 5285 * Function: sd_create_pm_components 5286 * 5287 * Description: Initialize PM property. 5288 * 5289 * Context: Kernel thread context 5290 */ 5291 5292 static void 5293 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5294 { 5295 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5296 5297 ASSERT(!mutex_owned(SD_MUTEX(un))); 5298 5299 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5300 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5301 /* 5302 * When components are initially created they are idle, 5303 * power up any non-removables. 
5304 * Note: the return value of pm_raise_power can't be used 5305 * for determining if PM should be enabled for this device. 5306 * Even if you check the return values and remove this 5307 * property created above, the PM framework will not honor the 5308 * change after the first call to pm_raise_power. Hence, 5309 * removal of that property does not help if pm_raise_power 5310 * fails. In the case of removable media, the start/stop 5311 * will fail if the media is not present. 5312 */ 5313 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5314 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5315 mutex_enter(SD_MUTEX(un)); 5316 un->un_power_level = SD_SPINDLE_ON; 5317 mutex_enter(&un->un_pm_mutex); 5318 /* Set to on and not busy. */ 5319 un->un_pm_count = 0; 5320 } else { 5321 mutex_enter(SD_MUTEX(un)); 5322 un->un_power_level = SD_SPINDLE_OFF; 5323 mutex_enter(&un->un_pm_mutex); 5324 /* Set to off. */ 5325 un->un_pm_count = -1; 5326 } 5327 mutex_exit(&un->un_pm_mutex); 5328 mutex_exit(SD_MUTEX(un)); 5329 } else { 5330 un->un_power_level = SD_SPINDLE_ON; 5331 un->un_f_pm_is_enabled = FALSE; 5332 } 5333 } 5334 5335 5336 /* 5337 * Function: sd_ddi_suspend 5338 * 5339 * Description: Performs system power-down operations. This includes 5340 * setting the drive state to indicate its suspended so 5341 * that no new commands will be accepted. Also, wait for 5342 * all commands that are in transport or queued to a timer 5343 * for retry to complete. All timeout threads are cancelled. 5344 * 5345 * Return Code: DDI_FAILURE or DDI_SUCCESS 5346 * 5347 * Context: Kernel thread context 5348 */ 5349 5350 static int 5351 sd_ddi_suspend(dev_info_t *devi) 5352 { 5353 struct sd_lun *un; 5354 clock_t wait_cmds_complete; 5355 5356 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5357 if (un == NULL) { 5358 return (DDI_FAILURE); 5359 } 5360 5361 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5362 5363 mutex_enter(SD_MUTEX(un)); 5364 5365 /* Return success if the device is already suspended. */ 5366 if (un->un_state == SD_STATE_SUSPENDED) { 5367 mutex_exit(SD_MUTEX(un)); 5368 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5369 "device already suspended, exiting\n"); 5370 return (DDI_SUCCESS); 5371 } 5372 5373 /* Return failure if the device is being used by HA */ 5374 if (un->un_resvd_status & 5375 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5376 mutex_exit(SD_MUTEX(un)); 5377 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5378 "device in use by HA, exiting\n"); 5379 return (DDI_FAILURE); 5380 } 5381 5382 /* 5383 * Return failure if the device is in a resource wait 5384 * or power changing state. 5385 */ 5386 if ((un->un_state == SD_STATE_RWAIT) || 5387 (un->un_state == SD_STATE_PM_CHANGING)) { 5388 mutex_exit(SD_MUTEX(un)); 5389 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5390 "device in resource wait state, exiting\n"); 5391 return (DDI_FAILURE); 5392 } 5393 5394 5395 un->un_save_state = un->un_last_state; 5396 New_state(un, SD_STATE_SUSPENDED); 5397 5398 /* 5399 * Wait for all commands that are in transport or queued to a timer 5400 * for retry to complete. 5401 * 5402 * While waiting, no new commands will be accepted or sent because of 5403 * the new state we set above. 5404 * 5405 * Wait till current operation has completed. If we are in the resource 5406 * wait state (with an intr outstanding) then we need to wait till the 5407 * intr completes and starts the next cmd. We want to wait for 5408 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
5409 */ 5410 wait_cmds_complete = ddi_get_lbolt() + 5411 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5412 5413 while (un->un_ncmds_in_transport != 0) { 5414 /* 5415 * Fail if commands do not finish in the specified time. 5416 */ 5417 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5418 wait_cmds_complete) == -1) { 5419 /* 5420 * Undo the state changes made above. Everything 5421 * must go back to it's original value. 5422 */ 5423 Restore_state(un); 5424 un->un_last_state = un->un_save_state; 5425 /* Wake up any threads that might be waiting. */ 5426 cv_broadcast(&un->un_suspend_cv); 5427 mutex_exit(SD_MUTEX(un)); 5428 SD_ERROR(SD_LOG_IO_PM, un, 5429 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5430 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5431 return (DDI_FAILURE); 5432 } 5433 } 5434 5435 /* 5436 * Cancel SCSI watch thread and timeouts, if any are active 5437 */ 5438 5439 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5440 opaque_t temp_token = un->un_swr_token; 5441 mutex_exit(SD_MUTEX(un)); 5442 scsi_watch_suspend(temp_token); 5443 mutex_enter(SD_MUTEX(un)); 5444 } 5445 5446 if (un->un_reset_throttle_timeid != NULL) { 5447 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5448 un->un_reset_throttle_timeid = NULL; 5449 mutex_exit(SD_MUTEX(un)); 5450 (void) untimeout(temp_id); 5451 mutex_enter(SD_MUTEX(un)); 5452 } 5453 5454 if (un->un_dcvb_timeid != NULL) { 5455 timeout_id_t temp_id = un->un_dcvb_timeid; 5456 un->un_dcvb_timeid = NULL; 5457 mutex_exit(SD_MUTEX(un)); 5458 (void) untimeout(temp_id); 5459 mutex_enter(SD_MUTEX(un)); 5460 } 5461 5462 mutex_enter(&un->un_pm_mutex); 5463 if (un->un_pm_timeid != NULL) { 5464 timeout_id_t temp_id = un->un_pm_timeid; 5465 un->un_pm_timeid = NULL; 5466 mutex_exit(&un->un_pm_mutex); 5467 mutex_exit(SD_MUTEX(un)); 5468 (void) untimeout(temp_id); 5469 mutex_enter(SD_MUTEX(un)); 5470 } else { 5471 mutex_exit(&un->un_pm_mutex); 5472 } 5473 5474 if (un->un_retry_timeid != NULL) { 5475 timeout_id_t temp_id = un->un_retry_timeid; 5476 un->un_retry_timeid = NULL; 5477 mutex_exit(SD_MUTEX(un)); 5478 (void) untimeout(temp_id); 5479 mutex_enter(SD_MUTEX(un)); 5480 } 5481 5482 if (un->un_direct_priority_timeid != NULL) { 5483 timeout_id_t temp_id = un->un_direct_priority_timeid; 5484 un->un_direct_priority_timeid = NULL; 5485 mutex_exit(SD_MUTEX(un)); 5486 (void) untimeout(temp_id); 5487 mutex_enter(SD_MUTEX(un)); 5488 } 5489 5490 if (un->un_f_is_fibre == TRUE) { 5491 /* 5492 * Remove callbacks for insert and remove events 5493 */ 5494 if (un->un_insert_event != NULL) { 5495 mutex_exit(SD_MUTEX(un)); 5496 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5497 mutex_enter(SD_MUTEX(un)); 5498 un->un_insert_event = NULL; 5499 } 5500 5501 if (un->un_remove_event != NULL) { 5502 mutex_exit(SD_MUTEX(un)); 5503 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5504 mutex_enter(SD_MUTEX(un)); 5505 un->un_remove_event = NULL; 5506 } 5507 } 5508 5509 mutex_exit(SD_MUTEX(un)); 5510 5511 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5512 5513 return (DDI_SUCCESS); 5514 } 5515 5516 5517 /* 5518 * Function: sd_ddi_pm_suspend 5519 * 5520 * Description: Set the drive state to low power. 5521 * Someone else is required to actually change the drive 5522 * power level. 
5523  *
5524  * Arguments: un - driver soft state (unit) structure
5525  *
5526  * Return Code: DDI_FAILURE or DDI_SUCCESS
5527  *
5528  * Context: Kernel thread context
5529  */
5530 
5531 static int
5532 sd_ddi_pm_suspend(struct sd_lun *un)
5533 {
5534 	ASSERT(un != NULL);
5535 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");
5536 
5537 	ASSERT(!mutex_owned(SD_MUTEX(un)));
5538 	mutex_enter(SD_MUTEX(un));
5539 
5540 	/*
5541 	 * Exit if power management is not enabled for this device, or if
5542 	 * the device is being used by HA.
5543 	 */
5544 	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
5545 	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
5546 		mutex_exit(SD_MUTEX(un));
5547 		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
5548 		return (DDI_SUCCESS);
5549 	}
5550 
5551 	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
5552 	    un->un_ncmds_in_driver);
5553 
5554 	/*
5555 	 * See if the device is not busy, i.e.:
5556 	 *    - we have no commands in the driver for this device
5557 	 *    - not waiting for resources
5558 	 */
5559 	if ((un->un_ncmds_in_driver == 0) &&
5560 	    (un->un_state != SD_STATE_RWAIT)) {
5561 		/*
5562 		 * The device is not busy, so it is OK to go to low power state.
5563 		 * Indicate low power, but rely on someone else to actually
5564 		 * change it.
5565 		 */
5566 		mutex_enter(&un->un_pm_mutex);
5567 		un->un_pm_count = -1;
5568 		mutex_exit(&un->un_pm_mutex);
5569 		un->un_power_level = SD_SPINDLE_OFF;
5570 	}
5571 
5572 	mutex_exit(SD_MUTEX(un));
5573 
5574 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
5575 
5576 	return (DDI_SUCCESS);
5577 }
5578 
5579 
5580 /*
5581  * Function: sd_ddi_resume
5582  *
5583  * Description: Performs system power-up operations.
5584  *
5585  * Return Code: DDI_SUCCESS
5586  *		DDI_FAILURE
5587  *
5588  * Context: Kernel thread context
5589  */
5590 
5591 static int
5592 sd_ddi_resume(dev_info_t *devi)
5593 {
5594 	struct	sd_lun	*un;
5595 
5596 	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5597 	if (un == NULL) {
5598 		return (DDI_FAILURE);
5599 	}
5600 
5601 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
5602 
5603 	mutex_enter(SD_MUTEX(un));
5604 	Restore_state(un);
5605 
5606 	/*
5607 	 * Restore the state which was saved to give the
5608 	 * right state in un_last_state
5609 	 */
5610 	un->un_last_state = un->un_save_state;
5611 	/*
5612 	 * Note: throttle comes back at full.
5613 	 * Also note: this MUST be done before calling pm_raise_power
5614 	 * otherwise the system can get hung in biowait. The scenario where
5615 	 * this'll happen is under cpr suspend. Writing of the system
5616 	 * state goes through sddump, which writes 0 to un_throttle. If
5617 	 * writing the system state then fails, for example if the partition is
5618 	 * too small, then cpr attempts a resume. If throttle isn't restored
5619 	 * from the saved value until after calling pm_raise_power then
5620 	 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
5621 	 * in biowait.
5622 	 */
5623 	un->un_throttle = un->un_saved_throttle;
5624 
5625 	/*
5626 	 * The chance of failure is very rare as the only command done in power
5627 	 * entry point is START command when you transition from 0->1 or
5628 	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
5629 	 * which suspend was done. Ignore the return value as the resume should
5630 	 * not be failed. In the case of removable media the media need not be
5631 	 * inserted and hence there is a chance that raise power will fail with
5632 	 * media not present.
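	 * (This is why the return value of pm_raise_power() below is
	 * deliberately ignored.)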
5633 */ 5634 if (un->un_f_attach_spinup) { 5635 mutex_exit(SD_MUTEX(un)); 5636 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5637 mutex_enter(SD_MUTEX(un)); 5638 } 5639 5640 /* 5641 * Don't broadcast to the suspend cv and therefore possibly 5642 * start I/O until after power has been restored. 5643 */ 5644 cv_broadcast(&un->un_suspend_cv); 5645 cv_broadcast(&un->un_state_cv); 5646 5647 /* restart thread */ 5648 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5649 scsi_watch_resume(un->un_swr_token); 5650 } 5651 5652 #if (defined(__fibre)) 5653 if (un->un_f_is_fibre == TRUE) { 5654 /* 5655 * Add callbacks for insert and remove events 5656 */ 5657 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5658 sd_init_event_callbacks(un); 5659 } 5660 } 5661 #endif 5662 5663 /* 5664 * Transport any pending commands to the target. 5665 * 5666 * If this is a low-activity device commands in queue will have to wait 5667 * until new commands come in, which may take awhile. Also, we 5668 * specifically don't check un_ncmds_in_transport because we know that 5669 * there really are no commands in progress after the unit was 5670 * suspended and we could have reached the throttle level, been 5671 * suspended, and have no new commands coming in for awhile. Highly 5672 * unlikely, but so is the low-activity disk scenario. 5673 */ 5674 ddi_xbuf_dispatch(un->un_xbuf_attr); 5675 5676 sd_start_cmds(un, NULL); 5677 mutex_exit(SD_MUTEX(un)); 5678 5679 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5680 5681 return (DDI_SUCCESS); 5682 } 5683 5684 5685 /* 5686 * Function: sd_ddi_pm_resume 5687 * 5688 * Description: Set the drive state to powered on. 5689 * Someone else is required to actually change the drive 5690 * power level. 5691 * 5692 * Arguments: un - driver soft state (unit) structure 5693 * 5694 * Return Code: DDI_SUCCESS 5695 * 5696 * Context: Kernel thread context 5697 */ 5698 5699 static int 5700 sd_ddi_pm_resume(struct sd_lun *un) 5701 { 5702 ASSERT(un != NULL); 5703 5704 ASSERT(!mutex_owned(SD_MUTEX(un))); 5705 mutex_enter(SD_MUTEX(un)); 5706 un->un_power_level = SD_SPINDLE_ON; 5707 5708 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5709 mutex_enter(&un->un_pm_mutex); 5710 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5711 un->un_pm_count++; 5712 ASSERT(un->un_pm_count == 0); 5713 /* 5714 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5715 * un_suspend_cv is for a system resume, not a power management 5716 * device resume. (4297749) 5717 * cv_broadcast(&un->un_suspend_cv); 5718 */ 5719 } 5720 mutex_exit(&un->un_pm_mutex); 5721 mutex_exit(SD_MUTEX(un)); 5722 5723 return (DDI_SUCCESS); 5724 } 5725 5726 5727 /* 5728 * Function: sd_pm_idletimeout_handler 5729 * 5730 * Description: A timer routine that's active only while a device is busy. 5731 * The purpose is to extend slightly the pm framework's busy 5732 * view of the device to prevent busy/idle thrashing for 5733 * back-to-back commands. Do this by comparing the current time 5734 * to the time at which the last command completed and when the 5735 * difference is greater than sd_pm_idletime, call 5736 * pm_idle_component. In addition to indicating idle to the pm 5737 * framework, update the chain type to again use the internal pm 5738 * layers of the driver. 
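 *		If the device is still busy, the handler instead re-arms
 *		itself at a 300 ms interval and checks again.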
5739 * 5740 * Arguments: arg - driver soft state (unit) structure 5741 * 5742 * Context: Executes in a timeout(9F) thread context 5743 */ 5744 5745 static void 5746 sd_pm_idletimeout_handler(void *arg) 5747 { 5748 struct sd_lun *un = arg; 5749 5750 time_t now; 5751 5752 mutex_enter(&sd_detach_mutex); 5753 if (un->un_detach_count != 0) { 5754 /* Abort if the instance is detaching */ 5755 mutex_exit(&sd_detach_mutex); 5756 return; 5757 } 5758 mutex_exit(&sd_detach_mutex); 5759 5760 now = ddi_get_time(); 5761 /* 5762 * Grab both mutexes, in the proper order, since we're accessing 5763 * both PM and softstate variables. 5764 */ 5765 mutex_enter(SD_MUTEX(un)); 5766 mutex_enter(&un->un_pm_mutex); 5767 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5768 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5769 /* 5770 * Update the chain types. 5771 * This takes affect on the next new command received. 5772 */ 5773 if (un->un_f_non_devbsize_supported) { 5774 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5775 } else { 5776 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5777 } 5778 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5779 5780 SD_TRACE(SD_LOG_IO_PM, un, 5781 "sd_pm_idletimeout_handler: idling device\n"); 5782 (void) pm_idle_component(SD_DEVINFO(un), 0); 5783 un->un_pm_idle_timeid = NULL; 5784 } else { 5785 un->un_pm_idle_timeid = 5786 timeout(sd_pm_idletimeout_handler, un, 5787 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5788 } 5789 mutex_exit(&un->un_pm_mutex); 5790 mutex_exit(SD_MUTEX(un)); 5791 } 5792 5793 5794 /* 5795 * Function: sd_pm_timeout_handler 5796 * 5797 * Description: Callback to tell framework we are idle. 5798 * 5799 * Context: timeout(9f) thread context. 5800 */ 5801 5802 static void 5803 sd_pm_timeout_handler(void *arg) 5804 { 5805 struct sd_lun *un = arg; 5806 5807 (void) pm_idle_component(SD_DEVINFO(un), 0); 5808 mutex_enter(&un->un_pm_mutex); 5809 un->un_pm_timeid = NULL; 5810 mutex_exit(&un->un_pm_mutex); 5811 } 5812 5813 5814 /* 5815 * Function: sdpower 5816 * 5817 * Description: PM entry point. 5818 * 5819 * Return Code: DDI_SUCCESS 5820 * DDI_FAILURE 5821 * 5822 * Context: Kernel thread context 5823 */ 5824 5825 static int 5826 sdpower(dev_info_t *devi, int component, int level) 5827 { 5828 struct sd_lun *un; 5829 int instance; 5830 int rval = DDI_SUCCESS; 5831 uint_t i, log_page_size, maxcycles, ncycles; 5832 uchar_t *log_page_data; 5833 int log_sense_page; 5834 int medium_present; 5835 time_t intvlp; 5836 dev_t dev; 5837 struct pm_trans_data sd_pm_tran_data; 5838 uchar_t save_state; 5839 int sval; 5840 uchar_t state_before_pm; 5841 int got_semaphore_here; 5842 5843 instance = ddi_get_instance(devi); 5844 5845 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5846 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5847 component != 0) { 5848 return (DDI_FAILURE); 5849 } 5850 5851 dev = sd_make_device(SD_DEVINFO(un)); 5852 5853 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5854 5855 /* 5856 * Must synchronize power down with close. 5857 * Attempt to decrement/acquire the open/close semaphore, 5858 * but do NOT wait on it. If it's not greater than zero, 5859 * ie. it can't be decremented without waiting, then 5860 * someone else, either open or close, already has it 5861 * and the try returns 0. Use that knowledge here to determine 5862 * if it's OK to change the device power level. 5863 * Also, only increment it on exit if it was decremented, ie. gotten, 5864 * here. 
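	 * (sema_tryp() returns non-zero when the semaphore was acquired
	 * and zero when acquiring it would have required blocking.)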
5865 	 */
5866 	got_semaphore_here = sema_tryp(&un->un_semoclose);
5867 
5868 	mutex_enter(SD_MUTEX(un));
5869 
5870 	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
5871 	    un->un_ncmds_in_driver);
5872 
5873 	/*
5874 	 * If un_ncmds_in_driver is non-zero it indicates commands are
5875 	 * already being processed in the driver, or if the semaphore was
5876 	 * not gotten here it indicates an open or close is being processed.
5877 	 * At the same time somebody is requesting to go low power which
5878 	 * can't happen, therefore we need to return failure.
5879 	 */
5880 	if ((level == SD_SPINDLE_OFF) &&
5881 	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
5882 		mutex_exit(SD_MUTEX(un));
5883 
5884 		if (got_semaphore_here != 0) {
5885 			sema_v(&un->un_semoclose);
5886 		}
5887 		SD_TRACE(SD_LOG_IO_PM, un,
5888 		    "sdpower: exit, device has queued cmds.\n");
5889 		return (DDI_FAILURE);
5890 	}
5891 
5892 	/*
5893 	 * If the device is OFFLINE the disk is completely dead; changing its
5894 	 * power state would mean sending it commands, and those commands
5895 	 * would fail anyway, so just return failure here.
5896 	 *
5897 	 * Power changes to a device that's OFFLINE or SUSPENDED
5898 	 * are not allowed.
5899 	 */
5900 	if ((un->un_state == SD_STATE_OFFLINE) ||
5901 	    (un->un_state == SD_STATE_SUSPENDED)) {
5902 		mutex_exit(SD_MUTEX(un));
5903 
5904 		if (got_semaphore_here != 0) {
5905 			sema_v(&un->un_semoclose);
5906 		}
5907 		SD_TRACE(SD_LOG_IO_PM, un,
5908 		    "sdpower: exit, device is off-line.\n");
5909 		return (DDI_FAILURE);
5910 	}
5911 
5912 	/*
5913 	 * Change the device's state to indicate its power level
5914 	 * is being changed. Do this to prevent a power off in the
5915 	 * middle of commands, which is especially bad on devices
5916 	 * that are really powered off instead of just spun down.
5917 	 */
5918 	state_before_pm = un->un_state;
5919 	un->un_state = SD_STATE_PM_CHANGING;
5920 
5921 	mutex_exit(SD_MUTEX(un));
5922 
5923 	/*
5924 	 * If "pm-capable" property is set to TRUE by HBA drivers,
5925 	 * bypass the following checking, otherwise, check the log
5926 	 * sense information for this device
5927 	 */
5928 	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
5929 		/*
5930 		 * Get the log sense information to understand whether the
5931 		 * power-cycle counts have gone beyond the threshold.
5932 		 */
5933 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5934 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5935 
5936 		mutex_enter(SD_MUTEX(un));
5937 		log_sense_page = un->un_start_stop_cycle_page;
5938 		mutex_exit(SD_MUTEX(un));
5939 
5940 		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
5941 		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
5942 #ifdef	SDDEBUG
5943 		if (sd_force_pm_supported) {
5944 			/* Force a successful result */
5945 			rval = 0;
5946 		}
5947 #endif
5948 		if (rval != 0) {
5949 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
5950 			    "Log Sense Failed\n");
5951 			kmem_free(log_page_data, log_page_size);
5952 			/* Cannot support power management on those drives */
5953 
5954 			if (got_semaphore_here != 0) {
5955 				sema_v(&un->un_semoclose);
5956 			}
5957 			/*
5958 			 * On exit put the state back to its original value
5959 			 * and broadcast to anyone waiting for the power
5960 			 * change completion.
5961 			 */
5962 			mutex_enter(SD_MUTEX(un));
5963 			un->un_state = state_before_pm;
5964 			cv_broadcast(&un->un_suspend_cv);
5965 			mutex_exit(SD_MUTEX(un));
5966 			SD_TRACE(SD_LOG_IO_PM, un,
5967 			    "sdpower: exit, Log Sense Failed.\n");
5968 			return (DDI_FAILURE);
5969 		}
5970 
5971 		/*
5972 		 * From the page data - Convert the essential information to
5973 		 * pm_trans_data
5974 		 */
5975 		maxcycles =
5976 		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
5977 		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];
5978 
5979 		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
5980 
5981 		ncycles =
5982 		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
5983 		    (log_page_data[0x26] << 8)  | log_page_data[0x27];
5984 
5985 		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
5986 
5987 		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
5988 			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
5989 			    log_page_data[8+i];
5990 		}
5991 
5992 		kmem_free(log_page_data, log_page_size);
5993 
5994 		/*
5995 		 * Call pm_trans_check routine to get the OK from
5996 		 * the global policy
5997 		 */
5998 
5999 		sd_pm_tran_data.format = DC_SCSI_FORMAT;
6000 		sd_pm_tran_data.un.scsi_cycles.flag = 0;
6001 
6002 		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6003 #ifdef	SDDEBUG
6004 		if (sd_force_pm_supported) {
6005 			/* Force a successful result */
6006 			rval = 1;
6007 		}
6008 #endif
6009 		switch (rval) {
6010 		case 0:
6011 			/*
6012 			 * Not OK to power cycle, or an error in the parameters
6013 			 * passed. pm_trans_check() has returned the advised
6014 			 * time to wait before the next power cycle. Based on
6015 			 * the intvlp parameter we are supposed to pretend we
6016 			 * are busy so that the pm framework will never call
6017 			 * our power entry point. Because of that, install a
6018 			 * timeout handler and wait for the recommended time
6019 			 * to elapse so that power management can be effective
6020 			 * again.
6021 			 *
6022 			 * To effect this behavior, call pm_busy_component to
6023 			 * indicate to the framework this device is busy.
6024 			 * By not adjusting un_pm_count the rest of PM in
6025 			 * the driver will function normally, and independent
6026 			 * of this but because the framework is told the device
6027 			 * is busy it won't attempt powering down until it gets
6028 			 * a matching idle. The timeout handler sends this.
6029 			 * Note: sd_pm_entry can't be called here to do this
6030 			 * because sdpower may have been called as a result
6031 			 * of a call to pm_raise_power from within sd_pm_entry.
6032 			 *
6033 			 * If a timeout handler is already active then
6034 			 * don't install another.
6035 			 */
6036 			mutex_enter(&un->un_pm_mutex);
6037 			if (un->un_pm_timeid == NULL) {
6038 				un->un_pm_timeid =
6039 				    timeout(sd_pm_timeout_handler,
6040 				    un, intvlp * drv_usectohz(1000000));
6041 				mutex_exit(&un->un_pm_mutex);
6042 				(void) pm_busy_component(SD_DEVINFO(un), 0);
6043 			} else {
6044 				mutex_exit(&un->un_pm_mutex);
6045 			}
6046 			if (got_semaphore_here != 0) {
6047 				sema_v(&un->un_semoclose);
6048 			}
6049 			/*
6050 			 * On exit put the state back to its original value
6051 			 * and broadcast to anyone waiting for the power
6052 			 * change completion.
6053 			 */
6054 			mutex_enter(SD_MUTEX(un));
6055 			un->un_state = state_before_pm;
6056 			cv_broadcast(&un->un_suspend_cv);
6057 			mutex_exit(SD_MUTEX(un));
6058 
6059 			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6060 			    "trans check Failed, not ok to power cycle.\n");
6061 			return (DDI_FAILURE);
6062 
6063 		case -1:
6064 			if (got_semaphore_here != 0) {
6065 				sema_v(&un->un_semoclose);
6066 			}
6067 			/*
6068 			 * On exit put the state back to its original value
6069 			 * and broadcast to anyone waiting for the power
6070 			 * change completion.
6071 			 */
6072 			mutex_enter(SD_MUTEX(un));
6073 			un->un_state = state_before_pm;
6074 			cv_broadcast(&un->un_suspend_cv);
6075 			mutex_exit(SD_MUTEX(un));
6076 			SD_TRACE(SD_LOG_IO_PM, un,
6077 			    "sdpower: exit, trans check command Failed.\n");
6078 			return (DDI_FAILURE);
6079 		}
6080 	}
6081 
6082 	if (level == SD_SPINDLE_OFF) {
6083 		/*
6084 		 * Save the last state... if the STOP FAILS we need it
6085 		 * for restoring
6086 		 */
6087 		mutex_enter(SD_MUTEX(un));
6088 		save_state = un->un_last_state;
6089 		/*
6090 		 * There must not be any cmds getting processed
6091 		 * in the driver when we get here. Power to the
6092 		 * device is potentially going off.
6093 		 */
6094 		ASSERT(un->un_ncmds_in_driver == 0);
6095 		mutex_exit(SD_MUTEX(un));
6096 
6097 		/*
6098 		 * For now suspend the device completely before spindle is
6099 		 * turned off
6100 		 */
6101 		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
6102 			if (got_semaphore_here != 0) {
6103 				sema_v(&un->un_semoclose);
6104 			}
6105 			/*
6106 			 * On exit put the state back to its original value
6107 			 * and broadcast to anyone waiting for the power
6108 			 * change completion.
6109 			 */
6110 			mutex_enter(SD_MUTEX(un));
6111 			un->un_state = state_before_pm;
6112 			cv_broadcast(&un->un_suspend_cv);
6113 			mutex_exit(SD_MUTEX(un));
6114 			SD_TRACE(SD_LOG_IO_PM, un,
6115 			    "sdpower: exit, PM suspend Failed.\n");
6116 			return (DDI_FAILURE);
6117 		}
6118 	}
6119 
6120 	/*
6121 	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6122 	 * close, or strategy. Dump no longer uses this routine; it uses its
6123 	 * own code so it can be done in polled mode.
6124 	 */
6125 
6126 	medium_present = TRUE;
6127 
6128 	/*
6129 	 * When powering up, issue a TUR in case the device is at unit
6130 	 * attention. Don't do retries. Bypass the PM layer, otherwise
6131 	 * a deadlock on un_pm_busy_cv will occur.
6132 	 */
6133 	if (level == SD_SPINDLE_ON) {
6134 		(void) sd_send_scsi_TEST_UNIT_READY(un,
6135 		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6136 	}
6137 
6138 	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6139 	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6140 
6141 	sval = sd_send_scsi_START_STOP_UNIT(un,
6142 	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6143 	    SD_PATH_DIRECT);
6144 	/* Command failed, check for media present. */
6145 	if ((sval == ENXIO) && un->un_f_has_removable_media) {
6146 		medium_present = FALSE;
6147 	}
6148 
6149 	/*
6150 	 * The conditions of interest here are:
6151 	 *   if a spindle off with media present fails,
6152 	 *	then restore the state and return an error.
6153 	 *   else if a spindle on fails,
6154 	 *	then return an error (there's no state to restore).
6155 	 * In all other cases we setup for the new state
6156 	 * and return success.
6157 	 */
6158 	switch (level) {
6159 	case SD_SPINDLE_OFF:
6160 		if ((medium_present == TRUE) && (sval != 0)) {
6161 			/* The stop command from above failed */
6162 			rval = DDI_FAILURE;
6163 			/*
6164 			 * The stop command failed, and we have media
6165 			 * present. Put the level back by calling
6166 			 * sd_ddi_pm_resume() and set the state back to
6167 			 * its previous value.
6168 			 */
6169 			(void) sd_ddi_pm_resume(un);
6170 			mutex_enter(SD_MUTEX(un));
6171 			un->un_last_state = save_state;
6172 			mutex_exit(SD_MUTEX(un));
6173 			break;
6174 		}
6175 		/*
6176 		 * The stop command from above succeeded.
6177 		 */
6178 		if (un->un_f_monitor_media_state) {
6179 			/*
6180 			 * Terminate watch thread in case of removable media
			 * devices going into low power state.
This is as per 6181 * the requirements of pm framework, otherwise commands 6182 * will be generated for the device (through watch 6183 * thread), even when the device is in low power state. 6184 */ 6185 mutex_enter(SD_MUTEX(un)); 6186 un->un_f_watcht_stopped = FALSE; 6187 if (un->un_swr_token != NULL) { 6188 opaque_t temp_token = un->un_swr_token; 6189 un->un_f_watcht_stopped = TRUE; 6190 un->un_swr_token = NULL; 6191 mutex_exit(SD_MUTEX(un)); 6192 (void) scsi_watch_request_terminate(temp_token, 6193 SCSI_WATCH_TERMINATE_WAIT); 6194 } else { 6195 mutex_exit(SD_MUTEX(un)); 6196 } 6197 } 6198 break; 6199 6200 default: /* The level requested is spindle on... */ 6201 /* 6202 * Legacy behavior: return success on a failed spinup 6203 * if there is no media in the drive. 6204 * Do this by looking at medium_present here. 6205 */ 6206 if ((sval != 0) && medium_present) { 6207 /* The start command from above failed */ 6208 rval = DDI_FAILURE; 6209 break; 6210 } 6211 /* 6212 * The start command from above succeeded 6213 * Resume the devices now that we have 6214 * started the disks 6215 */ 6216 (void) sd_ddi_pm_resume(un); 6217 6218 /* 6219 * Resume the watch thread since it was suspended 6220 * when the device went into low power mode. 6221 */ 6222 if (un->un_f_monitor_media_state) { 6223 mutex_enter(SD_MUTEX(un)); 6224 if (un->un_f_watcht_stopped == TRUE) { 6225 opaque_t temp_token; 6226 6227 un->un_f_watcht_stopped = FALSE; 6228 mutex_exit(SD_MUTEX(un)); 6229 temp_token = scsi_watch_request_submit( 6230 SD_SCSI_DEVP(un), 6231 sd_check_media_time, 6232 SENSE_LENGTH, sd_media_watch_cb, 6233 (caddr_t)dev); 6234 mutex_enter(SD_MUTEX(un)); 6235 un->un_swr_token = temp_token; 6236 } 6237 mutex_exit(SD_MUTEX(un)); 6238 } 6239 } 6240 if (got_semaphore_here != 0) { 6241 sema_v(&un->un_semoclose); 6242 } 6243 /* 6244 * On exit put the state back to it's original value 6245 * and broadcast to anyone waiting for the power 6246 * change completion. 6247 */ 6248 mutex_enter(SD_MUTEX(un)); 6249 un->un_state = state_before_pm; 6250 cv_broadcast(&un->un_suspend_cv); 6251 mutex_exit(SD_MUTEX(un)); 6252 6253 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6254 6255 return (rval); 6256 } 6257 6258 6259 6260 /* 6261 * Function: sdattach 6262 * 6263 * Description: Driver's attach(9e) entry point function. 6264 * 6265 * Arguments: devi - opaque device info handle 6266 * cmd - attach type 6267 * 6268 * Return Code: DDI_SUCCESS 6269 * DDI_FAILURE 6270 * 6271 * Context: Kernel thread context 6272 */ 6273 6274 static int 6275 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6276 { 6277 switch (cmd) { 6278 case DDI_ATTACH: 6279 return (sd_unit_attach(devi)); 6280 case DDI_RESUME: 6281 return (sd_ddi_resume(devi)); 6282 default: 6283 break; 6284 } 6285 return (DDI_FAILURE); 6286 } 6287 6288 6289 /* 6290 * Function: sddetach 6291 * 6292 * Description: Driver's detach(9E) entry point function. 
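 *		(Power transitions are not handled here; they go through
 *		sdpower(), the driver's power(9E) entry point.)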
6293 * 6294 * Arguments: devi - opaque device info handle 6295 * cmd - detach type 6296 * 6297 * Return Code: DDI_SUCCESS 6298 * DDI_FAILURE 6299 * 6300 * Context: Kernel thread context 6301 */ 6302 6303 static int 6304 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6305 { 6306 switch (cmd) { 6307 case DDI_DETACH: 6308 return (sd_unit_detach(devi)); 6309 case DDI_SUSPEND: 6310 return (sd_ddi_suspend(devi)); 6311 default: 6312 break; 6313 } 6314 return (DDI_FAILURE); 6315 } 6316 6317 6318 /* 6319 * Function: sd_sync_with_callback 6320 * 6321 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6322 * state while the callback routine is active. 6323 * 6324 * Arguments: un: softstate structure for the instance 6325 * 6326 * Context: Kernel thread context 6327 */ 6328 6329 static void 6330 sd_sync_with_callback(struct sd_lun *un) 6331 { 6332 ASSERT(un != NULL); 6333 6334 mutex_enter(SD_MUTEX(un)); 6335 6336 ASSERT(un->un_in_callback >= 0); 6337 6338 while (un->un_in_callback > 0) { 6339 mutex_exit(SD_MUTEX(un)); 6340 delay(2); 6341 mutex_enter(SD_MUTEX(un)); 6342 } 6343 6344 mutex_exit(SD_MUTEX(un)); 6345 } 6346 6347 /* 6348 * Function: sd_unit_attach 6349 * 6350 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6351 * the soft state structure for the device and performs 6352 * all necessary structure and device initializations. 6353 * 6354 * Arguments: devi: the system's dev_info_t for the device. 6355 * 6356 * Return Code: DDI_SUCCESS if attach is successful. 6357 * DDI_FAILURE if any part of the attach fails. 6358 * 6359 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6360 * Kernel thread context only. Can sleep. 6361 */ 6362 6363 static int 6364 sd_unit_attach(dev_info_t *devi) 6365 { 6366 struct scsi_device *devp; 6367 struct sd_lun *un; 6368 char *variantp; 6369 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6370 int instance; 6371 int rval; 6372 int wc_enabled; 6373 int tgt; 6374 uint64_t capacity; 6375 uint_t lbasize = 0; 6376 dev_info_t *pdip = ddi_get_parent(devi); 6377 int offbyone = 0; 6378 int geom_label_valid = 0; 6379 #if defined(__sparc) 6380 int max_xfer_size; 6381 #endif 6382 6383 /* 6384 * Retrieve the target driver's private data area. This was set 6385 * up by the HBA. 6386 */ 6387 devp = ddi_get_driver_private(devi); 6388 6389 /* 6390 * Retrieve the target ID of the device. 6391 */ 6392 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6393 SCSI_ADDR_PROP_TARGET, -1); 6394 6395 /* 6396 * Since we have no idea what state things were left in by the last 6397 * user of the device, set up some 'default' settings, ie. turn 'em 6398 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6399 * Do this before the scsi_probe, which sends an inquiry. 6400 * This is a fix for bug (4430280). 6401 * Of special importance is wide-xfer. The drive could have been left 6402 * in wide transfer mode by the last driver to communicate with it, 6403 * this includes us. If that's the case, and if the following is not 6404 * setup properly or we don't re-negotiate with the drive prior to 6405 * transferring data to/from the drive, it causes bus parity errors, 6406 * data overruns, and unexpected interrupts. This first occurred when 6407 * the fix for bug (4378686) was made. 
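	 * (The final argument of 1 in the scsi_ifsetcap() calls below
	 * applies each setting to this particular target rather than to
	 * all targets on the bus.)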
6408 */ 6409 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6410 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6411 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6412 6413 /* 6414 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6415 * on a target. Setting it per lun instance actually sets the 6416 * capability of this target, which affects those luns already 6417 * attached on the same target. So during attach, we can only disable 6418 * this capability only when no other lun has been attached on this 6419 * target. By doing this, we assume a target has the same tagged-qing 6420 * capability for every lun. The condition can be removed when HBA 6421 * is changed to support per lun based tagged-qing capability. 6422 */ 6423 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6424 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6425 } 6426 6427 /* 6428 * Use scsi_probe() to issue an INQUIRY command to the device. 6429 * This call will allocate and fill in the scsi_inquiry structure 6430 * and point the sd_inq member of the scsi_device structure to it. 6431 * If the attach succeeds, then this memory will not be de-allocated 6432 * (via scsi_unprobe()) until the instance is detached. 6433 */ 6434 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6435 goto probe_failed; 6436 } 6437 6438 /* 6439 * Check the device type as specified in the inquiry data and 6440 * claim it if it is of a type that we support. 6441 */ 6442 switch (devp->sd_inq->inq_dtype) { 6443 case DTYPE_DIRECT: 6444 break; 6445 case DTYPE_RODIRECT: 6446 break; 6447 case DTYPE_OPTICAL: 6448 break; 6449 case DTYPE_NOTPRESENT: 6450 default: 6451 /* Unsupported device type; fail the attach. */ 6452 goto probe_failed; 6453 } 6454 6455 /* 6456 * Allocate the soft state structure for this unit. 6457 * 6458 * We rely upon this memory being set to all zeroes by 6459 * ddi_soft_state_zalloc(). We assume that any member of the 6460 * soft state structure that is not explicitly initialized by 6461 * this routine will have a value of zero. 6462 */ 6463 instance = ddi_get_instance(devp->sd_dev); 6464 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6465 goto probe_failed; 6466 } 6467 6468 /* 6469 * Retrieve a pointer to the newly-allocated soft state. 6470 * 6471 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6472 * was successful, unless something has gone horribly wrong and the 6473 * ddi's soft state internals are corrupt (in which case it is 6474 * probably better to halt here than just fail the attach....) 6475 */ 6476 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6477 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6478 instance); 6479 /*NOTREACHED*/ 6480 } 6481 6482 /* 6483 * Link the back ptr of the driver soft state to the scsi_device 6484 * struct for this lun. 6485 * Save a pointer to the softstate in the driver-private area of 6486 * the scsi_device struct. 6487 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6488 * we first set un->un_sd below. 6489 */ 6490 un->un_sd = devp; 6491 devp->sd_private = (opaque_t)un; 6492 6493 /* 6494 * The following must be after devp is stored in the soft state struct. 6495 */ 6496 #ifdef SDDEBUG 6497 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6498 "%s_unit_attach: un:0x%p instance:%d\n", 6499 ddi_driver_name(devi), un, instance); 6500 #endif 6501 6502 /* 6503 * Set up the device type and node type (for the minor nodes). 
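	 * (The node type chosen here is what the DDI uses to classify
	 * the minor nodes when the /dev links are created.)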
6504  * By default we assume that the device can at least support the
6505  * Common Command Set. Call it a CD-ROM if it reports itself
6506  * as a RODIRECT device.
6507  */
6508 	switch (devp->sd_inq->inq_dtype) {
6509 	case DTYPE_RODIRECT:
6510 		un->un_node_type = DDI_NT_CD_CHAN;
6511 		un->un_ctype = CTYPE_CDROM;
6512 		break;
6513 	case DTYPE_OPTICAL:
6514 		un->un_node_type = DDI_NT_BLOCK_CHAN;
6515 		un->un_ctype = CTYPE_ROD;
6516 		break;
6517 	default:
6518 		un->un_node_type = DDI_NT_BLOCK_CHAN;
6519 		un->un_ctype = CTYPE_CCS;
6520 		break;
6521 	}
6522 
6523 	/*
6524 	 * Try to read the interconnect type from the HBA.
6525 	 *
6526 	 * Note: This driver is currently compiled as two binaries, a parallel
6527 	 * scsi version (sd) and a fibre channel version (ssd). All functional
6528 	 * differences are determined at compile time. In the future a single
6529 	 * binary will be provided and the interconnect type will be used to
6530 	 * differentiate between fibre and parallel scsi behaviors. At that time
6531 	 * it will be necessary for all fibre channel HBAs to support this
6532 	 * property.
6533 	 *
6534 	 * Set un_f_is_fibre to TRUE (default fibre).
6535 	 */
6536 	un->un_f_is_fibre = TRUE;
6537 	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
6538 	case INTERCONNECT_SSA:
6539 		un->un_interconnect_type = SD_INTERCONNECT_SSA;
6540 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6541 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6542 		break;
6543 	case INTERCONNECT_PARALLEL:
6544 		un->un_f_is_fibre = FALSE;
6545 		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6546 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6547 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6548 		break;
6549 	case INTERCONNECT_SATA:
6550 		un->un_f_is_fibre = FALSE;
6551 		un->un_interconnect_type = SD_INTERCONNECT_SATA;
6552 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6553 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6554 		break;
6555 	case INTERCONNECT_FIBRE:
6556 		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
6557 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6558 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6559 		break;
6560 	case INTERCONNECT_FABRIC:
6561 		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
6562 		un->un_node_type = DDI_NT_BLOCK_FABRIC;
6563 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
6564 		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6565 		break;
6566 	default:
6567 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
6568 		/*
6569 		 * The HBA does not support the "interconnect-type" property
6570 		 * (or did not provide a recognized type).
6571 		 *
6572 		 * Note: This will be obsoleted when a single fibre channel
6573 		 * and parallel scsi driver is delivered. In the meantime the
6574 		 * interconnect type will be set to the platform default. If that
6575 		 * type is not parallel SCSI, it means that we should be
6576 		 * assuming "ssd" semantics. However, here this also means that
6577 		 * the FC HBA is not supporting the "interconnect-type" property
6578 		 * like we expect it to, so log this occurrence.
6579 		 */
6580 		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
6581 		if (!SD_IS_PARALLEL_SCSI(un)) {
6582 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6583 			    "sd_unit_attach: un:0x%p Assuming "
6584 			    "INTERCONNECT_FIBRE\n", un);
6585 		} else {
6586 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
6587 			    "sd_unit_attach: un:0x%p Assuming "
6588 			    "INTERCONNECT_PARALLEL\n", un);
6589 			un->un_f_is_fibre = FALSE;
6590 		}
6591 #else
6592 		/*
6593 		 * Note: This source will be implemented when a single fibre
6594 		 * channel and parallel scsi driver is delivered. The default
6595 		 * will be to assume that if a device does not support the
6596 		 * "interconnect-type" property it is a parallel SCSI HBA and
6597 		 * we will set the interconnect type for parallel scsi.
6598 		 */
6599 		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6600 		un->un_f_is_fibre = FALSE;
6601 #endif
6602 		break;
6603 	}
6604 
6605 	if (un->un_f_is_fibre == TRUE) {
6606 		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6607 		    SCSI_VERSION_3) {
6608 			switch (un->un_interconnect_type) {
6609 			case SD_INTERCONNECT_FIBRE:
6610 			case SD_INTERCONNECT_SSA:
6611 				un->un_node_type = DDI_NT_BLOCK_WWN;
6612 				break;
6613 			default:
6614 				break;
6615 			}
6616 		}
6617 	}
6618 
6619 	/*
6620 	 * Initialize the Request Sense command for the target
6621 	 */
6622 	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6623 		goto alloc_rqs_failed;
6624 	}
6625 
6626 	/*
6627 	 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC
6628 	 * with separate binaries for sd and ssd.
6629 	 *
6630 	 * x86 has one binary, so un_retry_count is set based on connection
6631 	 * type. The hardcoded values will go away when SPARC uses one binary
6632 	 * for sd and ssd. These hardcoded values need to match
6633 	 * SD_RETRY_COUNT in sddef.h.
6634 	 * The value used is based on interconnect type:
6635 	 * fibre = 3, parallel = 5.
6636 	 */
6637 #if defined(__i386) || defined(__amd64)
6638 	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6639 #else
6640 	un->un_retry_count = SD_RETRY_COUNT;
6641 #endif
6642 
6643 	/*
6644 	 * Set the per disk retry count to the default number of retries
6645 	 * for disks and CDROMs. This value can be overridden by the
6646 	 * disk property list or an entry in sd.conf.
6647 	 */
6648 	un->un_notready_retry_count =
6649 	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6650 	    : DISK_NOT_READY_RETRY_COUNT(un);
6651 
6652 	/*
6653 	 * Set the busy retry count to the default value of un_retry_count.
6654 	 * This can be overridden by entries in sd.conf or the device
6655 	 * config table.
6656 	 */
6657 	un->un_busy_retry_count = un->un_retry_count;
6658 
6659 	/*
6660 	 * Init the reset threshold for retries. This number determines
6661 	 * how many retries must be performed before a reset can be issued
6662 	 * (for certain error conditions). This can be overridden by entries
6663 	 * in sd.conf or the device config table.
6664 	 */
6665 	un->un_reset_retry_count = (un->un_retry_count / 2);
6666 
6667 	/*
6668 	 * Set the victim_retry_count to the default un_retry_count
6669 	 */
6670 	un->un_victim_retry_count = (2 * un->un_retry_count);
6671 
6672 	/*
6673 	 * Set the reservation release timeout to the default value of
6674 	 * 5 seconds. This can be overridden by entries in ssd.conf or the
6675 	 * device config table.
6676 	 */
6677 	un->un_reserve_release_time = 5;
6678 
6679 	/*
6680 	 * Set up the default maximum transfer size. Note that this may
6681 	 * get updated later in the attach, when setting up default wide
6682 	 * operations for disks.
6683 	 */
6684 #if defined(__i386) || defined(__amd64)
6685 	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6686 	un->un_partial_dma_supported = 1;
6687 #else
6688 	un->un_max_xfer_size = (uint_t)maxphys;
6689 #endif
6690 
6691 	/*
6692 	 * Get "allow bus device reset" property (defaults to "enabled" if
6693 	 * the property was not defined). This is to disable bus resets for
6694 	 * certain kinds of error recovery. Note: In the future when a run-time
6695 	 * fibre check is available the soft state flag should default to
6696 	 * enabled.
6697 */ 6698 if (un->un_f_is_fibre == TRUE) { 6699 un->un_f_allow_bus_device_reset = TRUE; 6700 } else { 6701 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6702 "allow-bus-device-reset", 1) != 0) { 6703 un->un_f_allow_bus_device_reset = TRUE; 6704 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6705 "sd_unit_attach: un:0x%p Bus device reset " 6706 "enabled\n", un); 6707 } else { 6708 un->un_f_allow_bus_device_reset = FALSE; 6709 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6710 "sd_unit_attach: un:0x%p Bus device reset " 6711 "disabled\n", un); 6712 } 6713 } 6714 6715 /* 6716 * Check if this is an ATAPI device. ATAPI devices use Group 1 6717 * Read/Write commands and Group 2 Mode Sense/Select commands. 6718 * 6719 * Note: The "obsolete" way of doing this is to check for the "atapi" 6720 * property. The new "variant" property with a value of "atapi" has been 6721 * introduced so that future 'variants' of standard SCSI behavior (like 6722 * atapi) could be specified by the underlying HBA drivers by supplying 6723 * a new value for the "variant" property, instead of having to define a 6724 * new property. 6725 */ 6726 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6727 un->un_f_cfg_is_atapi = TRUE; 6728 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6729 "sd_unit_attach: un:0x%p Atapi device\n", un); 6730 } 6731 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6732 &variantp) == DDI_PROP_SUCCESS) { 6733 if (strcmp(variantp, "atapi") == 0) { 6734 un->un_f_cfg_is_atapi = TRUE; 6735 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6736 "sd_unit_attach: un:0x%p Atapi device\n", un); 6737 } 6738 ddi_prop_free(variantp); 6739 } 6740 6741 un->un_cmd_timeout = SD_IO_TIME; 6742 6743 /* Info on current states, statuses, etc. (Updated frequently) */ 6744 un->un_state = SD_STATE_NORMAL; 6745 un->un_last_state = SD_STATE_NORMAL; 6746 6747 /* Control & status info for command throttling */ 6748 un->un_throttle = sd_max_throttle; 6749 un->un_saved_throttle = sd_max_throttle; 6750 un->un_min_throttle = sd_min_throttle; 6751 6752 if (un->un_f_is_fibre == TRUE) { 6753 un->un_f_use_adaptive_throttle = TRUE; 6754 } else { 6755 un->un_f_use_adaptive_throttle = FALSE; 6756 } 6757 6758 /* Removable media support. */ 6759 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6760 un->un_mediastate = DKIO_NONE; 6761 un->un_specified_mediastate = DKIO_NONE; 6762 6763 /* CVs for suspend/resume (PM or DR) */ 6764 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6765 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6766 6767 /* Power management support. */ 6768 un->un_power_level = SD_SPINDLE_UNINIT; 6769 6770 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6771 un->un_f_wcc_inprog = 0; 6772 6773 /* 6774 * The open/close semaphore is used to serialize threads executing 6775 * in the driver's open & close entry point routines for a given 6776 * instance. 6777 */ 6778 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6779 6780 /* 6781 * The conf file entry and softstate variable is a forceful override, 6782 * meaning a non-zero value must be entered to change the default. 6783 */ 6784 un->un_f_disksort_disabled = FALSE; 6785 6786 /* 6787 * Retrieve the properties from the static driver table or the driver 6788 * configuration file (.conf) for this unit and update the soft state 6789 * for the device as needed for the indicated properties. 
6790  * Note: the property configuration needs to occur here as some of the
6791  * following routines may have dependencies on soft state flags set
6792  * as part of the driver property configuration.
6793  */
6794 	sd_read_unit_properties(un);
6795 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
6796 	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);
6797 
6798 	/*
6799 	 * Only if a device has the "hotpluggable" property is it
6800 	 * treated as a hotpluggable device; otherwise it is
6801 	 * regarded as non-hotpluggable.
6802 	 */
6803 	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
6804 	    -1) != -1) {
6805 		un->un_f_is_hotpluggable = TRUE;
6806 	}
6807 
6808 	/*
6809 	 * Set the unit's attributes (flags) according to "hotpluggable" and
6810 	 * the RMB bit in the INQUIRY data.
6811 	 */
6812 	sd_set_unit_attributes(un, devi);
6813 
6814 	/*
6815 	 * By default, we mark the capacity, lbasize, and geometry
6816 	 * as invalid. Only if we successfully read a valid capacity
6817 	 * will we update the un_blockcount and un_tgt_blocksize with the
6818 	 * valid values (the geometry will be validated later).
6819 	 */
6820 	un->un_f_blockcount_is_valid = FALSE;
6821 	un->un_f_tgt_blocksize_is_valid = FALSE;
6822 
6823 	/*
6824 	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
6825 	 * otherwise.
6826 	 */
6827 	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
6828 	un->un_blockcount = 0;
6829 
6830 	/*
6831 	 * Set up the per-instance info needed to determine the correct
6832 	 * CDBs and other info for issuing commands to the target.
6833 	 */
6834 	sd_init_cdb_limits(un);
6835 
6836 	/*
6837 	 * Set up the IO chains to use, based upon the target type.
6838 	 */
6839 	if (un->un_f_non_devbsize_supported) {
6840 		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6841 	} else {
6842 		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6843 	}
6844 	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6845 	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
6846 	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
6847 
6848 	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
6849 	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
6850 	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
6851 	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
6852 
6853 
6854 	if (ISCD(un)) {
6855 		un->un_additional_codes = sd_additional_codes;
6856 	} else {
6857 		un->un_additional_codes = NULL;
6858 	}
6859 
6860 	/*
6861 	 * Create the kstats here so they can be available for attach-time
6862 	 * routines that send commands to the unit (either polled or via
6863 	 * sd_send_scsi_cmd).
6864 	 *
6865 	 * Note: This is a critical sequence that needs to be maintained:
6866 	 *	1) Instantiate the kstats here, before any routines using the
6867 	 *	   iopath (i.e. sd_send_scsi_cmd).
6868 	 *	2) Instantiate and initialize the partition stats
6869 	 *	   (sd_set_pstats).
6870 	 *	3) Initialize the error stats (sd_set_errstats), following
6871 	 *	   sd_validate_geometry(), sd_register_devid(),
6872 	 *	   and sd_cache_control().
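	 * Deviating from this order could leave attach-time commands
	 * updating kstats that have not been created yet.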
6873 */ 6874 6875 un->un_stats = kstat_create(sd_label, instance, 6876 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6877 if (un->un_stats != NULL) { 6878 un->un_stats->ks_lock = SD_MUTEX(un); 6879 kstat_install(un->un_stats); 6880 } 6881 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6882 "sd_unit_attach: un:0x%p un_stats created\n", un); 6883 6884 sd_create_errstats(un, instance); 6885 if (un->un_errstats == NULL) { 6886 goto create_errstats_failed; 6887 } 6888 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6889 "sd_unit_attach: un:0x%p errstats created\n", un); 6890 6891 /* 6892 * The following if/else code was relocated here from below as part 6893 * of the fix for bug (4430280). However with the default setup added 6894 * on entry to this routine, it's no longer absolutely necessary for 6895 * this to be before the call to sd_spin_up_unit. 6896 */ 6897 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6898 /* 6899 * If SCSI-2 tagged queueing is supported by the target 6900 * and by the host adapter then we will enable it. 6901 */ 6902 un->un_tagflags = 0; 6903 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6904 (devp->sd_inq->inq_cmdque) && 6905 (un->un_f_arq_enabled == TRUE)) { 6906 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6907 1, 1) == 1) { 6908 un->un_tagflags = FLAG_STAG; 6909 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6910 "sd_unit_attach: un:0x%p tag queueing " 6911 "enabled\n", un); 6912 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6913 "untagged-qing", 0) == 1) { 6914 un->un_f_opt_queueing = TRUE; 6915 un->un_saved_throttle = un->un_throttle = 6916 min(un->un_throttle, 3); 6917 } else { 6918 un->un_f_opt_queueing = FALSE; 6919 un->un_saved_throttle = un->un_throttle = 1; 6920 } 6921 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6922 == 1) && (un->un_f_arq_enabled == TRUE)) { 6923 /* The Host Adapter supports internal queueing. */ 6924 un->un_f_opt_queueing = TRUE; 6925 un->un_saved_throttle = un->un_throttle = 6926 min(un->un_throttle, 3); 6927 } else { 6928 un->un_f_opt_queueing = FALSE; 6929 un->un_saved_throttle = un->un_throttle = 1; 6930 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6931 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6932 } 6933 6934 /* 6935 * Enable large transfers for SATA/SAS drives 6936 */ 6937 if (SD_IS_SERIAL(un)) { 6938 un->un_max_xfer_size = 6939 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6940 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6941 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6942 "sd_unit_attach: un:0x%p max transfer " 6943 "size=0x%x\n", un, un->un_max_xfer_size); 6944 6945 } 6946 6947 /* Setup or tear down default wide operations for disks */ 6948 6949 /* 6950 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6951 * and "ssd_max_xfer_size" to exist simultaneously on the same 6952 * system and be set to different values. In the future this 6953 * code may need to be updated when the ssd module is 6954 * obsoleted and removed from the system. 
(4299588)
6955 		 */
6956 		if (SD_IS_PARALLEL_SCSI(un) &&
6957 		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
6958 		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
6959 			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
6960 			    1, 1) == 1) {
6961 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6962 				    "sd_unit_attach: un:0x%p Wide Transfer "
6963 				    "enabled\n", un);
6964 			}
6965 
6966 			/*
6967 			 * If tagged queuing has also been enabled, then
6968 			 * enable large xfers
6969 			 */
6970 			if (un->un_saved_throttle == sd_max_throttle) {
6971 				un->un_max_xfer_size =
6972 				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
6973 				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
6974 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6975 				    "sd_unit_attach: un:0x%p max transfer "
6976 				    "size=0x%x\n", un, un->un_max_xfer_size);
6977 			}
6978 		} else {
6979 			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
6980 			    0, 1) == 1) {
6981 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
6982 				    "sd_unit_attach: un:0x%p "
6983 				    "Wide Transfer disabled\n", un);
6984 			}
6985 		}
6986 	} else {
6987 		un->un_tagflags = FLAG_STAG;
6988 		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
6989 		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
6990 	}
6991 
6992 	/*
6993 	 * If this target supports LUN reset, try to enable it.
6994 	 */
6995 	if (un->un_f_lun_reset_enabled) {
6996 		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
6997 			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
6998 			    "un:0x%p lun_reset capability set\n", un);
6999 		} else {
7000 			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7001 			    "un:0x%p lun-reset capability not set\n", un);
7002 		}
7003 	}
7004 
7005 	/*
7006 	 * Adjust the maximum transfer size. This is to fix
7007 	 * the problem of partial DMA support on SPARC. Some
7008 	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
7009 	 * size, which requires partial DMA support on SPARC.
7010 	 * In the future the SPARC pci nexus driver may solve
7011 	 * the problem instead of this fix.
7012 	 */
7013 #if defined(__sparc)
7014 	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7015 	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7016 		un->un_max_xfer_size = max_xfer_size;
7017 		un->un_partial_dma_supported = 1;
7018 	}
7019 #endif
7020 
7021 	/*
7022 	 * Set PKT_DMA_PARTIAL flag.
7023 	 */
7024 	if (un->un_partial_dma_supported == 1) {
7025 		un->un_pkt_flags = PKT_DMA_PARTIAL;
7026 	} else {
7027 		un->un_pkt_flags = 0;
7028 	}
7029 
7030 	/*
7031 	 * At this point in the attach, we have enough info in the
7032 	 * soft state to be able to issue commands to the target.
7033 	 *
7034 	 * All command paths used below MUST issue their commands as
7035 	 * SD_PATH_DIRECT. This is important as intermediate layers
7036 	 * are not all initialized yet (such as PM).
7037 	 */
7038 
7039 	/*
7040 	 * Send a TEST UNIT READY command to the device. This should clear
7041 	 * any outstanding UNIT ATTENTION that may be present.
7042 	 *
7043 	 * Note: Don't check for success, just track if there is a reservation;
7044 	 * this is a throw-away command to clear any unit attentions.
7045 	 *
7046 	 * Note: This MUST be the first command issued to the target during
7047 	 * attach to ensure power on UNIT ATTENTIONS are cleared.
7048 	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7049 	 * with attempts at spinning up a device with no media.
7050 */ 7051 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7052 reservation_flag = SD_TARGET_IS_RESERVED; 7053 } 7054 7055 /* 7056 * If the device is NOT a removable media device, attempt to spin 7057 * it up (using the START_STOP_UNIT command) and read its capacity 7058 * (using the READ CAPACITY command). Note, however, that either 7059 * of these could fail and in some cases we would continue with 7060 * the attach despite the failure (see below). 7061 */ 7062 if (un->un_f_descr_format_supported) { 7063 switch (sd_spin_up_unit(un)) { 7064 case 0: 7065 /* 7066 * Spin-up was successful; now try to read the 7067 * capacity. If successful then save the results 7068 * and mark the capacity & lbasize as valid. 7069 */ 7070 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7071 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7072 7073 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7074 &lbasize, SD_PATH_DIRECT)) { 7075 case 0: { 7076 if (capacity > DK_MAX_BLOCKS) { 7077 #ifdef _LP64 7078 if (capacity + 1 > 7079 SD_GROUP1_MAX_ADDRESS) { 7080 /* 7081 * Enable descriptor format 7082 * sense data so that we can 7083 * get 64 bit sense data 7084 * fields. 7085 */ 7086 sd_enable_descr_sense(un); 7087 } 7088 #else 7089 /* 32-bit kernels can't handle this */ 7090 scsi_log(SD_DEVINFO(un), 7091 sd_label, CE_WARN, 7092 "disk has %llu blocks, which " 7093 "is too large for a 32-bit " 7094 "kernel", capacity); 7095 7096 #if defined(__i386) || defined(__amd64) 7097 /* 7098 * A 1TB disk was treated as (1T - 512)B 7099 * in the past, so it might have a 7100 * valid VTOC and Solaris partitions; 7101 * we have to allow it to continue to 7102 * work. 7103 */ 7104 if (capacity - 1 > DK_MAX_BLOCKS) 7105 #endif 7106 goto spinup_failed; 7107 #endif 7108 } 7109 7110 /* 7111 * It is not necessary to check here whether 7112 * the device's capacity exceeds what the 7113 * maximum HBA CDB can address, because 7114 * sd_send_scsi_READ_CAPACITY retrieves 7115 * the capacity with a USCSI command, which 7116 * is itself constrained by the maximum HBA 7117 * CDB; sd_send_scsi_READ_CAPACITY returns 7118 * EINVAL when a bigger CDB than the HBA 7119 * supports would be required. That case is 7120 * handled below in "case EINVAL". 7121 */ 7122 7123 /* 7124 * The following relies on 7125 * sd_send_scsi_READ_CAPACITY never 7126 * returning 0 for capacity and/or lbasize. 7127 */ 7128 sd_update_block_info(un, lbasize, capacity); 7129 7130 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7131 "sd_unit_attach: un:0x%p capacity = %ld " 7132 "blocks; lbasize = %ld.\n", un, 7133 un->un_blockcount, un->un_tgt_blocksize); 7134 7135 break; 7136 } 7137 case EINVAL: 7138 /* 7139 * In the case where the max-cdb-length property 7140 * is smaller than the required CDB length for 7141 * a SCSI device, a target driver can fail to 7142 * attach to that device. 7143 */ 7144 scsi_log(SD_DEVINFO(un), 7145 sd_label, CE_WARN, 7146 "disk capacity is too large " 7147 "for current cdb length"); 7148 goto spinup_failed; 7149 case EACCES: 7150 /* 7151 * Should never get here if the spin-up 7152 * succeeded, but code it in anyway. 7153 * From here, just continue with the attach... 7154 */ 7155 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7156 "sd_unit_attach: un:0x%p " 7157 "sd_send_scsi_READ_CAPACITY " 7158 "returned reservation conflict\n", un); 7159 reservation_flag = SD_TARGET_IS_RESERVED; 7160 break; 7161 default: 7162 /* 7163 * Likewise, should never get here if the 7164 * spin-up succeeded. Just continue with 7165 * the attach...
7166 */ 7167 break; 7168 } 7169 break; 7170 case EACCES: 7171 /* 7172 * Device is reserved by another host. In this case 7173 * we could not spin it up or read the capacity, but 7174 * we continue with the attach anyway. 7175 */ 7176 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7177 "sd_unit_attach: un:0x%p spin-up reservation " 7178 "conflict.\n", un); 7179 reservation_flag = SD_TARGET_IS_RESERVED; 7180 break; 7181 default: 7182 /* Fail the attach if the spin-up failed. */ 7183 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7184 "sd_unit_attach: un:0x%p spin-up failed.", un); 7185 goto spinup_failed; 7186 } 7187 } 7188 7189 /* 7190 * Check to see if this is an MMC drive 7191 */ 7192 if (ISCD(un)) { 7193 sd_set_mmc_caps(un); 7194 } 7195 7196 7197 /* 7198 * Add a zero-length attribute to tell the world we support 7199 * kernel ioctls (for layered drivers) 7200 */ 7201 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7202 DDI_KERNEL_IOCTL, NULL, 0); 7203 7204 /* 7205 * Add a boolean property to tell the world we support 7206 * the B_FAILFAST flag (for layered drivers) 7207 */ 7208 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7209 "ddi-failfast-supported", NULL, 0); 7210 7211 /* 7212 * Initialize power management 7213 */ 7214 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7215 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7216 sd_setup_pm(un, devi); 7217 if (un->un_f_pm_is_enabled == FALSE) { 7218 /* 7219 * For performance, point to a jump table that does 7220 * not include pm. 7221 * The direct and priority chains don't change with PM. 7222 * 7223 * Note: this is currently done based on individual device 7224 * capabilities. When an interface for determining system 7225 * power enabled state becomes available, or when additional 7226 * layers are added to the command chain, these values will 7227 * have to be re-evaluated for correctness. 7228 */ 7229 if (un->un_f_non_devbsize_supported) { 7230 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7231 } else { 7232 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7233 } 7234 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7235 } 7236 7237 /* 7238 * This property is set to 0 by HA software to avoid retries 7239 * on a reserved disk. (The preferred property name is 7240 * "retry-on-reservation-conflict") (1189689) 7241 * 7242 * Note: The use of a global here can have unintended consequences. A 7243 * per-instance variable is preferable to match the capabilities of 7244 * different underlying HBAs (4402600) 7245 */ 7246 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7247 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7248 sd_retry_on_reservation_conflict); 7249 if (sd_retry_on_reservation_conflict != 0) { 7250 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7251 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7252 sd_retry_on_reservation_conflict); 7253 } 7254 7255 /* Set up options for QFULL handling. */ 7256 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7257 "qfull-retries", -1)) != -1) { 7258 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7259 rval, 1); 7260 } 7261 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7262 "qfull-retry-interval", -1)) != -1) { 7263 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7264 rval, 1); 7265 } 7266 7267 /* 7268 * This just prints a message that announces the existence of the 7269 * device.
The message is always printed in the system logfile, but 7270 * only appears on the console if the system is booted with the 7271 * -v (verbose) argument. 7272 */ 7273 ddi_report_dev(devi); 7274 7275 un->un_mediastate = DKIO_NONE; 7276 7277 cmlb_alloc_handle(&un->un_cmlbhandle); 7278 7279 #if defined(__i386) || defined(__amd64) 7280 /* 7281 * On x86, compensate for off-by-1 legacy error 7282 */ 7283 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7284 (lbasize == un->un_sys_blocksize)) 7285 offbyone = CMLB_OFF_BY_ONE; 7286 #endif 7287 7288 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7289 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7290 un->un_node_type, offbyone, un->un_cmlbhandle, 7291 (void *)SD_PATH_DIRECT) != 0) { 7292 goto cmlb_attach_failed; 7293 } 7294 7295 7296 /* 7297 * Read and validate the device's geometry (i.e., the disk label). 7298 * A new unformatted drive will not have a valid geometry, but 7299 * the driver needs to successfully attach to this device so 7300 * the drive can be formatted via ioctls. 7301 */ 7302 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7303 (void *)SD_PATH_DIRECT) == 0) ? 1 : 0; 7304 7305 mutex_enter(SD_MUTEX(un)); 7306 7307 /* 7308 * Read and initialize the devid for the unit. 7309 */ 7310 if (un->un_f_devid_supported) { 7311 sd_register_devid(un, devi, reservation_flag); 7312 } 7313 mutex_exit(SD_MUTEX(un)); 7314 7315 #if (defined(__fibre)) 7316 /* 7317 * Register callbacks for fibre only. You can't do this solely 7318 * on the basis of the devid_type because this is HBA-specific. 7319 * We need to query our HBA capabilities to find out whether to 7320 * register or not. 7321 */ 7322 if (un->un_f_is_fibre) { 7323 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7324 sd_init_event_callbacks(un); 7325 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7326 "sd_unit_attach: un:0x%p event callbacks inserted", 7327 un); 7328 } 7329 } 7330 #endif 7331 7332 if (un->un_f_opt_disable_cache == TRUE) { 7333 /* 7334 * Disable both read cache and write cache. This is 7335 * the historic behavior of the keywords in the config file. 7336 */ 7337 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7338 0) { 7339 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7340 "sd_unit_attach: un:0x%p Could not disable " 7341 "caching", un); 7342 goto devid_failed; 7343 } 7344 } 7345 7346 /* 7347 * Check the value of the WCE bit now and 7348 * set un_f_write_cache_enabled accordingly. 7349 */ 7350 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7351 mutex_enter(SD_MUTEX(un)); 7352 un->un_f_write_cache_enabled = (wc_enabled != 0); 7353 mutex_exit(SD_MUTEX(un)); 7354 7355 /* 7356 * Check the value of the NV_SUP bit and set 7357 * un_f_suppress_cache_flush accordingly. 7358 */ 7359 sd_get_nv_sup(un); 7360 7361 /* 7362 * Find out what type of reservation this disk supports. 7363 */ 7364 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7365 case 0: 7366 /* 7367 * SCSI-3 reservations are supported. 7368 */ 7369 un->un_reservation_type = SD_SCSI3_RESERVATION; 7370 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7371 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7372 break; 7373 case ENOTSUP: 7374 /* 7375 * The PERSISTENT RESERVE IN command would not be recognized by 7376 * a SCSI-2 device, so assume the reservation type is SCSI-2.
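 * (Typically a SCSI-2 device rejects the unsupported opcode with CHECK
 * CONDITION / ILLEGAL REQUEST sense data; the assumption here is that
 * the command layer surfaces that rejection as the ENOTSUP handled in
 * this case.)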
7377 */ 7378 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7379 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7380 un->un_reservation_type = SD_SCSI2_RESERVATION; 7381 break; 7382 default: 7383 /* 7384 * default to SCSI-3 reservations 7385 */ 7386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7387 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7388 un->un_reservation_type = SD_SCSI3_RESERVATION; 7389 break; 7390 } 7391 7392 /* 7393 * Set the pstat and error stat values here, so that data obtained 7394 * during the preceding attach-time routines is available. 7395 * 7396 * Note: This is a critical sequence that needs to be maintained: 7397 * 1) Instantiate the kstats before any routines using the iopath 7398 * (i.e. sd_send_scsi_cmd). 7399 * 2) Initialize the error stats (sd_set_errstats) and partition 7400 * stats (sd_set_pstats) here, following 7401 * cmlb_validate_geometry(), sd_register_devid(), and 7402 * sd_cache_control(). 7403 */ 7404 7405 if (un->un_f_pkstats_enabled && geom_label_valid) { 7406 sd_set_pstats(un); 7407 SD_TRACE(SD_LOG_IO_PARTITION, un, 7408 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7409 } 7410 7411 sd_set_errstats(un); 7412 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7413 "sd_unit_attach: un:0x%p errstats set\n", un); 7414 7415 7416 /* 7417 * After successfully attaching an instance, we record how many LUNs 7418 * are attached to the relevant target and 7419 * controller for parallel SCSI. This information is used when sd tries 7420 * to set the tagged queuing capability in the HBA. 7421 */ 7422 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7423 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7424 } 7425 7426 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7427 "sd_unit_attach: un:0x%p exit success\n", un); 7428 7429 return (DDI_SUCCESS); 7430 7431 /* 7432 * An error occurred during the attach; clean up & return failure. 7433 */ 7434 7435 devid_failed: 7436 7437 setup_pm_failed: 7438 ddi_remove_minor_node(devi, NULL); 7439 7440 cmlb_attach_failed: 7441 /* 7442 * Cleanup from the scsi_ifsetcap() calls (437868) 7443 */ 7444 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7445 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7446 7447 /* 7448 * Refer to the comments on setting tagged-qing at the beginning of 7449 * sd_unit_attach. We can only disable tagged queuing when there is 7450 * no LUN attached to the target. 7451 */ 7452 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7453 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7454 } 7455 7456 if (un->un_f_is_fibre == FALSE) { 7457 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7458 } 7459 7460 spinup_failed: 7461 7462 mutex_enter(SD_MUTEX(un)); 7463 7464 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7465 if (un->un_direct_priority_timeid != NULL) { 7466 timeout_id_t temp_id = un->un_direct_priority_timeid; 7467 un->un_direct_priority_timeid = NULL; 7468 mutex_exit(SD_MUTEX(un)); 7469 (void) untimeout(temp_id); 7470 mutex_enter(SD_MUTEX(un)); 7471 } 7472 7473 /* Cancel any pending start/stop timeouts */ 7474 if (un->un_startstop_timeid != NULL) { 7475 timeout_id_t temp_id = un->un_startstop_timeid; 7476 un->un_startstop_timeid = NULL; 7477 mutex_exit(SD_MUTEX(un)); 7478 (void) untimeout(temp_id); 7479 mutex_enter(SD_MUTEX(un)); 7480 } 7481 7482 /* Cancel any pending reset-throttle timeouts */ 7483 if (un->un_reset_throttle_timeid != NULL) { 7484 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7485 un->un_reset_throttle_timeid = NULL; 7486 mutex_exit(SD_MUTEX(un)); 7487 (void) untimeout(temp_id); 7488 mutex_enter(SD_MUTEX(un)); 7489 } 7490 7491 /* Cancel any pending retry timeouts */ 7492 if (un->un_retry_timeid != NULL) { 7493 timeout_id_t temp_id = un->un_retry_timeid; 7494 un->un_retry_timeid = NULL; 7495 mutex_exit(SD_MUTEX(un)); 7496 (void) untimeout(temp_id); 7497 mutex_enter(SD_MUTEX(un)); 7498 } 7499 7500 /* Cancel any pending delayed cv broadcast timeouts */ 7501 if (un->un_dcvb_timeid != NULL) { 7502 timeout_id_t temp_id = un->un_dcvb_timeid; 7503 un->un_dcvb_timeid = NULL; 7504 mutex_exit(SD_MUTEX(un)); 7505 (void) untimeout(temp_id); 7506 mutex_enter(SD_MUTEX(un)); 7507 } 7508 7509 mutex_exit(SD_MUTEX(un)); 7510 7511 /* There should not be any in-progress I/O so ASSERT this check */ 7512 ASSERT(un->un_ncmds_in_transport == 0); 7513 ASSERT(un->un_ncmds_in_driver == 0); 7514 7515 /* Do not free the softstate if the callback routine is active */ 7516 sd_sync_with_callback(un); 7517 7518 /* 7519 * Partition stats apparently are not used with removables. These would 7520 * not have been created during attach, so no need to clean them up... 7521 */ 7522 if (un->un_errstats != NULL) { 7523 kstat_delete(un->un_errstats); 7524 un->un_errstats = NULL; 7525 } 7526 7527 create_errstats_failed: 7528 7529 if (un->un_stats != NULL) { 7530 kstat_delete(un->un_stats); 7531 un->un_stats = NULL; 7532 } 7533 7534 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7535 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7536 7537 ddi_prop_remove_all(devi); 7538 sema_destroy(&un->un_semoclose); 7539 cv_destroy(&un->un_state_cv); 7540 7541 getrbuf_failed: 7542 7543 sd_free_rqs(un); 7544 7545 alloc_rqs_failed: 7546 7547 devp->sd_private = NULL; 7548 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7549 7550 get_softstate_failed: 7551 /* 7552 * Note: the man pages are unclear as to whether or not doing a 7553 * ddi_soft_state_free(sd_state, instance) is the right way to 7554 * clean up after the ddi_soft_state_zalloc() if the subsequent 7555 * ddi_get_soft_state() fails. The implication seems to be 7556 * that the get_soft_state cannot fail if the zalloc succeeds. 7557 */ 7558 ddi_soft_state_free(sd_state, instance); 7559 7560 probe_failed: 7561 scsi_unprobe(devp); 7562 7563 return (DDI_FAILURE); 7564 } 7565 7566 7567 /* 7568 * Function: sd_unit_detach 7569 * 7570 * Description: Performs DDI_DETACH processing for sddetach(). 
7571 * 7572 * Return Code: DDI_SUCCESS 7573 * DDI_FAILURE 7574 * 7575 * Context: Kernel thread context 7576 */ 7577 7578 static int 7579 sd_unit_detach(dev_info_t *devi) 7580 { 7581 struct scsi_device *devp; 7582 struct sd_lun *un; 7583 int i; 7584 int tgt; 7585 dev_t dev; 7586 dev_info_t *pdip = ddi_get_parent(devi); 7587 int instance = ddi_get_instance(devi); 7588 7589 mutex_enter(&sd_detach_mutex); 7590 7591 /* 7592 * Fail the detach for any of the following: 7593 * - Unable to get the sd_lun struct for the instance 7594 * - A layered driver has an outstanding open on the instance 7595 * - Another thread is already detaching this instance 7596 * - Another thread is currently performing an open 7597 */ 7598 devp = ddi_get_driver_private(devi); 7599 if ((devp == NULL) || 7600 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7601 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7602 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7603 mutex_exit(&sd_detach_mutex); 7604 return (DDI_FAILURE); 7605 } 7606 7607 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7608 7609 /* 7610 * Mark this instance as currently in a detach, to inhibit any 7611 * opens from a layered driver. 7612 */ 7613 un->un_detach_count++; 7614 mutex_exit(&sd_detach_mutex); 7615 7616 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7617 SCSI_ADDR_PROP_TARGET, -1); 7618 7619 dev = sd_make_device(SD_DEVINFO(un)); 7620 7621 #ifndef lint 7622 _NOTE(COMPETING_THREADS_NOW); 7623 #endif 7624 7625 mutex_enter(SD_MUTEX(un)); 7626 7627 /* 7628 * Fail the detach if there are any outstanding layered 7629 * opens on this device. 7630 */ 7631 for (i = 0; i < NDKMAP; i++) { 7632 if (un->un_ocmap.lyropen[i] != 0) { 7633 goto err_notclosed; 7634 } 7635 } 7636 7637 /* 7638 * Verify there are NO outstanding commands issued to this device. 7639 * ie, un_ncmds_in_transport == 0. 7640 * It's possible to have outstanding commands through the physio 7641 * code path, even though everything's closed. 7642 */ 7643 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7644 (un->un_direct_priority_timeid != NULL) || 7645 (un->un_state == SD_STATE_RWAIT)) { 7646 mutex_exit(SD_MUTEX(un)); 7647 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7648 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7649 goto err_stillbusy; 7650 } 7651 7652 /* 7653 * If we have the device reserved, release the reservation. 7654 */ 7655 if ((un->un_resvd_status & SD_RESERVE) && 7656 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7657 mutex_exit(SD_MUTEX(un)); 7658 /* 7659 * Note: sd_reserve_release sends a command to the device 7660 * via the sd_ioctlcmd() path, and can sleep. 7661 */ 7662 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7663 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7664 "sd_dr_detach: Cannot release reservation \n"); 7665 } 7666 } else { 7667 mutex_exit(SD_MUTEX(un)); 7668 } 7669 7670 /* 7671 * Untimeout any reserve recover, throttle reset, restart unit 7672 * and delayed broadcast timeout threads. Protect the timeout pointer 7673 * from getting nulled by their callback functions. 
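 * The capture-and-clear pattern used below is deliberate: each timeout
 * id is saved and NULLed while SD_MUTEX is held, but SD_MUTEX is
 * dropped around the untimeout(9F) call itself. untimeout(9F) may wait
 * for an already-executing handler to complete, and these handlers
 * acquire SD_MUTEX, so holding the mutex across the call could
 * deadlock.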
7674 */ 7675 mutex_enter(SD_MUTEX(un)); 7676 if (un->un_resvd_timeid != NULL) { 7677 timeout_id_t temp_id = un->un_resvd_timeid; 7678 un->un_resvd_timeid = NULL; 7679 mutex_exit(SD_MUTEX(un)); 7680 (void) untimeout(temp_id); 7681 mutex_enter(SD_MUTEX(un)); 7682 } 7683 7684 if (un->un_reset_throttle_timeid != NULL) { 7685 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7686 un->un_reset_throttle_timeid = NULL; 7687 mutex_exit(SD_MUTEX(un)); 7688 (void) untimeout(temp_id); 7689 mutex_enter(SD_MUTEX(un)); 7690 } 7691 7692 if (un->un_startstop_timeid != NULL) { 7693 timeout_id_t temp_id = un->un_startstop_timeid; 7694 un->un_startstop_timeid = NULL; 7695 mutex_exit(SD_MUTEX(un)); 7696 (void) untimeout(temp_id); 7697 mutex_enter(SD_MUTEX(un)); 7698 } 7699 7700 if (un->un_dcvb_timeid != NULL) { 7701 timeout_id_t temp_id = un->un_dcvb_timeid; 7702 un->un_dcvb_timeid = NULL; 7703 mutex_exit(SD_MUTEX(un)); 7704 (void) untimeout(temp_id); 7705 } else { 7706 mutex_exit(SD_MUTEX(un)); 7707 } 7708 7709 /* Remove any pending reservation reclaim requests for this device */ 7710 sd_rmv_resv_reclaim_req(dev); 7711 7712 mutex_enter(SD_MUTEX(un)); 7713 7714 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7715 if (un->un_direct_priority_timeid != NULL) { 7716 timeout_id_t temp_id = un->un_direct_priority_timeid; 7717 un->un_direct_priority_timeid = NULL; 7718 mutex_exit(SD_MUTEX(un)); 7719 (void) untimeout(temp_id); 7720 mutex_enter(SD_MUTEX(un)); 7721 } 7722 7723 /* Cancel any active multi-host disk watch thread requests */ 7724 if (un->un_mhd_token != NULL) { 7725 mutex_exit(SD_MUTEX(un)); 7726 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7727 if (scsi_watch_request_terminate(un->un_mhd_token, 7728 SCSI_WATCH_TERMINATE_NOWAIT)) { 7729 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7730 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7731 /* 7732 * Note: We are returning here after having removed 7733 * some driver timeouts above. This is consistent with 7734 * the legacy implementation but perhaps the watch 7735 * terminate call should be made with the wait flag set. 7736 */ 7737 goto err_stillbusy; 7738 } 7739 mutex_enter(SD_MUTEX(un)); 7740 un->un_mhd_token = NULL; 7741 } 7742 7743 if (un->un_swr_token != NULL) { 7744 mutex_exit(SD_MUTEX(un)); 7745 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7746 if (scsi_watch_request_terminate(un->un_swr_token, 7747 SCSI_WATCH_TERMINATE_NOWAIT)) { 7748 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7749 "sd_dr_detach: Cannot cancel swr watch request\n"); 7750 /* 7751 * Note: We are returning here after having removed 7752 * some driver timeouts above. This is consistent with 7753 * the legacy implementation but perhaps the watch 7754 * terminate call should be made with the wait flag set. 7755 */ 7756 goto err_stillbusy; 7757 } 7758 mutex_enter(SD_MUTEX(un)); 7759 un->un_swr_token = NULL; 7760 } 7761 7762 mutex_exit(SD_MUTEX(un)); 7763 7764 /* 7765 * Clear any scsi_reset_notifies. We clear the reset notifies 7766 * if we have not registered one. 7767 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7768 */ 7769 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7770 sd_mhd_reset_notify_cb, (caddr_t)un); 7771 7772 /* 7773 * protect the timeout pointers from getting nulled by 7774 * their callback functions during the cancellation process. 7775 * In such a scenario untimeout can be invoked with a null value. 
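 * (This is the same capture-and-clear pattern used above for the
 * SD_MUTEX-protected timeouts, here applied under un_pm_mutex.)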
7776 */ 7777 _NOTE(NO_COMPETING_THREADS_NOW); 7778 7779 mutex_enter(&un->un_pm_mutex); 7780 if (un->un_pm_idle_timeid != NULL) { 7781 timeout_id_t temp_id = un->un_pm_idle_timeid; 7782 un->un_pm_idle_timeid = NULL; 7783 mutex_exit(&un->un_pm_mutex); 7784 7785 /* 7786 * Timeout is active; cancel it. 7787 * Note that it'll never be active on a device 7788 * that does not support PM therefore we don't 7789 * have to check before calling pm_idle_component. 7790 */ 7791 (void) untimeout(temp_id); 7792 (void) pm_idle_component(SD_DEVINFO(un), 0); 7793 mutex_enter(&un->un_pm_mutex); 7794 } 7795 7796 /* 7797 * Check whether there is already a timeout scheduled for power 7798 * management. If so, don't lower the power here; that's 7799 * the timeout handler's job. 7800 */ 7801 if (un->un_pm_timeid != NULL) { 7802 timeout_id_t temp_id = un->un_pm_timeid; 7803 un->un_pm_timeid = NULL; 7804 mutex_exit(&un->un_pm_mutex); 7805 /* 7806 * Timeout is active; cancel it. 7807 * Note that it'll never be active on a device 7808 * that does not support PM therefore we don't 7809 * have to check before calling pm_idle_component. 7810 */ 7811 (void) untimeout(temp_id); 7812 (void) pm_idle_component(SD_DEVINFO(un), 0); 7813 7814 } else { 7815 mutex_exit(&un->un_pm_mutex); 7816 if ((un->un_f_pm_is_enabled == TRUE) && 7817 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7818 DDI_SUCCESS)) { 7819 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7820 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7821 /* 7822 * Fix for bug: 4297749, item # 13 7823 * The above test now includes a check to see if PM is 7824 * supported by this device before calling 7825 * pm_lower_power(). 7826 * Note, the following is not dead code. The call to 7827 * pm_lower_power above will generate a call back into 7828 * our sdpower routine which might result in a timeout 7829 * handler getting activated. Therefore the following 7830 * code is valid and necessary. 7831 */ 7832 mutex_enter(&un->un_pm_mutex); 7833 if (un->un_pm_timeid != NULL) { 7834 timeout_id_t temp_id = un->un_pm_timeid; 7835 un->un_pm_timeid = NULL; 7836 mutex_exit(&un->un_pm_mutex); 7837 (void) untimeout(temp_id); 7838 (void) pm_idle_component(SD_DEVINFO(un), 0); 7839 } else { 7840 mutex_exit(&un->un_pm_mutex); 7841 } 7842 } 7843 } 7844 7845 /* 7846 * Cleanup from the scsi_ifsetcap() calls (437868) 7847 * Relocated here from above to be after the call to 7848 * pm_lower_power, which was getting errors. 7849 */ 7850 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7851 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7852 7853 /* 7854 * Currently, tagged queuing is supported per target by the HBA. 7855 * Setting this per LUN instance actually sets the capability of this 7856 * target in the HBA, which affects the LUNs already attached on the 7857 * same target. So during detach, we can disable this capability 7858 * only when this is the only LUN left on this target. By doing 7859 * this, we assume a target has the same tagged queuing capability 7860 * for every LUN. The condition can be removed when the HBA is changed 7861 * to support per-LUN tagged queuing capability.
7862 */ 7863 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7864 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7865 } 7866 7867 if (un->un_f_is_fibre == FALSE) { 7868 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7869 } 7870 7871 /* 7872 * Remove any event callbacks, fibre only 7873 */ 7874 if (un->un_f_is_fibre == TRUE) { 7875 if ((un->un_insert_event != NULL) && 7876 (ddi_remove_event_handler(un->un_insert_cb_id) != 7877 DDI_SUCCESS)) { 7878 /* 7879 * Note: We are returning here after having done 7880 * substantial cleanup above. This is consistent 7881 * with the legacy implementation but this may not 7882 * be the right thing to do. 7883 */ 7884 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7885 "sd_dr_detach: Cannot cancel insert event\n"); 7886 goto err_remove_event; 7887 } 7888 un->un_insert_event = NULL; 7889 7890 if ((un->un_remove_event != NULL) && 7891 (ddi_remove_event_handler(un->un_remove_cb_id) != 7892 DDI_SUCCESS)) { 7893 /* 7894 * Note: We are returning here after having done 7895 * substantial cleanup above. This is consistent 7896 * with the legacy implementation but this may not 7897 * be the right thing to do. 7898 */ 7899 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7900 "sd_dr_detach: Cannot cancel remove event\n"); 7901 goto err_remove_event; 7902 } 7903 un->un_remove_event = NULL; 7904 } 7905 7906 /* Do not free the softstate if the callback routine is active */ 7907 sd_sync_with_callback(un); 7908 7909 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7910 cmlb_free_handle(&un->un_cmlbhandle); 7911 7912 /* 7913 * Hold the detach mutex here, to make sure that no other threads ever 7914 * can access a (partially) freed soft state structure. 7915 */ 7916 mutex_enter(&sd_detach_mutex); 7917 7918 /* 7919 * Clean up the soft state struct. 7920 * Cleanup is done in reverse order of allocs/inits. 7921 * At this point there should be no competing threads anymore. 7922 */ 7923 7924 /* Unregister and free device id. */ 7925 ddi_devid_unregister(devi); 7926 if (un->un_devid) { 7927 ddi_devid_free(un->un_devid); 7928 un->un_devid = NULL; 7929 } 7930 7931 /* 7932 * Destroy wmap cache if it exists. 7933 */ 7934 if (un->un_wm_cache != NULL) { 7935 kmem_cache_destroy(un->un_wm_cache); 7936 un->un_wm_cache = NULL; 7937 } 7938 7939 /* 7940 * kstat cleanup is done in detach for all device types (4363169). 7941 * We do not want to fail detach if the device kstats are not deleted 7942 * since there is a confusion about the devo_refcnt for the device. 7943 * We just delete the kstats and let detach complete successfully. 7944 */ 7945 if (un->un_stats != NULL) { 7946 kstat_delete(un->un_stats); 7947 un->un_stats = NULL; 7948 } 7949 if (un->un_errstats != NULL) { 7950 kstat_delete(un->un_errstats); 7951 un->un_errstats = NULL; 7952 } 7953 7954 /* Remove partition stats */ 7955 if (un->un_f_pkstats_enabled) { 7956 for (i = 0; i < NSDMAP; i++) { 7957 if (un->un_pstats[i] != NULL) { 7958 kstat_delete(un->un_pstats[i]); 7959 un->un_pstats[i] = NULL; 7960 } 7961 } 7962 } 7963 7964 /* Remove xbuf registration */ 7965 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7966 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7967 7968 /* Remove driver properties */ 7969 ddi_prop_remove_all(devi); 7970 7971 mutex_destroy(&un->un_pm_mutex); 7972 cv_destroy(&un->un_pm_busy_cv); 7973 7974 cv_destroy(&un->un_wcc_cv); 7975 7976 /* Open/close semaphore */ 7977 sema_destroy(&un->un_semoclose); 7978 7979 /* Removable media condvar. 
*/ 7980 cv_destroy(&un->un_state_cv); 7981 7982 /* Suspend/resume condvar. */ 7983 cv_destroy(&un->un_suspend_cv); 7984 cv_destroy(&un->un_disk_busy_cv); 7985 7986 sd_free_rqs(un); 7987 7988 /* Free up soft state */ 7989 devp->sd_private = NULL; 7990 7991 bzero(un, sizeof (struct sd_lun)); 7992 ddi_soft_state_free(sd_state, instance); 7993 7994 mutex_exit(&sd_detach_mutex); 7995 7996 /* This frees up the INQUIRY data associated with the device. */ 7997 scsi_unprobe(devp); 7998 7999 /* 8000 * After successfully detaching an instance, we update the record 8001 * of how many LUNs are attached to the relevant target and 8002 * controller for parallel SCSI. This information is used when sd tries 8003 * to set the tagged queuing capability in the HBA. 8004 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8005 * check if the device is parallel SCSI. However, we don't need to 8006 * check here because we've already checked during attach. No device 8007 * that is not parallel SCSI is in the chain. 8008 */ 8009 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8010 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8011 } 8012 8013 return (DDI_SUCCESS); 8014 8015 err_notclosed: 8016 mutex_exit(SD_MUTEX(un)); 8017 8018 err_stillbusy: 8019 _NOTE(NO_COMPETING_THREADS_NOW); 8020 8021 err_remove_event: 8022 mutex_enter(&sd_detach_mutex); 8023 un->un_detach_count--; 8024 mutex_exit(&sd_detach_mutex); 8025 8026 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8027 return (DDI_FAILURE); 8028 } 8029 8030 8031 /* 8032 * Function: sd_create_errstats 8033 * 8034 * Description: This routine instantiates the device error stats. 8035 * 8036 * Note: During attach the stats are instantiated first so they are 8037 * available for attach-time routines that utilize the driver 8038 * iopath to send commands to the device. The stats are initialized 8039 * separately so data obtained during some attach-time routines is 8040 * available.
(4362483) 8041 * 8042 * Arguments: un - driver soft state (unit) structure 8043 * instance - driver instance 8044 * 8045 * Context: Kernel thread context 8046 */ 8047 8048 static void 8049 sd_create_errstats(struct sd_lun *un, int instance) 8050 { 8051 struct sd_errstats *stp; 8052 char kstatmodule_err[KSTAT_STRLEN]; 8053 char kstatname[KSTAT_STRLEN]; 8054 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8055 8056 ASSERT(un != NULL); 8057 8058 if (un->un_errstats != NULL) { 8059 return; 8060 } 8061 8062 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8063 "%serr", sd_label); 8064 (void) snprintf(kstatname, sizeof (kstatname), 8065 "%s%d,err", sd_label, instance); 8066 8067 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8068 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8069 8070 if (un->un_errstats == NULL) { 8071 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8072 "sd_create_errstats: Failed kstat_create\n"); 8073 return; 8074 } 8075 8076 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8077 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8078 KSTAT_DATA_UINT32); 8079 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8080 KSTAT_DATA_UINT32); 8081 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8082 KSTAT_DATA_UINT32); 8083 kstat_named_init(&stp->sd_vid, "Vendor", 8084 KSTAT_DATA_CHAR); 8085 kstat_named_init(&stp->sd_pid, "Product", 8086 KSTAT_DATA_CHAR); 8087 kstat_named_init(&stp->sd_revision, "Revision", 8088 KSTAT_DATA_CHAR); 8089 kstat_named_init(&stp->sd_serial, "Serial No", 8090 KSTAT_DATA_CHAR); 8091 kstat_named_init(&stp->sd_capacity, "Size", 8092 KSTAT_DATA_ULONGLONG); 8093 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8094 KSTAT_DATA_UINT32); 8095 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8096 KSTAT_DATA_UINT32); 8097 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8098 KSTAT_DATA_UINT32); 8099 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8100 KSTAT_DATA_UINT32); 8101 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8102 KSTAT_DATA_UINT32); 8103 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8104 KSTAT_DATA_UINT32); 8105 8106 un->un_errstats->ks_private = un; 8107 un->un_errstats->ks_update = nulldev; 8108 8109 kstat_install(un->un_errstats); 8110 } 8111 8112 8113 /* 8114 * Function: sd_set_errstats 8115 * 8116 * Description: This routine sets the value of the vendor id, product id, 8117 * revision, serial number, and capacity device error stats. 8118 * 8119 * Note: During attach the stats are instantiated first so they are 8120 * available for attach-time routines that utilize the driver 8121 * iopath to send commands to the device. The stats are initialized 8122 * separately so data obtained during some attach-time routines is 8123 * available. 
(4362483) 8124 * 8125 * Arguments: un - driver soft state (unit) structure 8126 * 8127 * Context: Kernel thread context 8128 */ 8129 8130 static void 8131 sd_set_errstats(struct sd_lun *un) 8132 { 8133 struct sd_errstats *stp; 8134 8135 ASSERT(un != NULL); 8136 ASSERT(un->un_errstats != NULL); 8137 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8138 ASSERT(stp != NULL); 8139 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8140 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8141 (void) strncpy(stp->sd_revision.value.c, 8142 un->un_sd->sd_inq->inq_revision, 4); 8143 8144 /* 8145 * All the errstats are persistent across detach/attach, 8146 * so reset them all here to handle hot replacement of 8147 * disk drives, except for Sun-qualified drives whose 8148 * identity has not changed. 8149 */ 8150 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8151 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8152 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8153 stp->sd_softerrs.value.ui32 = 0; 8154 stp->sd_harderrs.value.ui32 = 0; 8155 stp->sd_transerrs.value.ui32 = 0; 8156 stp->sd_rq_media_err.value.ui32 = 0; 8157 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8158 stp->sd_rq_nodev_err.value.ui32 = 0; 8159 stp->sd_rq_recov_err.value.ui32 = 0; 8160 stp->sd_rq_illrq_err.value.ui32 = 0; 8161 stp->sd_rq_pfa_err.value.ui32 = 0; 8162 } 8163 8164 /* 8165 * Set the "Serial No" kstat for Sun-qualified drives (indicated by 8166 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8167 * (4376302)) 8168 */ 8169 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8170 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8171 sizeof (SD_INQUIRY(un)->inq_serial)); 8172 } 8173 8174 if (un->un_f_blockcount_is_valid != TRUE) { 8175 /* 8176 * Set capacity error stat to 0 for no media. This ensures 8177 * a valid capacity is displayed in response to 'iostat -E' 8178 * when no media is present in the device. 8179 */ 8180 stp->sd_capacity.value.ui64 = 0; 8181 } else { 8182 /* 8183 * Multiply un_blockcount by un->un_sys_blocksize to get 8184 * capacity. 8185 * 8186 * Note: for non-512 blocksize devices "un_blockcount" has been 8187 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8188 * (un_tgt_blocksize / un->un_sys_blocksize). 8189 */ 8190 stp->sd_capacity.value.ui64 = (uint64_t) 8191 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8192 } 8193 } 8194 8195 8196 /* 8197 * Function: sd_set_pstats 8198 * 8199 * Description: This routine instantiates and initializes the partition 8200 * stats for each partition with more than zero blocks. 8201 * (4363169) 8202 * 8203 * Arguments: un - driver soft state (unit) structure 8204 * 8205 * Context: Kernel thread context 8206 */ 8207 8208 static void 8209 sd_set_pstats(struct sd_lun *un) 8210 { 8211 char kstatname[KSTAT_STRLEN]; 8212 int instance; 8213 int i; 8214 diskaddr_t nblks = 0; 8215 char *partname = NULL; 8216 8217 ASSERT(un != NULL); 8218 8219 instance = ddi_get_instance(SD_DEVINFO(un)); 8220 8221 /* Note: x86: is this a VTOC8/VTOC16 difference?
*/ 8222 for (i = 0; i < NSDMAP; i++) { 8223 8224 if (cmlb_partinfo(un->un_cmlbhandle, i, 8225 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8226 continue; 8227 mutex_enter(SD_MUTEX(un)); 8228 8229 if ((un->un_pstats[i] == NULL) && 8230 (nblks != 0)) { 8231 8232 (void) snprintf(kstatname, sizeof (kstatname), 8233 "%s%d,%s", sd_label, instance, 8234 partname); 8235 8236 un->un_pstats[i] = kstat_create(sd_label, 8237 instance, kstatname, "partition", KSTAT_TYPE_IO, 8238 1, KSTAT_FLAG_PERSISTENT); 8239 if (un->un_pstats[i] != NULL) { 8240 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8241 kstat_install(un->un_pstats[i]); 8242 } 8243 } 8244 mutex_exit(SD_MUTEX(un)); 8245 } 8246 } 8247 8248 8249 #if (defined(__fibre)) 8250 /* 8251 * Function: sd_init_event_callbacks 8252 * 8253 * Description: This routine initializes the insertion and removal event 8254 * callbacks. (fibre only) 8255 * 8256 * Arguments: un - driver soft state (unit) structure 8257 * 8258 * Context: Kernel thread context 8259 */ 8260 8261 static void 8262 sd_init_event_callbacks(struct sd_lun *un) 8263 { 8264 ASSERT(un != NULL); 8265 8266 if ((un->un_insert_event == NULL) && 8267 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8268 &un->un_insert_event) == DDI_SUCCESS)) { 8269 /* 8270 * Add the callback for an insertion event 8271 */ 8272 (void) ddi_add_event_handler(SD_DEVINFO(un), 8273 un->un_insert_event, sd_event_callback, (void *)un, 8274 &(un->un_insert_cb_id)); 8275 } 8276 8277 if ((un->un_remove_event == NULL) && 8278 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8279 &un->un_remove_event) == DDI_SUCCESS)) { 8280 /* 8281 * Add the callback for a removal event 8282 */ 8283 (void) ddi_add_event_handler(SD_DEVINFO(un), 8284 un->un_remove_event, sd_event_callback, (void *)un, 8285 &(un->un_remove_cb_id)); 8286 } 8287 } 8288 8289 8290 /* 8291 * Function: sd_event_callback 8292 * 8293 * Description: This routine handles insert/remove events (photon). The 8294 * state is changed to OFFLINE, which can be used to suppress 8295 * error msgs. (fibre only) 8296 * 8297 * Arguments: un - driver soft state (unit) structure 8298 * 8299 * Context: Callout thread context 8300 */ 8301 /* ARGSUSED */ 8302 static void 8303 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8304 void *bus_impldata) 8305 { 8306 struct sd_lun *un = (struct sd_lun *)arg; 8307 8308 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8309 if (event == un->un_insert_event) { 8310 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8311 mutex_enter(SD_MUTEX(un)); 8312 if (un->un_state == SD_STATE_OFFLINE) { 8313 if (un->un_last_state != SD_STATE_SUSPENDED) { 8314 un->un_state = un->un_last_state; 8315 } else { 8316 /* 8317 * We have gone through SUSPEND/RESUME while 8318 * we were offline. Restore the last state 8319 */ 8320 un->un_state = un->un_save_state; 8321 } 8322 } 8323 mutex_exit(SD_MUTEX(un)); 8324 8325 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8326 } else if (event == un->un_remove_event) { 8327 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8328 mutex_enter(SD_MUTEX(un)); 8329 /* 8330 * We need to handle an event callback that occurs during 8331 * the suspend operation, since we don't prevent it.
8332 */ 8333 if (un->un_state != SD_STATE_OFFLINE) { 8334 if (un->un_state != SD_STATE_SUSPENDED) { 8335 New_state(un, SD_STATE_OFFLINE); 8336 } else { 8337 un->un_last_state = SD_STATE_OFFLINE; 8338 } 8339 } 8340 mutex_exit(SD_MUTEX(un)); 8341 } else { 8342 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8343 "!Unknown event\n"); 8344 } 8345 8346 } 8347 #endif 8348 8349 /* 8350 * Function: sd_cache_control() 8351 * 8352 * Description: This routine is the driver entry point for setting 8353 * read and write caching by modifying the WCE (write cache 8354 * enable) and RCD (read cache disable) bits of mode 8355 * page 8 (MODEPAGE_CACHING). 8356 * 8357 * Arguments: un - driver soft state (unit) structure 8358 * rcd_flag - flag for controlling the read cache 8359 * wce_flag - flag for controlling the write cache 8360 * 8361 * Return Code: EIO 8362 * code returned by sd_send_scsi_MODE_SENSE and 8363 * sd_send_scsi_MODE_SELECT 8364 * 8365 * Context: Kernel Thread 8366 */ 8367 8368 static int 8369 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8370 { 8371 struct mode_caching *mode_caching_page; 8372 uchar_t *header; 8373 size_t buflen; 8374 int hdrlen; 8375 int bd_len; 8376 int rval = 0; 8377 struct mode_header_grp2 *mhp; 8378 8379 ASSERT(un != NULL); 8380 8381 /* 8382 * Do a test unit ready; otherwise a mode sense may not work if this 8383 * is the first command sent to the device after boot. 8384 */ 8385 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8386 8387 if (un->un_f_cfg_is_atapi == TRUE) { 8388 hdrlen = MODE_HEADER_LENGTH_GRP2; 8389 } else { 8390 hdrlen = MODE_HEADER_LENGTH; 8391 } 8392 8393 /* 8394 * Allocate memory for the retrieved mode page and its headers. Set 8395 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8396 * we get all of the mode sense data; otherwise, the mode select 8397 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8398 */ 8399 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8400 sizeof (struct mode_cache_scsi3); 8401 8402 header = kmem_zalloc(buflen, KM_SLEEP); 8403 8404 /* Get the information from the device. */ 8405 if (un->un_f_cfg_is_atapi == TRUE) { 8406 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8407 MODEPAGE_CACHING, SD_PATH_DIRECT); 8408 } else { 8409 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8410 MODEPAGE_CACHING, SD_PATH_DIRECT); 8411 } 8412 if (rval != 0) { 8413 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8414 "sd_cache_control: Mode Sense Failed\n"); 8415 kmem_free(header, buflen); 8416 return (rval); 8417 } 8418 8419 /* 8420 * Determine size of Block Descriptors in order to locate 8421 * the mode page data. ATAPI devices return 0, SCSI devices 8422 * should return MODE_BLK_DESC_LENGTH.
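 *
 * The returned buffer is laid out as sketched below, which is why the
 * page pointer is computed as (header + hdrlen + bd_len):
 *
 *	+------------------------+  offset 0
 *	| mode parameter header  |  hdrlen bytes
 *	+------------------------+  hdrlen
 *	| block descriptor(s)    |  bd_len bytes (0 for ATAPI)
 *	+------------------------+  hdrlen + bd_len
 *	| caching mode page data |
 *	+------------------------+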
8423 */ 8424 if (un->un_f_cfg_is_atapi == TRUE) { 8425 mhp = (struct mode_header_grp2 *)header; 8426 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8427 } else { 8428 bd_len = ((struct mode_header *)header)->bdesc_length; 8429 } 8430 8431 if (bd_len > MODE_BLK_DESC_LENGTH) { 8432 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8433 "sd_cache_control: Mode Sense returned invalid " 8434 "block descriptor length\n"); 8435 kmem_free(header, buflen); 8436 return (EIO); 8437 } 8438 8439 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8440 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8441 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8442 " caching page code mismatch %d\n", 8443 mode_caching_page->mode_page.code); 8444 kmem_free(header, buflen); 8445 return (EIO); 8446 } 8447 8448 /* Check the relevant bits on successful mode sense. */ 8449 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8450 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8451 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8452 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8453 8454 size_t sbuflen; 8455 uchar_t save_pg; 8456 8457 /* 8458 * Construct select buffer length based on the 8459 * length of the sense data returned. 8460 */ 8461 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8462 sizeof (struct mode_page) + 8463 (int)mode_caching_page->mode_page.length; 8464 8465 /* 8466 * Set the caching bits as requested. 8467 */ 8468 if (rcd_flag == SD_CACHE_ENABLE) 8469 mode_caching_page->rcd = 0; 8470 else if (rcd_flag == SD_CACHE_DISABLE) 8471 mode_caching_page->rcd = 1; 8472 8473 if (wce_flag == SD_CACHE_ENABLE) 8474 mode_caching_page->wce = 1; 8475 else if (wce_flag == SD_CACHE_DISABLE) 8476 mode_caching_page->wce = 0; 8477 8478 /* 8479 * Save the page if the mode sense says the 8480 * drive supports it. 8481 */ 8482 save_pg = mode_caching_page->mode_page.ps ? 8483 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8484 8485 /* Clear reserved bits before mode select. */ 8486 mode_caching_page->mode_page.ps = 0; 8487 8488 /* 8489 * Clear out mode header for mode select. 8490 * The rest of the retrieved page will be reused. 8491 */ 8492 bzero(header, hdrlen); 8493 8494 if (un->un_f_cfg_is_atapi == TRUE) { 8495 mhp = (struct mode_header_grp2 *)header; 8496 mhp->bdesc_length_hi = bd_len >> 8; 8497 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8498 } else { 8499 ((struct mode_header *)header)->bdesc_length = bd_len; 8500 } 8501 8502 /* Issue mode select to change the cache settings */ 8503 if (un->un_f_cfg_is_atapi == TRUE) { 8504 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8505 sbuflen, save_pg, SD_PATH_DIRECT); 8506 } else { 8507 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8508 sbuflen, save_pg, SD_PATH_DIRECT); 8509 } 8510 } 8511 8512 kmem_free(header, buflen); 8513 return (rval); 8514 } 8515 8516 8517 /* 8518 * Function: sd_get_write_cache_enabled() 8519 * 8520 * Description: This routine is the driver entry point for determining if 8521 * write caching is enabled. It examines the WCE (write cache 8522 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8523 * 8524 * Arguments: un - driver soft state (unit) structure 8525 * is_enabled - pointer to int where write cache enabled state 8526 * is returned (non-zero -> write cache enabled) 8527 * 8528 * 8529 * Return Code: EIO 8530 * code returned by sd_send_scsi_MODE_SENSE 8531 * 8532 * Context: Kernel Thread 8533 * 8534 * NOTE: If ioctl is added to disable write cache, this sequence should 8535 * be followed so that no locking is required for accesses to 8536 * un->un_f_write_cache_enabled: 8537 * do mode select to clear wce 8538 * do synchronize cache to flush cache 8539 * set un->un_f_write_cache_enabled = FALSE 8540 * 8541 * Conversely, an ioctl to enable the write cache should be done 8542 * in this order: 8543 * set un->un_f_write_cache_enabled = TRUE 8544 * do mode select to set wce 8545 */ 8546 8547 static int 8548 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8549 { 8550 struct mode_caching *mode_caching_page; 8551 uchar_t *header; 8552 size_t buflen; 8553 int hdrlen; 8554 int bd_len; 8555 int rval = 0; 8556 8557 ASSERT(un != NULL); 8558 ASSERT(is_enabled != NULL); 8559 8560 /* in case of error, flag as enabled */ 8561 *is_enabled = TRUE; 8562 8563 /* 8564 * Do a test unit ready, otherwise a mode sense may not work if this 8565 * is the first command sent to the device after boot. 8566 */ 8567 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8568 8569 if (un->un_f_cfg_is_atapi == TRUE) { 8570 hdrlen = MODE_HEADER_LENGTH_GRP2; 8571 } else { 8572 hdrlen = MODE_HEADER_LENGTH; 8573 } 8574 8575 /* 8576 * Allocate memory for the retrieved mode page and its headers. Set 8577 * a pointer to the page itself. 8578 */ 8579 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8580 header = kmem_zalloc(buflen, KM_SLEEP); 8581 8582 /* Get the information from the device. */ 8583 if (un->un_f_cfg_is_atapi == TRUE) { 8584 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8585 MODEPAGE_CACHING, SD_PATH_DIRECT); 8586 } else { 8587 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8588 MODEPAGE_CACHING, SD_PATH_DIRECT); 8589 } 8590 if (rval != 0) { 8591 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8592 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8593 kmem_free(header, buflen); 8594 return (rval); 8595 } 8596 8597 /* 8598 * Determine size of Block Descriptors in order to locate 8599 * the mode page data. ATAPI devices return 0, SCSI devices 8600 * should return MODE_BLK_DESC_LENGTH. 
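 * (See the buffer layout sketch in sd_cache_control() above; the same
 * header/descriptor/page arithmetic applies here.)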
8601 */ 8602 if (un->un_f_cfg_is_atapi == TRUE) { 8603 struct mode_header_grp2 *mhp; 8604 mhp = (struct mode_header_grp2 *)header; 8605 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8606 } else { 8607 bd_len = ((struct mode_header *)header)->bdesc_length; 8608 } 8609 8610 if (bd_len > MODE_BLK_DESC_LENGTH) { 8611 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8612 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8613 "block descriptor length\n"); 8614 kmem_free(header, buflen); 8615 return (EIO); 8616 } 8617 8618 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8619 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8620 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled:" 8621 " Mode Sense caching page code mismatch %d\n", 8622 mode_caching_page->mode_page.code); 8623 kmem_free(header, buflen); 8624 return (EIO); 8625 } 8626 *is_enabled = mode_caching_page->wce; 8627 8628 kmem_free(header, buflen); 8629 return (0); 8630 } 8631 8632 /* 8633 * Function: sd_get_nv_sup() 8634 * 8635 * Description: This routine is the driver entry point for 8636 * determining whether non-volatile cache is supported. This 8637 * determination process works as follows: 8638 * 8639 * 1. sd first queries sd.conf to see whether the 8640 * suppress_cache_flush bit is set for this device. 8641 * 8642 * 2. if it is not set there, sd queries the internal disk table. 8643 * 8644 * 3. if either sd.conf or the internal disk table specifies 8645 * that cache flushes be suppressed, we don't bother checking 8646 * the NV_SUP bit. 8647 * 8648 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8649 * the optional INQUIRY VPD page 0x86. If the device 8650 * supports VPD page 0x86, sd examines the NV_SUP 8651 * (non-volatile cache support) bit in the INQUIRY VPD page 8652 * 0x86: 8653 * o If the NV_SUP bit is set, sd assumes the device has a 8654 * non-volatile cache and sets 8655 * un_f_sync_nv_supported to TRUE. 8656 * o Otherwise the cache is not non-volatile, and 8657 * un_f_sync_nv_supported is set to FALSE. 8658 * 8659 * Arguments: un - driver soft state (unit) structure 8660 * 8661 * Return Code: 8662 * 8663 * Context: Kernel Thread 8664 */ 8665 8666 static void 8667 sd_get_nv_sup(struct sd_lun *un) 8668 { 8669 int rval = 0; 8670 uchar_t *inq86 = NULL; 8671 size_t inq86_len = MAX_INQUIRY_SIZE; 8672 size_t inq86_resid = 0; 8673 struct dk_callback *dkc; 8674 8675 ASSERT(un != NULL); 8676 8677 mutex_enter(SD_MUTEX(un)); 8678 8679 /* 8680 * Be conservative on the device's support of 8681 * the SYNC_NV bit: un_f_sync_nv_supported is 8682 * initialized to FALSE. 8683 */ 8684 un->un_f_sync_nv_supported = FALSE; 8685 8686 /* 8687 * If either sd.conf or the internal disk table 8688 * specifies that cache flushes be suppressed, then 8689 * we don't bother checking the NV_SUP bit.
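 * (In the Extended INQUIRY Data VPD page the NV_SUP flag is carried in
 * byte 6, hence the inq86[6] test against SD_VPD_NV_SUP below.)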
8690 */ 8691 if (un->un_f_suppress_cache_flush == TRUE) { 8692 mutex_exit(SD_MUTEX(un)); 8693 return; 8694 } 8695 8696 if (sd_check_vpd_page_support(un) == 0 && 8697 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8698 mutex_exit(SD_MUTEX(un)); 8699 /* collect page 86 data if available */ 8700 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8701 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8702 0x01, 0x86, &inq86_resid); 8703 8704 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8705 SD_TRACE(SD_LOG_COMMON, un, 8706 "sd_get_nv_sup: \ 8707 successfully get VPD page: %x \ 8708 PAGE LENGTH: %x BYTE 6: %x\n", 8709 inq86[1], inq86[3], inq86[6]); 8710 8711 mutex_enter(SD_MUTEX(un)); 8712 /* 8713 * Check the value of the NV_SUP bit: 8714 * un_f_sync_nv_supported is set to TRUE only 8715 * if the device reports the NV_SUP bit as 1. 8716 */ 8717 if (inq86[6] & SD_VPD_NV_SUP) { 8718 un->un_f_sync_nv_supported = TRUE; 8719 } 8720 mutex_exit(SD_MUTEX(un)); 8721 } 8722 kmem_free(inq86, inq86_len); 8723 } else { 8724 mutex_exit(SD_MUTEX(un)); 8725 } 8726 8727 /* 8728 * Send a SYNC CACHE command to check whether the 8729 * SYNC_NV bit is supported. By the time this command is 8730 * issued, un_f_sync_nv_supported should hold the correct value. 8731 */ 8732 mutex_enter(SD_MUTEX(un)); 8733 if (un->un_f_sync_nv_supported) { 8734 mutex_exit(SD_MUTEX(un)); 8735 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8736 dkc->dkc_flag = FLUSH_VOLATILE; 8737 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8738 8739 /* 8740 * Send a TEST UNIT READY command to the device. This should 8741 * clear any outstanding UNIT ATTENTION that may be present. 8742 */ 8743 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8744 8745 kmem_free(dkc, sizeof (struct dk_callback)); 8746 } else { 8747 mutex_exit(SD_MUTEX(un)); 8748 } 8749 8750 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8751 un_f_suppress_cache_flush is set to %d\n", 8752 un->un_f_suppress_cache_flush); 8753 } 8754 8755 /* 8756 * Function: sd_make_device 8757 * 8758 * Description: Utility routine to return the Solaris device number from 8759 * the data in the device's dev_info structure. 8760 * 8761 * Return Code: The Solaris device number 8762 * 8763 * Context: Any 8764 */ 8765 8766 static dev_t 8767 sd_make_device(dev_info_t *devi) 8768 { 8769 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8770 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8771 } 8772 8773 8774 /* 8775 * Function: sd_pm_entry 8776 * 8777 * Description: Called at the start of a new command to manage power 8778 * and busy status of a device. This includes determining whether 8779 * the current power state of the device is sufficient for 8780 * performing the command or whether it must be changed. 8781 * The PM framework is notified appropriately. 8782 * Only with a return status of DDI_SUCCESS will the 8783 * component be marked busy to the framework. 8784 * 8785 * All callers of sd_pm_entry must check the return status 8786 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8787 * of DDI_FAILURE indicates the device failed to power up. 8788 * In this case un_pm_count has been adjusted so the result 8789 * on exit is still powered down, i.e., count is less than 0. 8790 * Calling sd_pm_exit with this count value hits an ASSERT. 8791 * 8792 * Return Code: DDI_SUCCESS or DDI_FAILURE 8793 * 8794 * Context: Kernel thread context.
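 *
 * A typical caller, in sketch form (illustrative only, not a verbatim
 * excerpt from this driver; error handling elided):
 *
 *	if (sd_pm_entry(un) != DDI_SUCCESS)
 *		return (EIO);	(device could not be powered up)
 *
 *	... issue the command via the iopath ...
 *
 *	sd_pm_exit(un);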
8795 */ 8796 8797 static int 8798 sd_pm_entry(struct sd_lun *un) 8799 { 8800 int return_status = DDI_SUCCESS; 8801 8802 ASSERT(!mutex_owned(SD_MUTEX(un))); 8803 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8804 8805 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8806 8807 if (un->un_f_pm_is_enabled == FALSE) { 8808 SD_TRACE(SD_LOG_IO_PM, un, 8809 "sd_pm_entry: exiting, PM not enabled\n"); 8810 return (return_status); 8811 } 8812 8813 /* 8814 * Just increment a counter if PM is enabled. On the transition from 8815 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8816 * the count with each IO and mark the device as idle when the count 8817 * hits 0. 8818 * 8819 * If the count is less than 0 the device is powered down. If a powered 8820 * down device is successfully powered up then the count must be 8821 * incremented to reflect the power up. Note that it'll get incremented 8822 * a second time to become busy. 8823 * 8824 * Because the following has the potential to change the device state 8825 * and must release the un_pm_mutex to do so, only one thread can be 8826 * allowed through at a time. 8827 */ 8828 8829 mutex_enter(&un->un_pm_mutex); 8830 while (un->un_pm_busy == TRUE) { 8831 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8832 } 8833 un->un_pm_busy = TRUE; 8834 8835 if (un->un_pm_count < 1) { 8836 8837 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8838 8839 /* 8840 * Indicate we are now busy so the framework won't attempt to 8841 * power down the device. This call will only fail if either 8842 * we passed a bad component number or the device has no 8843 * components. Neither of these should ever happen. 8844 */ 8845 mutex_exit(&un->un_pm_mutex); 8846 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8847 ASSERT(return_status == DDI_SUCCESS); 8848 8849 mutex_enter(&un->un_pm_mutex); 8850 8851 if (un->un_pm_count < 0) { 8852 mutex_exit(&un->un_pm_mutex); 8853 8854 SD_TRACE(SD_LOG_IO_PM, un, 8855 "sd_pm_entry: power up component\n"); 8856 8857 /* 8858 * pm_raise_power will cause sdpower to be called 8859 * which brings the device power level to the 8860 * desired state, ON in this case. If successful, 8861 * un_pm_count and un_power_level will be updated 8862 * appropriately. 8863 */ 8864 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8865 SD_SPINDLE_ON); 8866 8867 mutex_enter(&un->un_pm_mutex); 8868 8869 if (return_status != DDI_SUCCESS) { 8870 /* 8871 * Power up failed. 8872 * Idle the device and adjust the count 8873 * so the result on exit is that we're 8874 * still powered down, ie. count is less than 0. 8875 */ 8876 SD_TRACE(SD_LOG_IO_PM, un, 8877 "sd_pm_entry: power up failed," 8878 " idle the component\n"); 8879 8880 (void) pm_idle_component(SD_DEVINFO(un), 0); 8881 un->un_pm_count--; 8882 } else { 8883 /* 8884 * Device is powered up, verify the 8885 * count is non-negative. 8886 * This is debug only. 8887 */ 8888 ASSERT(un->un_pm_count == 0); 8889 } 8890 } 8891 8892 if (return_status == DDI_SUCCESS) { 8893 /* 8894 * For performance, now that the device has been tagged 8895 * as busy, and it's known to be powered up, update the 8896 * chain types to use jump tables that do not include 8897 * pm. This significantly lowers the overhead and 8898 * therefore improves performance. 
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "	changed uscsi_chain_type to %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps
				 * the device busy with respect to the PM
				 * layer until the timer fires, at which time
				 * it'll get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p  - pointer to device number
 *	      flag   - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *	      otyp   - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *	      cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open(9e)
		 * fails with ENXIO (4238046).
		 */
		/*
		 * Unconditionally clearing the probe cache is fine with
		 * separate sd/ssd binaries; on x86, where parallel SCSI
		 * and fibre can share one binary, it could be an issue.
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this
	 * count we can at least keep our open and detach routines from
	 * racing against each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay  = (flag & (FNDELAY | FNONBLOCK));
	part	 = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on writable
			 * DVD drives until sdstrategy; do not fail the open
			 * even if FWRITE is set, since the device may be
			 * writable depending upon the media, and the media
			 * can change after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un,
					    "sdopen: write to cd or write "
					    "protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return.
	 * Check if disk is ready and has a valid geometry later.
	 */
	if (!nodelay) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_ready_and_valid(un);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non-CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
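		 *
		 * (The loop below scans un_ocmap.chkd[], which overlays the
		 * per-open-type maps as a plain byte array; the device is
		 * treated as not open only if every byte is zero. A rough
		 * equivalent, for illustration only:
		 *
		 *	int is_open = 0;
		 *	for (i = 0; i < OCSIZE; i++)
		 *		is_open |= un->un_ocmap.chkd[i];
		 * )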
		 */
		cp = &un->un_ocmap.chkd[0];
		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
			if (*cp != (uchar_t)0) {
				break;
			}
			cp++;
		}
		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

#endif
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/* Set up open and exclusive open flags */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
	    "open of part %d type %d\n", part, otyp);

	mutex_exit(SD_MUTEX(un));
	if (!nodelay) {
		sd_pm_exit(un);
	}

	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
	return (DDI_SUCCESS);

excl_open_fail:
	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
	rval = EBUSY;

open_fail:
	mutex_exit(SD_MUTEX(un));

	/*
	 * On a failed open we must exit the pm management.
	 */
	if (!nodelay) {
		sd_pm_exit(un);
	}
open_failed_with_pm:
	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	if (otyp == OTYP_LYR) {
		un->un_layer_count--;
	}
	mutex_exit(&sd_detach_mutex);

	return (rval);
}


/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
 *
 * Arguments: dev    - device number
 *	      flag   - file status flag, informational only
 *	      otyp   - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *	      cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	uchar_t		*cp;
	int		part;
	int		nodelay;
	int		rval = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (ENXIO);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	part = SDPART(dev);
	nodelay = flag & (FNDELAY | FNONBLOCK);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdclose: close of part %d type %d\n", part, otyp);

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/* Don't proceed if power is being changed. */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}

	/* Update the open partition map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != 0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

		/*
		 * We avoid persistence upon the last close, and set
		 * the throttle back to the maximum.
		 */
		un->un_throttle = un->un_saved_throttle;

		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));

		} else {
			/*
			 * Flush any outstanding writes in NVRAM cache.
			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
			 * cmd, it may not work for non-Pluto devices.
			 * SYNCHRONIZE CACHE is not required for removables,
			 * except DVD-RAM drives.
			 *
			 * Also note: because SYNCHRONIZE CACHE is currently
			 * the only command issued here that requires the
			 * drive be powered up, only do the power up before
			 * sending the Sync Cache command. If additional
			 * commands are added which require a powered up
			 * drive, the following sequence may have to change.
			 *
			 * And finally, note that parallel SCSI on SPARC
			 * only issues a Sync Cache to DVD-RAM, a newly
			 * supported device.
			 */
#if defined(__i386) || defined(__amd64)
			if (un->un_f_sync_cache_supported ||
			    un->un_f_dvdram_writable_device == TRUE) {
#else
			if (un->un_f_dvdram_writable_device == TRUE) {
#endif
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval =
					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
					/* ignore error if not supported */
					if (rval == ENOTSUP) {
						rval = 0;
					} else if (rval != 0) {
						rval = EIO;
					}
					sd_pm_exit(un);
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * For devices that support DOOR_LOCK, send an ALLOW
			 * MEDIA REMOVAL command, but don't get upset if it
			 * fails. We need to raise the power of the drive
			 * before we can call sd_send_scsi_DOORLOCK().
			 */
			if (un->un_f_doorlock_supported) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval = sd_send_scsi_DOORLOCK(un,
					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);

					sd_pm_exit(un);
					if (ISCD(un) && (rval != 0) &&
					    (nodelay != 0)) {
						rval = ENXIO;
					}
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * If a device has removable media, invalidate all
			 * parameters related to media, such as geometry,
			 * blocksize, and blockcount.
			 */
			if (un->un_f_has_removable_media) {
				sr_ejected(un);
			}

			/*
			 * Destroy the cache (if it exists) which was
			 * allocated for the write maps since this is
			 * the last close for this media.
			 */
			if (un->un_wm_cache) {
				/*
				 * Check if there are pending commands;
				 * if there are, print a warning and
				 * do not destroy the cache.
				 */
				if (un->un_ncmds_in_driver > 0) {
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "Unable to clean up memory "
					    "because of pending I/O\n");
				} else {
					kmem_cache_destroy(
					    un->un_wm_cache);
					un->un_wm_cache = NULL;
				}
			}
		}
	}

	mutex_exit(SD_MUTEX(un));
	sema_v(&un->un_semoclose);

	if (otyp == OTYP_LYR) {
		mutex_enter(&sd_detach_mutex);
		/*
		 * The detach routine may run when the layer count
		 * drops to zero.
		 */
		un->un_layer_count--;
		mutex_exit(&sd_detach_mutex);
	}

	return (rval);
}


/*
 * Function: sd_ready_and_valid
 *
 * Description: Test if device is ready and has a valid geometry.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_READY_VALID		ready and valid label
 *		SD_NOT_READY_VALID	not ready, no label
 *		SD_RESERVED_BY_OTHERS	reservation conflict
 *
 * Context: Never called at interrupt context.
 */

static int
sd_ready_and_valid(struct sd_lun *un)
{
	struct sd_errstats	*stp;
	uint64_t		capacity;
	uint_t			lbasize;
	int			rval = SD_READY_VALID;
	char			name_str[48];
	int			is_valid;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/*
	 * If a device has removable media, we must check if media is
	 * ready when checking if this device is ready and valid.
	 */
	if (un->un_f_has_removable_media) {
		mutex_exit(SD_MUTEX(un));
		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/* capacity has to be read every open. */
			mutex_exit(SD_MUTEX(un));
			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT) != 0) {
				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;
				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(un, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from non-cd
		 * devices.
		 */
		mutex_exit(SD_MUTEX(un));
		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non-512 block device, allocate space for
	 * the wmap cache. This is being done here since every time
	 * a media is changed this routine will be called and the
	 * block size is a function of media rather than device.
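	 *
	 * (For example, assuming instance 3 of the sd driver, the
	 * snprintf below would name the cache "sd3_cache".)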
	 */
	if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				rval = ENOMEM;
				goto done;
			}
		}
	}

	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict),
		 * SD_RESERVED_BY_OTHERS will be returned to indicate
		 * reservation conflict. If the TUR fails for other
		 * reasons, SD_NOT_READY_VALID will be returned.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));

		if (err != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "offline or reservation conflict\n");
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				rval = SD_RESERVED_BY_OTHERS;
			} else {
				rval = SD_NOT_READY_VALID;
			}
			goto done;
		}
	}

	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * If this device supports DOOR_LOCK command, try and send
	 * this command to PREVENT MEDIA REMOVAL, but don't get upset
	 * if it fails. For a CD, however, it is an error.
	 */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT) != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_blockcount > 0).
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *		conjunction with physio(9F).
 *
 * Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 * Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
	struct sd_lun	*un;
	int		instance;

	instance = SDUNIT(bp->b_edev);

	un = ddi_get_soft_state(sd_state, instance);
	ASSERT(un != NULL);

	if (bp->b_bcount > un->un_max_xfer_size) {
		bp->b_bcount = un->un_max_xfer_size;
	}
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 * Arguments: dev	- device number
 *	      uio	- structure pointer describing where data is to be
 *			  stored in user's space
 *	      cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 * Arguments: dev	- device number
 *	      uio	- structure pointer describing where data is stored
 *			  in user's space
 *	      cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
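 *
 * Note: a worked example of the alignment checks made below, assuming
 * a 512-byte system block size (so secmask == 0x1FF): a file offset of
 * 0x2200 passes the offset check (0x2200 & 0x1FF == 0), while a
 * transfer length of 0x2F0 fails the length check (0x2F0 & 0x1FF != 0),
 * so such a request would return EINVAL.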
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 * Arguments: dev	- device number
 *	      aio	- structure pointer describing where data is to be
 *			  stored
 *	      cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}
	return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 * Arguments: dev	- device number
 *	      aio	- structure pointer describing where data is stored
 *	      cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
	}
	return (err);
}




/*
 * Driver IO processing follows the following sequence:
 *
 *     sdioctl(9E)      sdstrategy(9E)                  biodone(9F)
 *         |                 |                              ^
 *         v                 v                              |
 * sd_send_scsi_cmd()  ddi_xbuf_qstrategy()                 +-------------------+
 *         |                 |                              |                   |
 *         v                 |                              |                   |
 * sd_uscsi_strategy() sd_xbuf_strategy()           sd_buf_iodone()     sd_uscsi_iodone()
 *         |                 |                              ^                   ^
 *         v                 v                              |                   |
 * SD_BEGIN_IOSTART()  SD_BEGIN_IOSTART()                   |                   |
 *         |                 |                              |                   |
 *     +---+                 |                              +------------+      +-------+
 *     |                     |                                           |              |
 *     |   SD_NEXT_IOSTART() |                          SD_NEXT_IODONE() |              |
 *     |                     v                                           |              |
 *     |   sd_mapblockaddr_iostart()          sd_mapblockaddr_iodone()   |              |
 *     |                     |                              ^            |              |
 *     |   SD_NEXT_IOSTART() |                          SD_NEXT_IODONE() |              |
 *     |                     v                              |            |              |
 *     |   sd_mapblocksize_iostart()          sd_mapblocksize_iodone()   |              |
 *     |                     |                              ^            |              |
 *     |   SD_NEXT_IOSTART() |                          SD_NEXT_IODONE() |              |
 *     |                     v                              |            |              |
 *     |   sd_checksum_iostart()              sd_checksum_iodone()       |              |
 *     |                     |                              ^            |              |
 *     +-> SD_NEXT_IOSTART() |                 SD_NEXT_IODONE() +------------->+        |
 *     |                     v                              |            |              |
 *     |   sd_pm_iostart()                    sd_pm_iodone()             |              |
 *     |                     |                              ^            |              |
 *     |                     |                              |            |              |
 *     +-> SD_NEXT_IOSTART() |              SD_BEGIN_IODONE()--+--------------+
 *                           |                              ^
 *                           v                              |
 *                    sd_core_iostart()                     |
 *                           |                              |
 *                           |              +------>(*destroypkt)()
 *                           +-> sd_start_cmds() <-+        |          |
 *                           |          |          |        |          v
 *                           |          |          |        |  scsi_destroy_pkt(9F)
 *                           |          |          |        |
 *                           +->(*initpkt)()       +- sdintr()
 *                           |  |                  |   |
 *                           |  +-> scsi_init_pkt(9F)  |  +-> sd_handle_xxx()
 *                           |  +-> scsi_setup_cdb(9F) |
 *                           |                         |
 *                           +--> scsi_transport(9F)   |
 *                                      |              |
 *                                      +----> SCSA ---->+
 *
 *
 * This code is based upon the following presumptions:
 *
 *   - iostart and iodone functions operate on buf(9S) structures. These
 *     functions perform the necessary operations on the buf(9S) and pass
 *     them along to the next function in the chain by using the macros
 *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
 *     (for iodone side functions).
 *
 *   - The iostart side functions may sleep. The iodone side functions
 *     are called under interrupt context and may NOT sleep. Therefore
 *     iodone side functions also may not call iostart side functions.
 *     (NOTE: iostart side functions should NOT sleep for memory, as
 *     this could result in deadlock.)
 *
 *   - An iostart side function may call its corresponding iodone side
 *     function directly (if necessary).
 *
 *   - In the event of an error, an iostart side function can return a buf(9S)
 *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
 *     b_error in the usual way of course).
 *
 *   - The taskq mechanism may be used by the iodone side functions to dispatch
 *     requests to the iostart side functions. The iostart side functions in
 *     this case would be called under the context of a taskq thread, so it's
 *     OK for them to block/sleep/spin in this case.
 *
 *   - iostart side functions may allocate "shadow" buf(9S) structs and
 *     pass them along to the next function in the chain. The corresponding
 *     iodone side functions must coalesce the "shadow" bufs and return
 *     the "original" buf to the next higher layer.
 *
 *   - The b_private field of the buf(9S) struct holds a pointer to
 *     an sd_xbuf struct, which contains information needed to
 *     construct the scsi_pkt for the command.
 *
 *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *     layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it applies only to DVD-RAM and MO drives
 * currently, for which performance is not the main criterion at this stage.
 * Note: it remains to be explored whether a single taskq can be used in
 * the future.
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: None; pointers to the created taskqs are stored in the
 *		sd_tq and sd_wmr_tq globals.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	    (definitely NOT recommended for 3rd-party drivers!) :-)
 *	  - taskq_create() will block for memory, also it will panic
 *	    if it cannot create the requested number of threads.
 *	  - Currently taskq_create() creates threads that cannot be
 *	    swapped.
 *	  - We use TASKQ_PREPOPULATE to ensure we have an adequate
 *	    supply of taskq entries at interrupt time (ie, so that we
 *	    do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
	char	taskq_name[TASKQ_NAMELEN];

	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_drv_taskq", sd_label);
	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_rmw_taskq", sd_label);
	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));
}


/*
 * Function: sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context: Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy(9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* As was done in the past, fail new cmds if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, so we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * if its power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an
	 * optimized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *	      bp - ptr to associated buf(9S)
 *	      xp - ptr to associated sd_xbuf
 *	      chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *	      pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			 initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un	= un;
	xp->xb_pktp	= NULL;
	xp->xb_pktinfo	= pktinfop;
	xp->xb_private	= bp->b_private;
	xp->xb_blkno	= (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members
	 * anyway, we save a fair number of cycles by doing explicit
	 * assignment of zero.
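	 *
	 * (The alternative alluded to above would be, roughly:
	 *
	 *	bzero(xp, sizeof (struct sd_xbuf));
	 *
	 * followed by redoing the member assignments made earlier in this
	 * function; the explicit zero-assignments below avoid rewriting
	 * those members. Sketch only, not driver code.)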
	 */
	xp->xb_pkt_flags	= 0;
	xp->xb_dma_resid	= 0;
	xp->xb_retry_count	= 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count	= 0;
	xp->xb_nr_retry_count	= 0;
	xp->xb_sense_bp		= NULL;
	xp->xb_sense_status	= 0;
	xp->xb_sense_state	= 0;
	xp->xb_sense_resid	= 0;

	bp->b_private	= xp;
	bp->b_flags	&= ~(B_DONE | B_ERROR);
	bp->b_resid	= 0;
	bp->av_forw	= NULL;
	bp->av_back	= NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	mutex_exit(SD_MUTEX(un));

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;

	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate an extra buf for external USCSI commands. If the
	 * application asks for more than 20 bytes of sense data via USCSI,
	 * the SCSA layer will allocate a 252-byte sense buffer for the
	 * command.
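	 *
	 * (Hence the oversized allocation below: assuming SENSE_LENGTH is
	 * the 20-byte default and MAX_SENSE_LENGTH is 252, the xbuf is
	 * grown by 232 bytes so the larger sense buffer fits within the
	 * embedded sense area.)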
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for user (when called thru sdioctl),
 *		or for the driver
 *
 * Arguments: dev - the dev_t for the device
 *	      incmd - ptr to a valid uscsi_cmd struct
 *	      flag - bit flag, indicating open settings, 32/64 bit type
 *	      dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Waits for command to complete. Can sleep.
 */

static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	struct sd_lun		*un;
	int	format = 0;
	int	rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SDDEBUG
	switch (dataspace) {
	case UIO_USERSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
		break;
	case UIO_SYSSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
		break;
	default:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
		break;
	}
#endif

	rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
	    SD_ADDRESS(un), &uscmd);
	if (rval != 0) {
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
		    "scsi_uscsi_alloc_and_copyin failed\n");
		return (rval);
	}

	if ((uscmd->uscsi_cdb != NULL) &&
	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		format = 1;
	}

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy().
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = path_flag;
	uip->ui_cmdp  = uscmd;

	/*
	 * Commands sent with priority are intended for error recovery
	 * situations, and do not have retries performed.
	 */
	if (path_flag == SD_PATH_DIRECT_PRIORITY) {
		uscmd->uscsi_flags |= USCSI_DIAGNOSE;
	}
	uscmd->uscsi_flags &= ~USCSI_NOINTR;

	rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
	    sd_uscsi_strategy, NULL, uip);

#ifdef SDDEBUG
	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
	    "uscsi_status: 0x%02x  uscsi_resid:0x%x\n",
	    uscmd->uscsi_status, uscmd->uscsi_resid);
	if (uscmd->uscsi_bufaddr != NULL) {
		SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
		    "uscmd->uscsi_bufaddr: 0x%p  uscmd->uscsi_buflen:%d\n",
		    uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
		if (dataspace == UIO_SYSSPACE) {
			SD_DUMP_MEMORY(un, SD_LOG_IO,
			    "data", (uchar_t *)uscmd->uscsi_bufaddr,
			    uscmd->uscsi_buflen, SD_LOG_HEX);
		}
	}
#endif

	if (format == 1) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = FALSE;
		mutex_exit(SD_MUTEX(un));
	}

	(void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd);
	kmem_free(uip, sizeof (struct sd_uscsi_info));

	return (rval);
}


/*
 * Function: sd_buf_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
	biodone(bp);				/* bp is gone after this */

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
}


/*
 * Function: sd_uscsi_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
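 *
 *		Note that the sd_xbuf free done below must mirror the
 *		oversized allocation made in sd_uscsi_strategy() when the
 *		requested sense length exceeded SENSE_LENGTH.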
 */
/* ARGSUSED */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	bp->b_private = xp->xb_private;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
	    SENSE_LENGTH) {
		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH);
	} else {
		kmem_free(xp, sizeof (struct sd_xbuf));
	}

	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}


/*
 * Function: sd_mapblockaddr_iostart
 *
 * Description: Verify request lies within the partition limits for
 *		the indicated minor device. Issue "overrun" buf if
 *		request would exceed partition range. Converts
 *		partition-relative block address to absolute.
 *
 * Context: Can sleep
 *
 * Issues: This follows what the old code did, in terms of accessing
 *	   some of the partition info in the unit struct without holding
 *	   the mutex. This is a general issue, if the partition info
 *	   can be altered while IO is in progress... as soon as we send
 *	   a buf, its partitioning can be invalid before it gets to the
 *	   device. Probably the right fix is to move partitioning out
 *	   of the driver entirely.
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t		blocknum;	/* Block number specified by the buf */
	size_t		requested_nblocks;
	size_t		available_nblocks;
	int		partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf *xp;


	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, or if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	if (!SD_IS_VALID_LABEL(un) &&
	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
		/*
		 * For removable devices it is possible to start an I/O
		 * without a media by opening the device in nodelay mode.
		 * Also for writable CDs there can be many scenarios where
		 * there is no geometry yet but volume manager is trying to
		 * issue a read() just because it can see TOC on the CD. So
		 * do not print a message for removables.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "i/o to invalid geometry\n");
		}
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	partition = SDPART(bp->b_edev);

	nblocks = 0;
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);

	/*
	 * blocknum is the starting block number of the request. At this
	 * point it is still relative to the start of the minor device.
	 */
	blocknum = xp->xb_blkno;

	/*
	 * Legacy: If the starting block number is one past the last block
	 * in the partition, do not set B_ERROR in the buf.
	 */
	if (blocknum == nblocks)  {
		goto error_exit;
	}

	/*
	 * Confirm that the first block of the request lies within the
	 * partition limits. Also the requested number of bytes must be
	 * a multiple of the system block size.
	 */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/*
	 * If the requested # blocks exceeds the available # blocks, that
	 * is an overrun of the partition.
	 */
	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Allocate an "overrun" buf to allow the request to proceed
		 * for the amount of space available in the partition. The
		 * amount not transferred will be added into the b_resid
		 * when the operation is complete. The overrun buf
		 * replaces the original buf here, and the original buf
		 * is saved inside the overrun buf, for later use.
		 */
		size_t resid = SD_SYSBLOCKS2BYTES(un,
		    (offset_t)(requested_nblocks - available_nblocks));
		size_t count = bp->b_bcount - resid;
		/*
		 * Note: count is an unsigned entity thus it'll NEVER
		 * be less than 0 so ASSERT the original values are
		 * correct.
		 */
		ASSERT(bp->b_bcount >= resid);

		bp = sd_bioclone_alloc(bp, count, blocknum,
		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
		ASSERT(xp != NULL);
	}

	/* At this point there should be no residual for this buf. */
	ASSERT(bp->b_resid == 0);

	/* Convert the block number to an absolute address. */
	xp->xb_blkno += partition_offset;

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);

	return;

error_exit:
	bp->b_resid = bp->b_bcount;
	SD_BEGIN_IODONE(index, un, bp);
	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblockaddr_iodone
 *
 * Description: Completion-side processing for partition management.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* int	partition; */	/* Not used, see below. */
10908 /* 10909 * Function: sd_mapblockaddr_iodone 10910 * 10911 * Description: Completion-side processing for partition management. 10912 * 10913 * Context: May be called under interrupt context 10914 */ 10915 10916 static void 10917 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10918 { 10919 /* int partition; */ /* Not used, see below. */ 10920 ASSERT(un != NULL); 10921 ASSERT(bp != NULL); 10922 ASSERT(!mutex_owned(SD_MUTEX(un))); 10923 10924 SD_TRACE(SD_LOG_IO_PARTITION, un, 10925 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10926 10927 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10928 /* 10929 * We have an "overrun" buf to deal with... 10930 */ 10931 struct sd_xbuf *xp; 10932 struct buf *obp; /* ptr to the original buf */ 10933 10934 xp = SD_GET_XBUF(bp); 10935 ASSERT(xp != NULL); 10936 10937 /* Retrieve the pointer to the original buf */ 10938 obp = (struct buf *)xp->xb_private; 10939 ASSERT(obp != NULL); 10940 10941 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10942 bioerror(obp, bp->b_error); 10943 10944 sd_bioclone_free(bp); 10945 10946 /* 10947 * Get back the original buf. 10948 * Note that since the restoration of xb_blkno below 10949 * was removed, the sd_xbuf is not needed. 10950 */ 10951 bp = obp; 10952 /* 10953 * xp = SD_GET_XBUF(bp); 10954 * ASSERT(xp != NULL); 10955 */ 10956 } 10957 10958 /* 10959 * Convert xp->xb_blkno back to a minor-device relative value. 10960 * Note: this has been commented out, as it is not needed in the 10961 * current implementation of the driver (ie, this function 10962 * is at the top of the layering chains, so the info will be 10963 * discarded) and it is in the "hot" IO path. 10964 * 10965 * partition = getminor(bp->b_edev) & SDPART_MASK; 10966 * xp->xb_blkno -= un->un_offset[partition]; 10967 */ 10968 10969 SD_NEXT_IODONE(index, un, bp); 10970 10971 SD_TRACE(SD_LOG_IO_PARTITION, un, 10972 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10973 } 10974 10975 10976 /* 10977 * Function: sd_mapblocksize_iostart 10978 * 10979 * Description: Convert between system block size (un->un_sys_blocksize) 10980 * and target block size (un->un_tgt_blocksize). 10981 * 10982 * Context: Can sleep to allocate resources. 10983 * 10984 * Assumptions: A higher layer has already performed any partition validation, 10985 * and converted the xp->xb_blkno to an absolute value relative 10986 * to the start of the device. 10987 * 10988 * It is also assumed that the higher layer has implemented 10989 * an "overrun" mechanism for the case where the request would 10990 * read/write beyond the end of a partition. In this case we 10991 * assume (and ASSERT) that bp->b_resid == 0. 10992 * 10993 * Note: The implementation for this routine assumes the target 10994 * block size remains constant between allocation and transport. 10995 */ 10996 10997 static void 10998 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10999 { 11000 struct sd_mapblocksize_info *bsp; 11001 struct sd_xbuf *xp; 11002 offset_t first_byte; 11003 daddr_t start_block, end_block; 11004 daddr_t request_bytes; 11005 ushort_t is_aligned = FALSE; 11006 11007 ASSERT(un != NULL); 11008 ASSERT(bp != NULL); 11009 ASSERT(!mutex_owned(SD_MUTEX(un))); 11010 ASSERT(bp->b_resid == 0); 11011 11012 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11013 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11014 11015 /* 11016 * For a non-writable CD, a write request is an error 11017 */ 11018 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11019 (un->un_f_mmc_writable_media == FALSE)) { 11020 bioerror(bp, EIO); 11021 bp->b_resid = bp->b_bcount; 11022 SD_BEGIN_IODONE(index, un, bp); 11023 return; 11024 } 11025 11026 /* 11027 * We do not need a shadow buf if the device is using 11028 * un->un_sys_blocksize as its block size or if bcount == 0.
11029 * In this case there is no layer-private data block allocated. 11030 */ 11031 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11032 (bp->b_bcount == 0)) { 11033 goto done; 11034 } 11035 11036 #if defined(__i386) || defined(__amd64) 11037 /* We do not support non-block-aligned transfers for ROD devices */ 11038 ASSERT(!ISROD(un)); 11039 #endif 11040 11041 xp = SD_GET_XBUF(bp); 11042 ASSERT(xp != NULL); 11043 11044 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11045 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11046 un->un_tgt_blocksize, un->un_sys_blocksize); 11047 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11048 "request start block:0x%x\n", xp->xb_blkno); 11049 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11050 "request len:0x%x\n", bp->b_bcount); 11051 11052 /* 11053 * Allocate the layer-private data area for the mapblocksize layer. 11054 * Layers are allowed to use the xb_private member of the sd_xbuf 11055 * struct to store the pointer to their layer-private data block, but 11056 * each layer also has the responsibility of restoring the prior 11057 * contents of xb_private before returning the buf/xbuf to the 11058 * higher layer that sent it. 11059 * 11060 * Here we save the prior contents of xp->xb_private into the 11061 * bsp->mbs_oprivate field of our layer-private data area. This value 11062 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11063 * the layer-private area and returning the buf/xbuf to the layer 11064 * that sent it. 11065 * 11066 * Note that here we use kmem_zalloc for the allocation as there are 11067 * parts of the mapblocksize code that expect certain fields to be 11068 * zero unless explicitly set to a required value. 11069 */ 11070 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11071 bsp->mbs_oprivate = xp->xb_private; 11072 xp->xb_private = bsp; 11073 11074 /* 11075 * This treats the data on the disk (target) as an array of bytes. 11076 * first_byte is the byte offset, from the beginning of the device, 11077 * to the location of the request. This is converted from a 11078 * un->un_sys_blocksize block address to a byte offset, and then back 11079 * to a block address based upon a un->un_tgt_blocksize block size. 11080 * 11081 * xp->xb_blkno should be absolute upon entry into this function, 11082 * but it is based upon partitions that use the "system" 11083 * block size. It must be adjusted to reflect the block size of 11084 * the target. 11085 * 11086 * Note that end_block is actually the block that follows the last 11087 * block of the request, but that's what is needed for the computation. 11088 */ 11089 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11090 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11091 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11092 un->un_tgt_blocksize; 11093 11094 /* request_bytes is rounded up to a multiple of the target block size */ 11095 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11096 11097 /* 11098 * See if the starting address of the request and the request 11099 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11100 * then we do not need to allocate a shadow buf to handle the request. 11101 */ 11102 if (((first_byte % un->un_tgt_blocksize) == 0) && 11103 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11104 is_aligned = TRUE; 11105 } 11106 11107 if ((bp->b_flags & B_READ) == 0) { 11108 /* 11109 * Lock the range for a write operation.
An aligned request is 11110 * considered a simple write; otherwise the request must be a 11111 * read-modify-write. 11112 */ 11113 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11114 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11115 } 11116 11117 /* 11118 * Alloc a shadow buf if the request is not aligned. Also, this is 11119 * where the READ command is generated for a read-modify-write. (The 11120 * write phase is deferred until after the read completes.) 11121 */ 11122 if (is_aligned == FALSE) { 11123 11124 struct sd_mapblocksize_info *shadow_bsp; 11125 struct sd_xbuf *shadow_xp; 11126 struct buf *shadow_bp; 11127 11128 /* 11129 * Allocate the shadow buf and its associated xbuf. Note that 11130 * after this call the xb_blkno value in both the original 11131 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11132 * same: absolute (relative to the start of the device), and 11133 * adjusted for the target block size. The b_blkno in the 11134 * shadow buf will also be set to this value. We should never 11135 * change b_blkno in the original bp however. 11136 * 11137 * Note also that the shadow buf will always need to be a 11138 * READ command, regardless of whether the incoming command 11139 * is a READ or a WRITE. 11140 */ 11141 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11142 xp->xb_blkno, 11143 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11144 11145 shadow_xp = SD_GET_XBUF(shadow_bp); 11146 11147 /* 11148 * Allocate the layer-private data for the shadow buf. 11149 * (No need to preserve xb_private in the shadow xbuf.) 11150 */ 11151 shadow_xp->xb_private = shadow_bsp = 11152 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11153 11154 /* 11155 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11156 * to figure out where the start of the user data is (based upon 11157 * the system block size) in the data returned by the READ 11158 * command (which will be based upon the target blocksize). Note 11159 * that this is only really used if the request is unaligned. 11160 */ 11161 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11162 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11163 ASSERT((bsp->mbs_copy_offset >= 0) && 11164 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11165 11166 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11167 11168 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11169 11170 /* Transfer the wmap (if any) to the shadow buf */ 11171 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11172 bsp->mbs_wmp = NULL; 11173 11174 /* 11175 * The shadow buf goes on from here in place of the 11176 * original buf. 11177 */ 11178 shadow_bsp->mbs_orig_bp = bp; 11179 bp = shadow_bp; 11180 } 11181 11182 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11183 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11184 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11185 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11186 request_bytes); 11187 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11188 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 11189 11190 done: 11191 SD_NEXT_IOSTART(index, un, bp); 11192 11193 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11194 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11195 } 11196 11197
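/*
 * Illustrative example (not part of the original driver source): assume
 * a 512-byte system block size and a 2048-byte target block size, with
 * an incoming request of xp->xb_blkno == 3 (system blocks) and
 * b_bcount == 1024.  sd_mapblocksize_iostart() computes:
 *
 *	first_byte    = 3 * 512 = 1536
 *	start_block   = 1536 / 2048 = 0			(target blocks)
 *	end_block     = (1536 + 1024 + 2047) / 2048 = 2
 *	request_bytes = (2 - 0) * 2048 = 4096
 *
 * Neither 1536 nor 1024 is a multiple of 2048, so the request is
 * unaligned: a 4096-byte shadow buf covering target blocks 0 and 1 is
 * allocated, and mbs_copy_offset = 1536 - (0 * 2048) = 1536 records
 * where the caller's data begins within the shadow data.
 */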
11198 /* 11199 * Function: sd_mapblocksize_iodone 11200 * 11201 * Description: Completion-side processing for block-size mapping. 11202 * 11203 * Context: May be called under interrupt context 11204 */ 11205 11206 static void 11207 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11208 { 11209 struct sd_mapblocksize_info *bsp; 11210 struct sd_xbuf *xp; 11211 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11212 struct buf *orig_bp; /* ptr to the original buf */ 11213 offset_t shadow_end; 11214 offset_t request_end; 11215 offset_t shadow_start; 11216 ssize_t copy_offset; 11217 size_t copy_length; 11218 size_t shortfall; 11219 uint_t is_write; /* TRUE if this bp is a WRITE */ 11220 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11221 11222 ASSERT(un != NULL); 11223 ASSERT(bp != NULL); 11224 11225 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11226 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11227 11228 /* 11229 * There is no shadow buf or layer-private data if the target is 11230 * using un->un_sys_blocksize as its block size or if bcount == 0. 11231 */ 11232 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11233 (bp->b_bcount == 0)) { 11234 goto exit; 11235 } 11236 11237 xp = SD_GET_XBUF(bp); 11238 ASSERT(xp != NULL); 11239 11240 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11241 bsp = xp->xb_private; 11242 11243 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11244 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11245 11246 if (is_write) { 11247 /* 11248 * For a WRITE request we must free up the block range that 11249 * we have locked up. This holds regardless of whether this is 11250 * an aligned write request or a read-modify-write request. 11251 */ 11252 sd_range_unlock(un, bsp->mbs_wmp); 11253 bsp->mbs_wmp = NULL; 11254 } 11255 11256 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11257 /* 11258 * An aligned read or write command will have no shadow buf; 11259 * there is not much else to do with it. 11260 */ 11261 goto done; 11262 } 11263 11264 orig_bp = bsp->mbs_orig_bp; 11265 ASSERT(orig_bp != NULL); 11266 orig_xp = SD_GET_XBUF(orig_bp); 11267 ASSERT(orig_xp != NULL); 11268 ASSERT(!mutex_owned(SD_MUTEX(un))); 11269 11270 if (!is_write && has_wmap) { 11271 /* 11272 * A READ with a wmap means this is the READ phase of a 11273 * read-modify-write. If an error occurred on the READ then 11274 * we do not proceed with the WRITE phase or copy any data. 11275 * Just release the write maps and return with an error. 11276 */ 11277 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11278 orig_bp->b_resid = orig_bp->b_bcount; 11279 bioerror(orig_bp, bp->b_error); 11280 sd_range_unlock(un, bsp->mbs_wmp); 11281 goto freebuf_done; 11282 } 11283 } 11284 11285 /* 11286 * Here is where we set up to copy the data from the shadow buf 11287 * into the space associated with the original buf. 11288 * 11289 * To deal with the conversion between block sizes, these 11290 * computations treat the data as an array of bytes, with the 11291 * first byte (byte 0) corresponding to the first byte in the 11292 * first block on the disk. 11293 */ 11294 11295 /* 11296 * shadow_start and shadow_end indicate the location and size of 11297 * the data returned with the shadow IO request. 11298 */ 11299 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11300 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11301
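/*
 * Illustrative example (not part of the original driver source):
 * continuing the numbers from the example above (2048-byte target
 * blocks, mbs_copy_offset == 1536, original b_bcount == 1024), suppose
 * the 4096-byte shadow READ comes back short with bp->b_resid == 2048.
 * Then:
 *
 *	shadow_start = 0
 *	shadow_end   = 0 + 4096 - 2048 = 2048
 *	request_end  = 0 + 1536 + 1024 = 2560
 *	shortfall    = 2560 - 2048 = 512
 *
 * Since 512 < 1024, part of the caller's data did arrive:
 * orig_bp->b_resid is set to 512 and only 1024 - 512 = 512 bytes are
 * copied back into the original buf.
 */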
11302 /* 11303 * copy_offset gives the offset (in bytes) from the start of the first 11304 * block of the READ request to the beginning of the data. We retrieve 11305 * this value from the mbs_copy_offset field of the layer-private data 11306 * area, where it was saved by sd_mapblocksize_iostart(). copy_length 11307 * gives the amount of data to be copied (in bytes). 11308 */ 11309 copy_offset = bsp->mbs_copy_offset; 11310 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11311 copy_length = orig_bp->b_bcount; 11312 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11313 11314 /* 11315 * Set up the resid and error fields of orig_bp as appropriate. 11316 */ 11317 if (shadow_end >= request_end) { 11318 /* We got all the requested data; set resid to zero */ 11319 orig_bp->b_resid = 0; 11320 } else { 11321 /* 11322 * We failed to get enough data to fully satisfy the original 11323 * request. Just copy back whatever data we got and set 11324 * up the residual and error code as required. 11325 * 11326 * 'shortfall' is the amount by which the data received with the 11327 * shadow buf has "fallen short" of the requested amount. 11328 */ 11329 shortfall = (size_t)(request_end - shadow_end); 11330 11331 if (shortfall > orig_bp->b_bcount) { 11332 /* 11333 * We did not get enough data to even partially 11334 * fulfill the original request. The residual is 11335 * equal to the amount requested. 11336 */ 11337 orig_bp->b_resid = orig_bp->b_bcount; 11338 } else { 11339 /* 11340 * We did not get all the data that we requested 11341 * from the device, but we will try to return what 11342 * portion we did get. 11343 */ 11344 orig_bp->b_resid = shortfall; 11345 } 11346 ASSERT(copy_length >= orig_bp->b_resid); 11347 copy_length -= orig_bp->b_resid; 11348 } 11349 11350 /* Propagate the error code from the shadow buf to the original buf */ 11351 bioerror(orig_bp, bp->b_error); 11352 11353 if (is_write) { 11354 goto freebuf_done; /* No data copying for a WRITE */ 11355 } 11356 11357 if (has_wmap) { 11358 /* 11359 * This is a READ command from the READ phase of a 11360 * read-modify-write request. We have to copy the data given 11361 * by the user OVER the data returned by the READ command, 11362 * then convert the command from a READ to a WRITE and send 11363 * it back to the target. 11364 */ 11365 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11366 copy_length); 11367 11368 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11369 11370 /* 11371 * Dispatch the WRITE command to the taskq thread, which 11372 * will in turn send the command to the target. When the 11373 * WRITE command completes, we (sd_mapblocksize_iodone()) 11374 * will get called again as part of the iodone chain 11375 * processing for it. Note that we will still be dealing 11376 * with the shadow buf at that point. 11377 */ 11378 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11379 KM_NOSLEEP) != 0) { 11380 /* 11381 * Dispatch was successful so we are done. Return 11382 * without going any higher up the iodone chain. Do 11383 * not free up any layer-private data until after the 11384 * WRITE completes. 11385 */ 11386 return; 11387 } 11388 11389 /* 11390 * Dispatch of the WRITE command failed; set up the error 11391 * condition and send this IO back up the iodone chain. 11392 */ 11393 bioerror(orig_bp, EIO); 11394 orig_bp->b_resid = orig_bp->b_bcount; 11395 11396 } else { 11397 /* 11398 * This is a regular READ request (ie, not a RMW). Copy the 11399 * data from the shadow buf into the original buf.
The 11400 * copy_offset compensates for any "misalignment" between the 11401 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11402 * original buf (with its un->un_sys_blocksize blocks). 11403 */ 11404 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11405 copy_length); 11406 } 11407 11408 freebuf_done: 11409 11410 /* 11411 * At this point we still have both the shadow buf AND the original 11412 * buf to deal with, as well as the layer-private data area in each. 11413 * Local variables are as follows: 11414 * 11415 * bp -- points to shadow buf 11416 * xp -- points to xbuf of shadow buf 11417 * bsp -- points to layer-private data area of shadow buf 11418 * orig_bp -- points to original buf 11419 * 11420 * First free the shadow buf and its associated xbuf, then free the 11421 * layer-private data area from the shadow buf. There is no need to 11422 * restore xb_private in the shadow xbuf. 11423 */ 11424 sd_shadow_buf_free(bp); 11425 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11426 11427 /* 11428 * Now update the local variables to point to the original buf, xbuf, 11429 * and layer-private area. 11430 */ 11431 bp = orig_bp; 11432 xp = SD_GET_XBUF(bp); 11433 ASSERT(xp != NULL); 11434 ASSERT(xp == orig_xp); 11435 bsp = xp->xb_private; 11436 ASSERT(bsp != NULL); 11437 11438 done: 11439 /* 11440 * Restore xb_private to whatever it was set to by the next higher 11441 * layer in the chain, then free the layer-private data area. 11442 */ 11443 xp->xb_private = bsp->mbs_oprivate; 11444 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11445 11446 exit: 11447 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11448 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11449 11450 SD_NEXT_IODONE(index, un, bp); 11451 } 11452 11453 11454 /* 11455 * Function: sd_checksum_iostart 11456 * 11457 * Description: A stub function for a layer that's currently not used. 11458 * For now just a placeholder. 11459 * 11460 * Context: Kernel thread context 11461 */ 11462 11463 static void 11464 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11465 { 11466 ASSERT(un != NULL); 11467 ASSERT(bp != NULL); 11468 ASSERT(!mutex_owned(SD_MUTEX(un))); 11469 SD_NEXT_IOSTART(index, un, bp); 11470 } 11471 11472 11473 /* 11474 * Function: sd_checksum_iodone 11475 * 11476 * Description: A stub function for a layer that's currently not used. 11477 * For now just a placeholder. 11478 * 11479 * Context: May be called under interrupt context 11480 */ 11481 11482 static void 11483 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11484 { 11485 ASSERT(un != NULL); 11486 ASSERT(bp != NULL); 11487 ASSERT(!mutex_owned(SD_MUTEX(un))); 11488 SD_NEXT_IODONE(index, un, bp); 11489 } 11490 11491 11492 /* 11493 * Function: sd_checksum_uscsi_iostart 11494 * 11495 * Description: A stub function for a layer that's currently not used. 11496 * For now just a placeholder. 11497 * 11498 * Context: Kernel thread context 11499 */ 11500 11501 static void 11502 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11503 { 11504 ASSERT(un != NULL); 11505 ASSERT(bp != NULL); 11506 ASSERT(!mutex_owned(SD_MUTEX(un))); 11507 SD_NEXT_IOSTART(index, un, bp); 11508 } 11509 11510 11511 /* 11512 * Function: sd_checksum_uscsi_iodone 11513 * 11514 * Description: A stub function for a layer that's currently not used. 11515 * For now just a placeholder. 
11516 * 11517 * Context: May be called under interrupt context 11518 */ 11519 11520 static void 11521 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11522 { 11523 ASSERT(un != NULL); 11524 ASSERT(bp != NULL); 11525 ASSERT(!mutex_owned(SD_MUTEX(un))); 11526 SD_NEXT_IODONE(index, un, bp); 11527 } 11528 11529 11530 /* 11531 * Function: sd_pm_iostart 11532 * 11533 * Description: iostart-side routine for Power mangement. 11534 * 11535 * Context: Kernel thread context 11536 */ 11537 11538 static void 11539 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11540 { 11541 ASSERT(un != NULL); 11542 ASSERT(bp != NULL); 11543 ASSERT(!mutex_owned(SD_MUTEX(un))); 11544 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11545 11546 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11547 11548 if (sd_pm_entry(un) != DDI_SUCCESS) { 11549 /* 11550 * Set up to return the failed buf back up the 'iodone' 11551 * side of the calling chain. 11552 */ 11553 bioerror(bp, EIO); 11554 bp->b_resid = bp->b_bcount; 11555 11556 SD_BEGIN_IODONE(index, un, bp); 11557 11558 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11559 return; 11560 } 11561 11562 SD_NEXT_IOSTART(index, un, bp); 11563 11564 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11565 } 11566 11567 11568 /* 11569 * Function: sd_pm_iodone 11570 * 11571 * Description: iodone-side routine for power mangement. 11572 * 11573 * Context: may be called from interrupt context 11574 */ 11575 11576 static void 11577 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11578 { 11579 ASSERT(un != NULL); 11580 ASSERT(bp != NULL); 11581 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11582 11583 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11584 11585 /* 11586 * After attach the following flag is only read, so don't 11587 * take the penalty of acquiring a mutex for it. 11588 */ 11589 if (un->un_f_pm_is_enabled == TRUE) { 11590 sd_pm_exit(un); 11591 } 11592 11593 SD_NEXT_IODONE(index, un, bp); 11594 11595 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11596 } 11597 11598 11599 /* 11600 * Function: sd_core_iostart 11601 * 11602 * Description: Primary driver function for enqueuing buf(9S) structs from 11603 * the system and initiating IO to the target device 11604 * 11605 * Context: Kernel thread context. Can sleep. 11606 * 11607 * Assumptions: - The given xp->xb_blkno is absolute 11608 * (ie, relative to the start of the device). 11609 * - The IO is to be done using the native blocksize of 11610 * the device, as specified in un->un_tgt_blocksize. 11611 */ 11612 /* ARGSUSED */ 11613 static void 11614 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11615 { 11616 struct sd_xbuf *xp; 11617 11618 ASSERT(un != NULL); 11619 ASSERT(bp != NULL); 11620 ASSERT(!mutex_owned(SD_MUTEX(un))); 11621 ASSERT(bp->b_resid == 0); 11622 11623 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11624 11625 xp = SD_GET_XBUF(bp); 11626 ASSERT(xp != NULL); 11627 11628 mutex_enter(SD_MUTEX(un)); 11629 11630 /* 11631 * If we are currently in the failfast state, fail any new IO 11632 * that has B_FAILFAST set, then return. 11633 */ 11634 if ((bp->b_flags & B_FAILFAST) && 11635 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11636 mutex_exit(SD_MUTEX(un)); 11637 bioerror(bp, EIO); 11638 bp->b_resid = bp->b_bcount; 11639 SD_BEGIN_IODONE(index, un, bp); 11640 return; 11641 } 11642 11643 if (SD_IS_DIRECT_PRIORITY(xp)) { 11644 /* 11645 * Priority command -- transport it immediately. 
11646 * 11647 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11648 * because all direct priority commands should be associated 11649 * with error recovery actions which we don't want to retry. 11650 */ 11651 sd_start_cmds(un, bp); 11652 } else { 11653 /* 11654 * Normal command -- add it to the wait queue, then start 11655 * transporting commands from the wait queue. 11656 */ 11657 sd_add_buf_to_waitq(un, bp); 11658 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11659 sd_start_cmds(un, NULL); 11660 } 11661 11662 mutex_exit(SD_MUTEX(un)); 11663 11664 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11665 } 11666 11667 11668 /* 11669 * Function: sd_init_cdb_limits 11670 * 11671 * Description: This is to handle scsi_pkt initialization differences 11672 * between the driver platforms. 11673 * 11674 * Legacy behaviors: 11675 * 11676 * If the block number or the sector count exceeds the 11677 * capabilities of a Group 0 command, shift over to a 11678 * Group 1 command. We don't blindly use Group 1 11679 * commands because a) some drives (CDC Wren IVs) get a 11680 * bit confused, and b) there is probably a fair amount 11681 * of speed difference for a target to receive and decode 11682 * a 10 byte command instead of a 6 byte command. 11683 * 11684 * The xfer time difference of 6 vs 10 byte CDBs is 11685 * still significant so this code is still worthwhile. 11686 * 10 byte CDBs are very inefficient with the fas HBA driver 11687 * and older disks. Each CDB byte took 1 usec with some 11688 * popular disks. 11689 * 11690 * Context: Must be called at attach time 11691 */ 11692 11693 static void 11694 sd_init_cdb_limits(struct sd_lun *un) 11695 { 11696 int hba_cdb_limit; 11697 11698 /* 11699 * Use CDB_GROUP1 commands for most devices except for 11700 * parallel SCSI fixed drives in which case we get better 11701 * performance using CDB_GROUP0 commands (where applicable). 11702 */ 11703 un->un_mincdb = SD_CDB_GROUP1; 11704 #if !defined(__fibre) 11705 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11706 !un->un_f_has_removable_media) { 11707 un->un_mincdb = SD_CDB_GROUP0; 11708 } 11709 #endif 11710 11711 /* 11712 * Try to read the max-cdb-length supported by HBA. 11713 */ 11714 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11715 if (0 >= un->un_max_hba_cdb) { 11716 un->un_max_hba_cdb = CDB_GROUP4; 11717 hba_cdb_limit = SD_CDB_GROUP4; 11718 } else if (0 < un->un_max_hba_cdb && 11719 un->un_max_hba_cdb < CDB_GROUP1) { 11720 hba_cdb_limit = SD_CDB_GROUP0; 11721 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11722 un->un_max_hba_cdb < CDB_GROUP5) { 11723 hba_cdb_limit = SD_CDB_GROUP1; 11724 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11725 un->un_max_hba_cdb < CDB_GROUP4) { 11726 hba_cdb_limit = SD_CDB_GROUP5; 11727 } else { 11728 hba_cdb_limit = SD_CDB_GROUP4; 11729 } 11730 11731 /* 11732 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11733 * commands for fixed disks unless we are building for a 32 bit 11734 * kernel. 11735 */ 11736 #ifdef _LP64 11737 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11738 min(hba_cdb_limit, SD_CDB_GROUP4); 11739 #else 11740 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11741 min(hba_cdb_limit, SD_CDB_GROUP1); 11742 #endif 11743 11744 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11745 ? sizeof (struct scsi_arq_status) : 1); 11746 un->un_cmd_timeout = (ushort_t)sd_io_time; 11747 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 11748 } 11749 11750 11751 /* 11752 * Function: sd_initpkt_for_buf 11753 * 11754 * Description: Allocate and initialize for transport a scsi_pkt struct, 11755 * based upon the info specified in the given buf struct. 11756 * 11757 * Assumes the xb_blkno in the request is absolute (ie, 11758 * relative to the start of the device, NOT the partition). 11759 * Also assumes that the request is using the native block 11760 * size of the device (as returned by the READ CAPACITY 11761 * command). 11762 * 11763 * Return Code: SD_PKT_ALLOC_SUCCESS 11764 * SD_PKT_ALLOC_FAILURE 11765 * SD_PKT_ALLOC_FAILURE_NO_DMA 11766 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11767 * 11768 * Context: Kernel thread and may be called from software interrupt context 11769 * as part of a sdrunout callback. This function may not block or 11770 * call routines that block. 11771 */ 11772 11773 static int 11774 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11775 { 11776 struct sd_xbuf *xp; 11777 struct scsi_pkt *pktp = NULL; 11778 struct sd_lun *un; 11779 size_t blockcount; 11780 daddr_t startblock; 11781 int rval; 11782 int cmd_flags; 11783 11784 ASSERT(bp != NULL); 11785 ASSERT(pktpp != NULL); 11786 xp = SD_GET_XBUF(bp); 11787 ASSERT(xp != NULL); 11788 un = SD_GET_UN(bp); 11789 ASSERT(un != NULL); 11790 ASSERT(mutex_owned(SD_MUTEX(un))); 11791 ASSERT(bp->b_resid == 0); 11792 11793 SD_TRACE(SD_LOG_IO_CORE, un, 11794 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11795 11796 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11797 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11798 /* 11799 * Already have a scsi_pkt -- just need DMA resources. 11800 * We must recompute the CDB in case the mapping returns 11801 * a nonzero pkt_resid. 11802 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11803 * that is being retried, the unmap/remap of the DMA resources 11804 * will result in the entire transfer starting over again 11805 * from the very first block. 11806 */ 11807 ASSERT(xp->xb_pktp != NULL); 11808 pktp = xp->xb_pktp; 11809 } else { 11810 pktp = NULL; 11811 } 11812 #endif /* __i386 || __amd64 */ 11813 11814 startblock = xp->xb_blkno; /* Absolute block num. */ 11815 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11816 11817 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11818 11819 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11820 11821 #else 11822 11823 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11824 11825 #endif 11826 11827 /* 11828 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11829 * call scsi_init_pkt, and build the CDB. 11830 */ 11831 rval = sd_setup_rw_pkt(un, &pktp, bp, 11832 cmd_flags, sdrunout, (caddr_t)un, 11833 startblock, blockcount); 11834 11835 if (rval == 0) { 11836 /* 11837 * Success. 11838 * 11839 * If partial DMA is being used and required for this transfer, 11840 * set it up here.
11841 */ 11842 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11843 (pktp->pkt_resid != 0)) { 11844 11845 /* 11846 * Save the CDB length and pkt_resid for the 11847 * next xfer 11848 */ 11849 xp->xb_dma_resid = pktp->pkt_resid; 11850 11851 /* rezero resid */ 11852 pktp->pkt_resid = 0; 11853 11854 } else { 11855 xp->xb_dma_resid = 0; 11856 } 11857 11858 pktp->pkt_flags = un->un_tagflags; 11859 pktp->pkt_time = un->un_cmd_timeout; 11860 pktp->pkt_comp = sdintr; 11861 11862 pktp->pkt_private = bp; 11863 *pktpp = pktp; 11864 11865 SD_TRACE(SD_LOG_IO_CORE, un, 11866 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11867 11868 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11869 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11870 #endif 11871 11872 return (SD_PKT_ALLOC_SUCCESS); 11873 11874 } 11875 11876 /* 11877 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11878 * from sd_setup_rw_pkt. 11879 */ 11880 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11881 11882 if (rval == SD_PKT_ALLOC_FAILURE) { 11883 *pktpp = NULL; 11884 /* 11885 * Set the driver state to RWAIT to indicate the driver 11886 * is waiting on resource allocations. The driver will not 11887 * suspend, pm_suspend, or detach while the state is RWAIT. 11888 */ 11889 New_state(un, SD_STATE_RWAIT); 11890 11891 SD_ERROR(SD_LOG_IO_CORE, un, 11892 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11893 11894 if ((bp->b_flags & B_ERROR) != 0) { 11895 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11896 } 11897 return (SD_PKT_ALLOC_FAILURE); 11898 } else { 11899 /* 11900 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11901 * 11902 * This should never happen. Maybe someone messed with the 11903 * kernel's minphys? 11904 */ 11905 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11906 "Request rejected: too large for CDB: " 11907 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11908 SD_ERROR(SD_LOG_IO_CORE, un, 11909 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11910 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11911 11912 } 11913 } 11914 11915 11916 /* 11917 * Function: sd_destroypkt_for_buf 11918 * 11919 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11920 * 11921 * Context: Kernel thread or interrupt context 11922 */ 11923 11924 static void 11925 sd_destroypkt_for_buf(struct buf *bp) 11926 { 11927 ASSERT(bp != NULL); 11928 ASSERT(SD_GET_UN(bp) != NULL); 11929 11930 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11931 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11932 11933 ASSERT(SD_GET_PKTP(bp) != NULL); 11934 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11935 11936 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11937 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11938 } 11939
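/*
 * Illustrative note (not part of the original driver source): the CDB
 * groups searched by sd_setup_rw_pkt() below carry the standard SCSI
 * addressing limits, roughly:
 *
 *	Group 0 ( 6-byte CDB):	21-bit LBA,	 8-bit block count
 *	Group 1 (10-byte CDB):	32-bit LBA,	16-bit block count
 *	Group 5 (12-byte CDB):	32-bit LBA,	32-bit block count
 *	Group 4 (16-byte CDB):	64-bit LBA,	32-bit block count
 *
 * For example, a request at lba == 0x200000 no longer fits a 21-bit
 * Group 0 address, so the search falls through to Group 1 and the CDB
 * is built with FORMG1ADDR()/FORMG1COUNT().  The authoritative cut-over
 * points are the sd_cdbtab[] limits, not this table.
 */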
11940 /* 11941 * Function: sd_setup_rw_pkt 11942 * 11943 * Description: Determines appropriate CDB group for the requested LBA 11944 * and transfer length, calls scsi_init_pkt, and builds 11945 * the CDB. Do not use for partial DMA transfers except 11946 * for the initial transfer since the CDB size must 11947 * remain constant. 11948 * 11949 * Context: Kernel thread and may be called from software interrupt 11950 * context as part of a sdrunout callback. This function may not 11951 * block or call routines that block. 11952 */ 11953 11954 11955 int 11956 sd_setup_rw_pkt(struct sd_lun *un, 11957 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11958 int (*callback)(caddr_t), caddr_t callback_arg, 11959 diskaddr_t lba, uint32_t blockcount) 11960 { 11961 struct scsi_pkt *return_pktp; 11962 union scsi_cdb *cdbp; 11963 struct sd_cdbinfo *cp = NULL; 11964 int i; 11965 11966 /* 11967 * See which size CDB to use, based upon the request. 11968 */ 11969 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11970 11971 /* 11972 * Check lba and block count against sd_cdbtab limits. 11973 * In the partial DMA case, we have to use the same size 11974 * CDB for all the transfers. Check lba + blockcount 11975 * against the max LBA so we know that segment of the 11976 * transfer can use the CDB we select. 11977 */ 11978 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11979 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11980 11981 /* 11982 * The command will fit into the CDB type 11983 * specified by sd_cdbtab[i]. 11984 */ 11985 cp = sd_cdbtab + i; 11986 11987 /* 11988 * Call scsi_init_pkt so we can fill in the 11989 * CDB. 11990 */ 11991 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11992 bp, cp->sc_grpcode, un->un_status_len, 0, 11993 flags, callback, callback_arg); 11994 11995 if (return_pktp != NULL) { 11996 11997 /* 11998 * Return new value of pkt 11999 */ 12000 *pktpp = return_pktp; 12001 12002 /* 12003 * To be safe, zero the CDB, ensuring there is 12004 * no leftover data from a previous command. 12005 */ 12006 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12007 12008 /* 12009 * Handle partial DMA mapping 12010 */ 12011 if (return_pktp->pkt_resid != 0) { 12012 12013 /* 12014 * Not going to xfer as many blocks as 12015 * originally expected 12016 */ 12017 blockcount -= 12018 SD_BYTES2TGTBLOCKS(un, 12019 return_pktp->pkt_resid); 12020 } 12021 12022 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12023 12024 /* 12025 * Set command byte based on the CDB 12026 * type we matched. 12027 */ 12028 cdbp->scc_cmd = cp->sc_grpmask | 12029 ((bp->b_flags & B_READ) ? 12030 SCMD_READ : SCMD_WRITE); 12031 12032 SD_FILL_SCSI1_LUN(un, return_pktp); 12033 12034 /* 12035 * Fill in LBA and length 12036 */ 12037 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12038 (cp->sc_grpcode == CDB_GROUP4) || 12039 (cp->sc_grpcode == CDB_GROUP0) || 12040 (cp->sc_grpcode == CDB_GROUP5)); 12041 12042 if (cp->sc_grpcode == CDB_GROUP1) { 12043 FORMG1ADDR(cdbp, lba); 12044 FORMG1COUNT(cdbp, blockcount); 12045 return (0); 12046 } else if (cp->sc_grpcode == CDB_GROUP4) { 12047 FORMG4LONGADDR(cdbp, lba); 12048 FORMG4COUNT(cdbp, blockcount); 12049 return (0); 12050 } else if (cp->sc_grpcode == CDB_GROUP0) { 12051 FORMG0ADDR(cdbp, lba); 12052 FORMG0COUNT(cdbp, blockcount); 12053 return (0); 12054 } else if (cp->sc_grpcode == CDB_GROUP5) { 12055 FORMG5ADDR(cdbp, lba); 12056 FORMG5COUNT(cdbp, blockcount); 12057 return (0); 12058 } 12059 12060 /* 12061 * It should be impossible to not match one 12062 * of the CDB types above, so we should never 12063 * reach this point. Set the CDB command byte 12064 * to test-unit-ready to avoid writing 12065 * to somewhere we don't intend. 12066 */ 12067 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12068 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12069 } else { 12070 /* 12071 * Couldn't get scsi_pkt 12072 */ 12073 return (SD_PKT_ALLOC_FAILURE); 12074 } 12075 } 12076 } 12077 12078 /* 12079 * None of the available CDB types were suitable.
This really 12080 * should never happen: on a 64 bit system we support 12081 * READ16/WRITE16 which will hold an entire 64 bit disk address 12082 * and on a 32 bit system we will refuse to bind to a device 12083 * larger than 2TB so addresses will never be larger than 32 bits. 12084 */ 12085 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12086 } 12087 12088 /* 12089 * Function: sd_setup_next_rw_pkt 12090 * 12091 * Description: Setup packet for partial DMA transfers, except for the 12092 * initial transfer. sd_setup_rw_pkt should be used for 12093 * the initial transfer. 12094 * 12095 * Context: Kernel thread and may be called from interrupt context. 12096 */ 12097 12098 int 12099 sd_setup_next_rw_pkt(struct sd_lun *un, 12100 struct scsi_pkt *pktp, struct buf *bp, 12101 diskaddr_t lba, uint32_t blockcount) 12102 { 12103 uchar_t com; 12104 union scsi_cdb *cdbp; 12105 uchar_t cdb_group_id; 12106 12107 ASSERT(pktp != NULL); 12108 ASSERT(pktp->pkt_cdbp != NULL); 12109 12110 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12111 com = cdbp->scc_cmd; 12112 cdb_group_id = CDB_GROUPID(com); 12113 12114 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12115 (cdb_group_id == CDB_GROUPID_1) || 12116 (cdb_group_id == CDB_GROUPID_4) || 12117 (cdb_group_id == CDB_GROUPID_5)); 12118 12119 /* 12120 * Move pkt to the next portion of the xfer. 12121 * func is NULL_FUNC so we do not have to release 12122 * the disk mutex here. 12123 */ 12124 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12125 NULL_FUNC, NULL) == pktp) { 12126 /* Success. Handle partial DMA */ 12127 if (pktp->pkt_resid != 0) { 12128 blockcount -= 12129 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12130 } 12131 12132 cdbp->scc_cmd = com; 12133 SD_FILL_SCSI1_LUN(un, pktp); 12134 if (cdb_group_id == CDB_GROUPID_1) { 12135 FORMG1ADDR(cdbp, lba); 12136 FORMG1COUNT(cdbp, blockcount); 12137 return (0); 12138 } else if (cdb_group_id == CDB_GROUPID_4) { 12139 FORMG4LONGADDR(cdbp, lba); 12140 FORMG4COUNT(cdbp, blockcount); 12141 return (0); 12142 } else if (cdb_group_id == CDB_GROUPID_0) { 12143 FORMG0ADDR(cdbp, lba); 12144 FORMG0COUNT(cdbp, blockcount); 12145 return (0); 12146 } else if (cdb_group_id == CDB_GROUPID_5) { 12147 FORMG5ADDR(cdbp, lba); 12148 FORMG5COUNT(cdbp, blockcount); 12149 return (0); 12150 } 12151 12152 /* Unreachable */ 12153 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12154 } 12155 12156 /* 12157 * Error setting up next portion of cmd transfer. 12158 * Something is definitely very wrong and this 12159 * should not happen. 12160 */ 12161 return (SD_PKT_ALLOC_FAILURE); 12162 } 12163 12164 /* 12165 * Function: sd_initpkt_for_uscsi 12166 * 12167 * Description: Allocate and initialize for transport a scsi_pkt struct, 12168 * based upon the info specified in the given uscsi_cmd struct. 12169 * 12170 * Return Code: SD_PKT_ALLOC_SUCCESS 12171 * SD_PKT_ALLOC_FAILURE 12172 * SD_PKT_ALLOC_FAILURE_NO_DMA 12173 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12174 * 12175 * Context: Kernel thread and may be called from software interrupt context 12176 * as part of a sdrunout callback. 
This function may not block or 12177 * call routines that block. 12178 */ 12179 12180 static int 12181 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12182 { 12183 struct uscsi_cmd *uscmd; 12184 struct sd_xbuf *xp; 12185 struct scsi_pkt *pktp; 12186 struct sd_lun *un; 12187 uint32_t flags = 0; 12188 12189 ASSERT(bp != NULL); 12190 ASSERT(pktpp != NULL); 12191 xp = SD_GET_XBUF(bp); 12192 ASSERT(xp != NULL); 12193 un = SD_GET_UN(bp); 12194 ASSERT(un != NULL); 12195 ASSERT(mutex_owned(SD_MUTEX(un))); 12196 12197 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12198 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12199 ASSERT(uscmd != NULL); 12200 12201 SD_TRACE(SD_LOG_IO_CORE, un, 12202 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12203 12204 /* 12205 * Allocate the scsi_pkt for the command. 12206 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12207 * during scsi_init_pkt time and will continue to use the 12208 * same path as long as the same scsi_pkt is used without 12209 * intervening scsi_dma_free(). Since a uscsi command does 12210 * not call scsi_dmafree() before retrying a failed command, it 12211 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 12212 * set, so that scsi_vhci can use another available path for the 12213 * retry. Besides, a uscsi command does not allow DMA breakup, 12214 * so there is no need to set the PKT_DMA_PARTIAL flag. 12215 */ 12216 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12217 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12218 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12219 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12220 - sizeof (struct scsi_extended_sense)), 0, 12221 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12222 sdrunout, (caddr_t)un); 12223 } else { 12224 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12225 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12226 sizeof (struct scsi_arq_status), 0, 12227 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12228 sdrunout, (caddr_t)un); 12229 } 12230 12231 if (pktp == NULL) { 12232 *pktpp = NULL; 12233 /* 12234 * Set the driver state to RWAIT to indicate the driver 12235 * is waiting on resource allocations. The driver will not 12236 * suspend, pm_suspend, or detach while the state is RWAIT. 12237 */ 12238 New_state(un, SD_STATE_RWAIT); 12239 12240 SD_ERROR(SD_LOG_IO_CORE, un, 12241 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12242 12243 if ((bp->b_flags & B_ERROR) != 0) { 12244 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12245 } 12246 return (SD_PKT_ALLOC_FAILURE); 12247 } 12248 12249 /* 12250 * We do not do DMA breakup for USCSI commands, so return failure 12251 * here if all the needed DMA resources were not allocated. 12252 */ 12253 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12254 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12255 scsi_destroy_pkt(pktp); 12256 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12257 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12258 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12259 } 12260 12261 /* Init the cdb from the given uscsi struct */ 12262 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12263 uscmd->uscsi_cdb[0], 0, 0, 0); 12264 12265 SD_FILL_SCSI1_LUN(un, pktp); 12266
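/*
 * Illustrative sketch (not part of the original driver source): the
 * uscsi_flags translated below originate from a user-level USCSICMD
 * ioctl.  A hypothetical caller might look like this (error handling
 * omitted; the device path is just an example):
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	(void) ioctl(fd, USCSICMD, &ucmd);
 *
 * USCSI_SILENT becomes FLAG_SILENT in the scsi_pkt built here, which
 * suppresses the console error reporting for the command.
 */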
12267 /* 12268 * Set up the optional USCSI flags. See the uscsi (7I) man page 12269 * for a listing of the supported flags. 12270 */ 12271 12272 if (uscmd->uscsi_flags & USCSI_SILENT) { 12273 flags |= FLAG_SILENT; 12274 } 12275 12276 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12277 flags |= FLAG_DIAGNOSE; 12278 } 12279 12280 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12281 flags |= FLAG_ISOLATE; 12282 } 12283 12284 if (un->un_f_is_fibre == FALSE) { 12285 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12286 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12287 } 12288 } 12289 12290 /* 12291 * Set the pkt flags here so we save time later. 12292 * Note: These flags are NOT in the uscsi man page!!! 12293 */ 12294 if (uscmd->uscsi_flags & USCSI_HEAD) { 12295 flags |= FLAG_HEAD; 12296 } 12297 12298 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12299 flags |= FLAG_NOINTR; 12300 } 12301 12302 /* 12303 * For tagged queueing, things get a bit complicated. 12304 * Check first for head of queue and last for ordered queue. 12305 * If neither head nor ordered, use the default driver tag flags. 12306 */ 12307 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12308 if (uscmd->uscsi_flags & USCSI_HTAG) { 12309 flags |= FLAG_HTAG; 12310 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12311 flags |= FLAG_OTAG; 12312 } else { 12313 flags |= un->un_tagflags & FLAG_TAGMASK; 12314 } 12315 } 12316 12317 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12318 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12319 } 12320 12321 pktp->pkt_flags = flags; 12322 12323 /* Copy the caller's CDB into the pkt... */ 12324 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12325 12326 if (uscmd->uscsi_timeout == 0) { 12327 pktp->pkt_time = un->un_uscsi_timeout; 12328 } else { 12329 pktp->pkt_time = uscmd->uscsi_timeout; 12330 } 12331 12332 /* need it later to identify USCSI request in sdintr */ 12333 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12334 12335 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12336 12337 pktp->pkt_private = bp; 12338 pktp->pkt_comp = sdintr; 12339 *pktpp = pktp; 12340 12341 SD_TRACE(SD_LOG_IO_CORE, un, 12342 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12343 12344 return (SD_PKT_ALLOC_SUCCESS); 12345 } 12346 12347 12348 /* 12349 * Function: sd_destroypkt_for_uscsi 12350 * 12351 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12352 * IOs. Also saves relevant info into the associated uscsi_cmd 12353 * struct. 12354 * 12355 * Context: May be called under interrupt context 12356 */ 12357 12358 static void 12359 sd_destroypkt_for_uscsi(struct buf *bp) 12360 { 12361 struct uscsi_cmd *uscmd; 12362 struct sd_xbuf *xp; 12363 struct scsi_pkt *pktp; 12364 struct sd_lun *un; 12365 12366 ASSERT(bp != NULL); 12367 xp = SD_GET_XBUF(bp); 12368 ASSERT(xp != NULL); 12369 un = SD_GET_UN(bp); 12370 ASSERT(un != NULL); 12371 ASSERT(!mutex_owned(SD_MUTEX(un))); 12372 pktp = SD_GET_PKTP(bp); 12373 ASSERT(pktp != NULL); 12374 12375 SD_TRACE(SD_LOG_IO_CORE, un, 12376 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12377 12378 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12379 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12380 ASSERT(uscmd != NULL); 12381 12382 /* Save the status and the residual into the uscsi_cmd struct */ 12383 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12384 uscmd->uscsi_resid = bp->b_resid; 12385 12386 /* 12387 * If enabled, copy any saved sense data into the area specified 12388 * by the uscsi command.
12389 */ 12390 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12391 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12392 /* 12393 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12394 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12395 */ 12396 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12397 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12398 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12399 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12400 MAX_SENSE_LENGTH); 12401 } else { 12402 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12403 SENSE_LENGTH); 12404 } 12405 } 12406 12407 /* We are done with the scsi_pkt; free it now */ 12408 ASSERT(SD_GET_PKTP(bp) != NULL); 12409 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12410 12411 SD_TRACE(SD_LOG_IO_CORE, un, 12412 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12413 } 12414 12415 12416 /* 12417 * Function: sd_bioclone_alloc 12418 * 12419 * Description: Allocate a buf(9S) and init it as per the given buf 12420 * and the various arguments. The associated sd_xbuf 12421 * struct is (nearly) duplicated. The struct buf *bp 12422 * argument is saved in new_xp->xb_private. 12423 * 12424 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12425 * datalen - size of data area for the shadow bp 12426 * blkno - starting LBA 12427 * func - function pointer for b_iodone in the shadow buf. (May 12428 * be NULL if none.) 12429 * 12430 * Return Code: Pointer to allocates buf(9S) struct 12431 * 12432 * Context: Can sleep. 12433 */ 12434 12435 static struct buf * 12436 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12437 daddr_t blkno, int (*func)(struct buf *)) 12438 { 12439 struct sd_lun *un; 12440 struct sd_xbuf *xp; 12441 struct sd_xbuf *new_xp; 12442 struct buf *new_bp; 12443 12444 ASSERT(bp != NULL); 12445 xp = SD_GET_XBUF(bp); 12446 ASSERT(xp != NULL); 12447 un = SD_GET_UN(bp); 12448 ASSERT(un != NULL); 12449 ASSERT(!mutex_owned(SD_MUTEX(un))); 12450 12451 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12452 NULL, KM_SLEEP); 12453 12454 new_bp->b_lblkno = blkno; 12455 12456 /* 12457 * Allocate an xbuf for the shadow bp and copy the contents of the 12458 * original xbuf into it. 12459 */ 12460 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12461 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12462 12463 /* 12464 * The given bp is automatically saved in the xb_private member 12465 * of the new xbuf. Callers are allowed to depend on this. 12466 */ 12467 new_xp->xb_private = bp; 12468 12469 new_bp->b_private = new_xp; 12470 12471 return (new_bp); 12472 } 12473 12474 /* 12475 * Function: sd_shadow_buf_alloc 12476 * 12477 * Description: Allocate a buf(9S) and init it as per the given buf 12478 * and the various arguments. The associated sd_xbuf 12479 * struct is (nearly) duplicated. The struct buf *bp 12480 * argument is saved in new_xp->xb_private. 12481 * 12482 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12483 * datalen - size of data area for the shadow bp 12484 * bflags - B_READ or B_WRITE (pseudo flag) 12485 * blkno - starting LBA 12486 * func - function pointer for b_iodone in the shadow buf. (May 12487 * be NULL if none.) 12488 * 12489 * Return Code: Pointer to allocates buf(9S) struct 12490 * 12491 * Context: Can sleep. 
12492 */ 12493 12494 static struct buf * 12495 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12496 daddr_t blkno, int (*func)(struct buf *)) 12497 { 12498 struct sd_lun *un; 12499 struct sd_xbuf *xp; 12500 struct sd_xbuf *new_xp; 12501 struct buf *new_bp; 12502 12503 ASSERT(bp != NULL); 12504 xp = SD_GET_XBUF(bp); 12505 ASSERT(xp != NULL); 12506 un = SD_GET_UN(bp); 12507 ASSERT(un != NULL); 12508 ASSERT(!mutex_owned(SD_MUTEX(un))); 12509 12510 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12511 bp_mapin(bp); 12512 } 12513 12514 bflags &= (B_READ | B_WRITE); 12515 #if defined(__i386) || defined(__amd64) 12516 new_bp = getrbuf(KM_SLEEP); 12517 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12518 new_bp->b_bcount = datalen; 12519 new_bp->b_flags = bflags | 12520 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12521 #else 12522 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12523 datalen, bflags, SLEEP_FUNC, NULL); 12524 #endif 12525 new_bp->av_forw = NULL; 12526 new_bp->av_back = NULL; 12527 new_bp->b_dev = bp->b_dev; 12528 new_bp->b_blkno = blkno; 12529 new_bp->b_iodone = func; 12530 new_bp->b_edev = bp->b_edev; 12531 new_bp->b_resid = 0; 12532 12533 /* We need to preserve the B_FAILFAST flag */ 12534 if (bp->b_flags & B_FAILFAST) { 12535 new_bp->b_flags |= B_FAILFAST; 12536 } 12537 12538 /* 12539 * Allocate an xbuf for the shadow bp and copy the contents of the 12540 * original xbuf into it. 12541 */ 12542 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12543 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12544 12545 /* Needed later to copy data between the shadow buf & original buf! */ 12546 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12547 12548 /* 12549 * The given bp is automatically saved in the xb_private member 12550 * of the new xbuf. Callers are allowed to depend on this. 12551 */ 12552 new_xp->xb_private = bp; 12553 12554 new_bp->b_private = new_xp; 12555 12556 return (new_bp); 12557 } 12558 12559 /* 12560 * Function: sd_bioclone_free 12561 * 12562 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12563 * in the larger-than-partition (overrun) case. 12564 * 12565 * Context: May be called under interrupt context 12566 */ 12567 12568 static void 12569 sd_bioclone_free(struct buf *bp) 12570 { 12571 struct sd_xbuf *xp; 12572 12573 ASSERT(bp != NULL); 12574 xp = SD_GET_XBUF(bp); 12575 ASSERT(xp != NULL); 12576 12577 /* 12578 * Call bp_mapout() before freeing the buf, in case a lower 12579 * layer or HBA had done a bp_mapin(). We must do this here 12580 * as we are the "originator" of the shadow buf. 12581 */ 12582 bp_mapout(bp); 12583 12584 /* 12585 * Null out b_iodone before freeing the bp, to ensure that the driver 12586 * never gets confused by a stale value in this field. (Just a little 12587 * extra defensiveness here.) 12588 */ 12589 bp->b_iodone = NULL; 12590 12591 freerbuf(bp); 12592 12593 kmem_free(xp, sizeof (struct sd_xbuf)); 12594 } 12595
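/*
 * Illustrative note (not part of the original driver source): clone and
 * shadow bufs are always released through the matching free routine:
 *
 *	bp = sd_bioclone_alloc(bp, count, blocknum,
 *	    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
 *	...					(transport the clone)
 *	sd_bioclone_free(bp);			(on the iodone side)
 *
 * and likewise sd_shadow_buf_alloc()/sd_shadow_buf_free() below.  The
 * pairing matters because the two allocators obtain the data area
 * differently (bioclone(9F) versus getrbuf()/scsi_alloc_consistent_buf()),
 * so only the matching free routine knows how to tear the buf down.
 */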
12596 /* 12597 * Function: sd_shadow_buf_free 12598 * 12599 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12600 * 12601 * Context: May be called under interrupt context 12602 */ 12603 12604 static void 12605 sd_shadow_buf_free(struct buf *bp) 12606 { 12607 struct sd_xbuf *xp; 12608 12609 ASSERT(bp != NULL); 12610 xp = SD_GET_XBUF(bp); 12611 ASSERT(xp != NULL); 12612 12613 #if defined(__sparc) 12614 /* 12615 * Call bp_mapout() before freeing the buf, in case a lower 12616 * layer or HBA had done a bp_mapin(). We must do this here 12617 * as we are the "originator" of the shadow buf. 12618 */ 12619 bp_mapout(bp); 12620 #endif 12621 12622 /* 12623 * Null out b_iodone before freeing the bp, to ensure that the driver 12624 * never gets confused by a stale value in this field. (Just a little 12625 * extra defensiveness here.) 12626 */ 12627 bp->b_iodone = NULL; 12628 12629 #if defined(__i386) || defined(__amd64) 12630 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12631 freerbuf(bp); 12632 #else 12633 scsi_free_consistent_buf(bp); 12634 #endif 12635 12636 kmem_free(xp, sizeof (struct sd_xbuf)); 12637 } 12638 12639 12640 /* 12641 * Function: sd_print_transport_rejected_message 12642 * 12643 * Description: This implements the ludicrously complex rules for printing 12644 * a "transport rejected" message. This is to address the 12645 * specific problem of having a flood of this error message 12646 * produced when a failover occurs. 12647 * 12648 * Context: Any. 12649 */ 12650 12651 static void 12652 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12653 int code) 12654 { 12655 ASSERT(un != NULL); 12656 ASSERT(mutex_owned(SD_MUTEX(un))); 12657 ASSERT(xp != NULL); 12658 12659 /* 12660 * Print the "transport rejected" message under the following 12661 * conditions: 12662 * 12663 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12664 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12665 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12666 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12667 * scsi_transport(9F) (which indicates that the target might have 12668 * gone off-line). This uses the un->un_tran_fatal_count 12669 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12670 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12671 * from scsi_transport(). 12672 * 12673 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12674 * the preceding cases in order for the message to be printed. 12675 */ 12676 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12677 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12678 (code != TRAN_FATAL_ERROR) || 12679 (un->un_tran_fatal_count == 1)) { 12680 switch (code) { 12681 case TRAN_BADPKT: 12682 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12683 "transport rejected bad packet\n"); 12684 break; 12685 case TRAN_FATAL_ERROR: 12686 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12687 "transport rejected fatal error\n"); 12688 break; 12689 default: 12690 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12691 "transport rejected (%d)\n", code); 12692 break; 12693 } 12694 } 12695 } 12696 } 12697 12698
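/*
 * Illustrative example (not part of the original driver source): suppose
 * the wait queue for sd_add_buf_to_waitq() below holds requests at block
 * numbers
 *
 *	10 -> 30 -> 50 -> 5 -> 8
 *
 * The ascending run 10, 30, 50 is the first (current sweep) list; the
 * inversion at 50 -> 5 marks the start of the second (next sweep) list.
 * A new buf at block 7 sorts below the head (7 < 10), so it belongs on
 * the second list and is inserted between 5 and 8.  A new buf at block
 * 40 sorts into the first list, between 30 and 50.
 */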
12715 */ 12716 12717 static void 12718 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12719 { 12720 struct buf *ap; 12721 12722 ASSERT(bp != NULL); 12723 ASSERT(un != NULL); 12724 ASSERT(mutex_owned(SD_MUTEX(un))); 12725 12726 /* If the queue is empty, add the buf as the only entry & return. */ 12727 if (un->un_waitq_headp == NULL) { 12728 ASSERT(un->un_waitq_tailp == NULL); 12729 un->un_waitq_headp = un->un_waitq_tailp = bp; 12730 bp->av_forw = NULL; 12731 return; 12732 } 12733 12734 ASSERT(un->un_waitq_tailp != NULL); 12735 12736 /* 12737 * If sorting is disabled, just add the buf to the tail end of 12738 * the wait queue and return. 12739 */ 12740 if (un->un_f_disksort_disabled) { 12741 un->un_waitq_tailp->av_forw = bp; 12742 un->un_waitq_tailp = bp; 12743 bp->av_forw = NULL; 12744 return; 12745 } 12746 12747 /* 12748 * Sort thru the list of requests currently on the wait queue 12749 * and add the new buf request at the appropriate position. 12750 * 12751 * The un->un_waitq_headp is an activity chain pointer on which 12752 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12753 * first queue holds those requests which are positioned after 12754 * the current SD_GET_BLKNO() (in the first request); the second holds 12755 * requests which came in after their SD_GET_BLKNO() number was passed. 12756 * Thus we implement a one-way scan, retracting after reaching 12757 * the end of the drive to the first request on the second 12758 * queue, at which time it becomes the first queue. 12759 * A one-way scan is natural because of the way UNIX read-ahead 12760 * blocks are allocated. 12761 * 12762 * If we lie after the first request, then we must locate the 12763 * second request list and add ourselves to it. 12764 */ 12765 ap = un->un_waitq_headp; 12766 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12767 while (ap->av_forw != NULL) { 12768 /* 12769 * Look for an "inversion" in the (normally 12770 * ascending) block numbers. This indicates 12771 * the start of the second request list. 12772 */ 12773 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12774 /* 12775 * Search the second request list for the 12776 * first request at a larger block number. 12777 * We go before that; however if there is 12778 * no such request, we go at the end. 12779 */ 12780 do { 12781 if (SD_GET_BLKNO(bp) < 12782 SD_GET_BLKNO(ap->av_forw)) { 12783 goto insert; 12784 } 12785 ap = ap->av_forw; 12786 } while (ap->av_forw != NULL); 12787 goto insert; /* after last */ 12788 } 12789 ap = ap->av_forw; 12790 } 12791 12792 /* 12793 * No inversions... we will go after the last, and 12794 * be the first request in the second request list. 12795 */ 12796 goto insert; 12797 } 12798 12799 /* 12800 * Request is at/after the current request... 12801 * sort in the first request list. 12802 */ 12803 while (ap->av_forw != NULL) { 12804 /* 12805 * We want to go after the current request (1) if 12806 * there is an inversion after it (i.e. it is the end 12807 * of the first request list), or (2) if the next 12808 * request is a larger block no. than our request. 12809 */ 12810 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12811 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12812 goto insert; 12813 } 12814 ap = ap->av_forw; 12815 } 12816 12817 /* 12818 * Neither a second list nor a larger request, therefore 12819 * we go at the end of the first list (which is the same 12820 * as the end of the whole shebang). 
12821 */ 12822 insert: 12823 bp->av_forw = ap->av_forw; 12824 ap->av_forw = bp; 12825 12826 /* 12827 * If we inserted onto the tail end of the waitq, make sure the 12828 * tail pointer is updated. 12829 */ 12830 if (ap == un->un_waitq_tailp) { 12831 un->un_waitq_tailp = bp; 12832 } 12833 } 12834 12835 12836 /* 12837 * Function: sd_start_cmds 12838 * 12839 * Description: Remove and transport cmds from the driver queues. 12840 * 12841 * Arguments: un - pointer to the unit (soft state) struct for the target. 12842 * 12843 * immed_bp - ptr to a buf to be transported immediately. Only 12844 * the immed_bp is transported; bufs on the waitq are not 12845 * processed and the un_retry_bp is not checked. If immed_bp is 12846 * NULL, then normal queue processing is performed. 12847 * 12848 * Context: May be called from kernel thread context, interrupt context, 12849 * or runout callback context. This function may not block or 12850 * call routines that block. 12851 */ 12852 12853 static void 12854 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12855 { 12856 struct sd_xbuf *xp; 12857 struct buf *bp; 12858 void (*statp)(kstat_io_t *); 12859 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12860 void (*saved_statp)(kstat_io_t *); 12861 #endif 12862 int rval; 12863 12864 ASSERT(un != NULL); 12865 ASSERT(mutex_owned(SD_MUTEX(un))); 12866 ASSERT(un->un_ncmds_in_transport >= 0); 12867 ASSERT(un->un_throttle >= 0); 12868 12869 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12870 12871 do { 12872 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12873 saved_statp = NULL; 12874 #endif 12875 12876 /* 12877 * If we are syncing or dumping, fail the command to 12878 * avoid recursively calling back into scsi_transport(). 12879 * The dump I/O itself uses a separate code path so this 12880 * only prevents non-dump I/O from being sent while dumping. 12881 * File system sync takes place before dumping begins. 12882 * During panic, filesystem I/O is allowed provided 12883 * un_in_callback is <= 1. This is to prevent recursion 12884 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12885 * sd_start_cmds and so on. See panic.c for more information 12886 * about the states the system can be in during panic. 12887 */ 12888 if ((un->un_state == SD_STATE_DUMPING) || 12889 (ddi_in_panic() && (un->un_in_callback > 1))) { 12890 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12891 "sd_start_cmds: panicking\n"); 12892 goto exit; 12893 } 12894 12895 if ((bp = immed_bp) != NULL) { 12896 /* 12897 * We have a bp that must be transported immediately. 12898 * It's OK to transport the immed_bp here without doing 12899 * the throttle limit check because the immed_bp is 12900 * always used in a retry/recovery case. This means 12901 * that we know we are not at the throttle limit by 12902 * virtue of the fact that to get here we must have 12903 * already gotten a command back via sdintr(). This also 12904 * relies on (1) the command on un_retry_bp preventing 12905 * further commands from the waitq from being issued; 12906 * and (2) the code in sd_retry_command checking the 12907 * throttle limit before issuing a delayed or immediate 12908 * retry. This holds even if the throttle limit is 12909 * currently ratcheted down from its maximum value. 
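 * (Restated as an informal invariant, not a claim beyond the
 * reasoning above: arriving here with an immed_bp implies that a
 * completion has already come back through sdintr(), so we are
 * below the throttle limit without having to check it.)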
12910 */ 12911 statp = kstat_runq_enter; 12912 if (bp == un->un_retry_bp) { 12913 ASSERT((un->un_retry_statp == NULL) || 12914 (un->un_retry_statp == kstat_waitq_enter) || 12915 (un->un_retry_statp == 12916 kstat_runq_back_to_waitq)); 12917 /* 12918 * If the waitq kstat was incremented when 12919 * sd_set_retry_bp() queued this bp for a retry, 12920 * then we must set up statp so that the waitq 12921 * count will get decremented correctly below. 12922 * Also we must clear un->un_retry_statp to 12923 * ensure that we do not act on a stale value 12924 * in this field. 12925 */ 12926 if ((un->un_retry_statp == kstat_waitq_enter) || 12927 (un->un_retry_statp == 12928 kstat_runq_back_to_waitq)) { 12929 statp = kstat_waitq_to_runq; 12930 } 12931 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12932 saved_statp = un->un_retry_statp; 12933 #endif 12934 un->un_retry_statp = NULL; 12935 12936 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12937 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12938 "un_throttle:%d un_ncmds_in_transport:%d\n", 12939 un, un->un_retry_bp, un->un_throttle, 12940 un->un_ncmds_in_transport); 12941 } else { 12942 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12943 "processing priority bp:0x%p\n", bp); 12944 } 12945 12946 } else if ((bp = un->un_waitq_headp) != NULL) { 12947 /* 12948 * A command on the waitq is ready to go, but do not 12949 * send it if: 12950 * 12951 * (1) the throttle limit has been reached, or 12952 * (2) a retry is pending, or 12953 * (3) a START_STOP_UNIT callback is pending, or 12954 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12955 * command is pending. 12956 * 12957 * For all of these conditions, IO processing will 12958 * restart after the condition is cleared. 12959 */ 12960 if (un->un_ncmds_in_transport >= un->un_throttle) { 12961 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12962 "sd_start_cmds: exiting, " 12963 "throttle limit reached!\n"); 12964 goto exit; 12965 } 12966 if (un->un_retry_bp != NULL) { 12967 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12968 "sd_start_cmds: exiting, retry pending!\n"); 12969 goto exit; 12970 } 12971 if (un->un_startstop_timeid != NULL) { 12972 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12973 "sd_start_cmds: exiting, " 12974 "START_STOP pending!\n"); 12975 goto exit; 12976 } 12977 if (un->un_direct_priority_timeid != NULL) { 12978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12979 "sd_start_cmds: exiting, " 12980 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 12981 goto exit; 12982 } 12983 12984 /* Dequeue the command */ 12985 un->un_waitq_headp = bp->av_forw; 12986 if (un->un_waitq_headp == NULL) { 12987 un->un_waitq_tailp = NULL; 12988 } 12989 bp->av_forw = NULL; 12990 statp = kstat_waitq_to_runq; 12991 SD_TRACE(SD_LOG_IO_CORE, un, 12992 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12993 12994 } else { 12995 /* No work to do so bail out now */ 12996 SD_TRACE(SD_LOG_IO_CORE, un, 12997 "sd_start_cmds: no more work, exiting!\n"); 12998 goto exit; 12999 } 13000 13001 /* 13002 * Reset the state to normal. This is the mechanism by which 13003 * the state transitions from either SD_STATE_RWAIT or 13004 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13005 * If state is SD_STATE_PM_CHANGING then this command is 13006 * part of the device power control and the state must 13007 * not be put back to normal. Doing so would 13008 * allow new commands to proceed when they shouldn't; 13009 * the device may be going off. 
13010 */ 13011 if ((un->un_state != SD_STATE_SUSPENDED) && 13012 (un->un_state != SD_STATE_PM_CHANGING)) { 13013 New_state(un, SD_STATE_NORMAL); 13014 } 13015 13016 xp = SD_GET_XBUF(bp); 13017 ASSERT(xp != NULL); 13018 13019 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13020 /* 13021 * Allocate the scsi_pkt if we need one, or attach DMA 13022 * resources if we have a scsi_pkt that needs them. The 13023 * latter should only occur for commands that are being 13024 * retried. 13025 */ 13026 if ((xp->xb_pktp == NULL) || 13027 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13028 #else 13029 if (xp->xb_pktp == NULL) { 13030 #endif 13031 /* 13032 * There is no scsi_pkt allocated for this buf. Call 13033 * the initpkt function to allocate & init one. 13034 * 13035 * The scsi_init_pkt runout callback functionality is 13036 * implemented as follows: 13037 * 13038 * 1) The initpkt function always calls 13039 * scsi_init_pkt(9F) with sdrunout specified as the 13040 * callback routine. 13041 * 2) A successfully allocated packet is initialized and 13042 * the I/O is transported. 13043 * 3) The I/O associated with an allocation resource 13044 * failure is left on its queue to be retried via 13045 * runout or the next I/O. 13046 * 4) The I/O associated with a DMA error is removed 13047 * from the queue and failed with EIO. Processing of 13048 * the transport queues is also halted to be 13049 * restarted via runout or the next I/O. 13050 * 5) The I/O associated with a CDB size or packet 13051 * size error is removed from the queue and failed 13052 * with EIO. Processing of the transport queues is 13053 * continued. 13054 * 13055 * Note: there is no interface for canceling a runout 13056 * callback. To prevent the driver from detaching or 13057 * suspending while a runout is pending, the driver 13058 * state is set to SD_STATE_RWAIT. 13059 * 13060 * Note: using the scsi_init_pkt callback facility can 13061 * result in an I/O request persisting at the head of 13062 * the list which cannot be satisfied even after 13063 * multiple retries. In the future the driver may 13064 * implement some kind of maximum runout count before 13065 * failing an I/O. 13066 * 13067 * Note: the use of funcp below may seem superfluous, 13068 * but it helps warlock figure out the correct 13069 * initpkt function calls (see [s]sd.wlcmd). 13070 */ 13071 struct scsi_pkt *pktp; 13072 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13073 13074 ASSERT(bp != un->un_rqs_bp); 13075 13076 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13077 switch ((*funcp)(bp, &pktp)) { 13078 case SD_PKT_ALLOC_SUCCESS: 13079 xp->xb_pktp = pktp; 13080 SD_TRACE(SD_LOG_IO_CORE, un, 13081 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13082 pktp); 13083 goto got_pkt; 13084 13085 case SD_PKT_ALLOC_FAILURE: 13086 /* 13087 * Temporary (hopefully) resource depletion. 13088 * Since retries and RQS commands always have a 13089 * scsi_pkt allocated, these cases should never 13090 * get here. So the only cases this needs to 13091 * handle are a bp from the waitq (which we put 13092 * back onto the waitq for sdrunout), or a bp 13093 * sent as an immed_bp (which we just fail). 13094 */ 13095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13096 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13097 13098 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13099 13100 if (bp == immed_bp) { 13101 /* 13102 * If SD_XB_DMA_FREED is clear, then 13103 * this is a failure to allocate a 13104 * scsi_pkt, and we must fail the 13105 * command. 
13106 */ 13107 if ((xp->xb_pkt_flags & 13108 SD_XB_DMA_FREED) == 0) { 13109 break; 13110 } 13111 13112 /* 13113 * If this immediate command is NOT our 13114 * un_retry_bp, then we must fail it. 13115 */ 13116 if (bp != un->un_retry_bp) { 13117 break; 13118 } 13119 13120 /* 13121 * We get here if this cmd is our 13122 * un_retry_bp that was DMAFREED, but 13123 * scsi_init_pkt() failed to reallocate 13124 * DMA resources when we attempted to 13125 * retry it. This can happen when an 13126 * mpxio failover is in progress, but 13127 * we don't want to just fail the 13128 * command in this case. 13129 * 13130 * Use timeout(9F) to restart it after 13131 * a 100ms delay. We don't want to 13132 * let sdrunout() restart it, because 13133 * sdrunout() is just supposed to start 13134 * commands that are sitting on the 13135 * wait queue. The un_retry_bp stays 13136 * set until the command completes, but 13137 * sdrunout can be called many times 13138 * before that happens. Since sdrunout 13139 * cannot tell if the un_retry_bp is 13140 * already in the transport, it could 13141 * end up calling scsi_transport() for 13142 * the un_retry_bp multiple times. 13143 * 13144 * Also: don't schedule the callback 13145 * if some other callback is already 13146 * pending. 13147 */ 13148 if (un->un_retry_statp == NULL) { 13149 /* 13150 * restore the kstat pointer to 13151 * keep kstat counts coherent 13152 * when we do retry the command. 13153 */ 13154 un->un_retry_statp = 13155 saved_statp; 13156 } 13157 13158 if ((un->un_startstop_timeid == NULL) && 13159 (un->un_retry_timeid == NULL) && 13160 (un->un_direct_priority_timeid == 13161 NULL)) { 13162 13163 un->un_retry_timeid = 13164 timeout( 13165 sd_start_retry_command, 13166 un, SD_RESTART_TIMEOUT); 13167 } 13168 goto exit; 13169 } 13170 13171 #else 13172 if (bp == immed_bp) { 13173 break; /* Just fail the command */ 13174 } 13175 #endif 13176 13177 /* Add the buf back to the head of the waitq */ 13178 bp->av_forw = un->un_waitq_headp; 13179 un->un_waitq_headp = bp; 13180 if (un->un_waitq_tailp == NULL) { 13181 un->un_waitq_tailp = bp; 13182 } 13183 goto exit; 13184 13185 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13186 /* 13187 * HBA DMA resource failure. Fail the command 13188 * and continue processing of the queues. 13189 */ 13190 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13191 "sd_start_cmds: " 13192 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13193 break; 13194 13195 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13196 /* 13197 * Note:x86: Partial DMA mapping not supported 13198 * for USCSI commands, and all the needed DMA 13199 * resources were not allocated. 13200 */ 13201 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13202 "sd_start_cmds: " 13203 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13204 break; 13205 13206 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13207 /* 13208 * Note:x86: Request cannot fit into CDB based 13209 * on lba and len. 13210 */ 13211 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13212 "sd_start_cmds: " 13213 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13214 break; 13215 13216 default: 13217 /* Should NEVER get here! */ 13218 panic("scsi_initpkt error"); 13219 /*NOTREACHED*/ 13220 } 13221 13222 /* 13223 * Fatal error in allocating a scsi_pkt for this buf. 13224 * Update kstats & return the buf with an error code. 13225 * We must use sd_return_failed_command_no_restart() to 13226 * avoid a recursive call back into sd_start_cmds(). 13227 * However this also means that we must keep processing 13228 * the waitq here in order to avoid stalling. 
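 *
 * (For reference, the restart path being avoided is roughly:
 * sd_return_failed_command() -> sd_return_command() ->
 * sd_start_cmds(); the _no_restart variant simply omits that
 * final restart call.)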
13229 */ 13230 if (statp == kstat_waitq_to_runq) { 13231 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13232 } 13233 sd_return_failed_command_no_restart(un, bp, EIO); 13234 if (bp == immed_bp) { 13235 /* immed_bp is gone by now, so clear this */ 13236 immed_bp = NULL; 13237 } 13238 continue; 13239 } 13240 got_pkt: 13241 if (bp == immed_bp) { 13242 /* goto the head of the class.... */ 13243 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13244 } 13245 13246 un->un_ncmds_in_transport++; 13247 SD_UPDATE_KSTATS(un, statp, bp); 13248 13249 /* 13250 * Call scsi_transport() to send the command to the target. 13251 * According to SCSA architecture, we must drop the mutex here 13252 * before calling scsi_transport() in order to avoid deadlock. 13253 * Note that the scsi_pkt's completion routine can be executed 13254 * (from interrupt context) even before the call to 13255 * scsi_transport() returns. 13256 */ 13257 SD_TRACE(SD_LOG_IO_CORE, un, 13258 "sd_start_cmds: calling scsi_transport()\n"); 13259 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13260 13261 mutex_exit(SD_MUTEX(un)); 13262 rval = scsi_transport(xp->xb_pktp); 13263 mutex_enter(SD_MUTEX(un)); 13264 13265 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13266 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13267 13268 switch (rval) { 13269 case TRAN_ACCEPT: 13270 /* Clear this with every pkt accepted by the HBA */ 13271 un->un_tran_fatal_count = 0; 13272 break; /* Success; try the next cmd (if any) */ 13273 13274 case TRAN_BUSY: 13275 un->un_ncmds_in_transport--; 13276 ASSERT(un->un_ncmds_in_transport >= 0); 13277 13278 /* 13279 * Don't retry request sense, the sense data 13280 * is lost when another request is sent. 13281 * Free up the rqs buf and retry 13282 * the original failed cmd. Update kstat. 13283 */ 13284 if (bp == un->un_rqs_bp) { 13285 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13286 bp = sd_mark_rqs_idle(un, xp); 13287 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13288 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13289 kstat_waitq_enter); 13290 goto exit; 13291 } 13292 13293 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13294 /* 13295 * Free the DMA resources for the scsi_pkt. This will 13296 * allow mpxio to select another path the next time 13297 * we call scsi_transport() with this scsi_pkt. 13298 * See sdintr() for the rationalization behind this. 13299 */ 13300 if ((un->un_f_is_fibre == TRUE) && 13301 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13302 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13303 scsi_dmafree(xp->xb_pktp); 13304 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13305 } 13306 #endif 13307 13308 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13309 /* 13310 * Commands that are SD_PATH_DIRECT_PRIORITY 13311 * are for error recovery situations. These do 13312 * not use the normal command waitq, so if they 13313 * get a TRAN_BUSY we cannot put them back onto 13314 * the waitq for later retry. One possible 13315 * problem is that there could already be some 13316 * other command on un_retry_bp that is waiting 13317 * for this one to complete, so we would be 13318 * deadlocked if we put this command back onto 13319 * the waitq for later retry (since un_retry_bp 13320 * must complete before the driver gets back to 13321 * commands on the waitq). 13322 * 13323 * To avoid deadlock we must schedule a callback 13324 * that will restart this command after a set 13325 * interval. 
This should keep retrying for as 13326 * long as the underlying transport keeps 13327 * returning TRAN_BUSY (just like for other 13328 * commands). Use the same timeout interval as 13329 * for the ordinary TRAN_BUSY retry. 13330 */ 13331 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13332 "sd_start_cmds: scsi_transport() returned " 13333 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13334 13335 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13336 un->un_direct_priority_timeid = 13337 timeout(sd_start_direct_priority_command, 13338 bp, SD_BSY_TIMEOUT / 500); 13339 13340 goto exit; 13341 } 13342 13343 /* 13344 * For TRAN_BUSY, we want to reduce the throttle value, 13345 * unless we are retrying a command. 13346 */ 13347 if (bp != un->un_retry_bp) { 13348 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13349 } 13350 13351 /* 13352 * Set up the bp to be tried again 10 ms later. 13353 * Note:x86: Is there a timeout value in the sd_lun 13354 * for this condition? 13355 */ 13356 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13357 kstat_runq_back_to_waitq); 13358 goto exit; 13359 13360 case TRAN_FATAL_ERROR: 13361 un->un_tran_fatal_count++; 13362 /* FALLTHRU */ 13363 13364 case TRAN_BADPKT: 13365 default: 13366 un->un_ncmds_in_transport--; 13367 ASSERT(un->un_ncmds_in_transport >= 0); 13368 13369 /* 13370 * If this is our REQUEST SENSE command with a 13371 * transport error, we must get back the pointers 13372 * to the original buf, and mark the REQUEST 13373 * SENSE command as "available". 13374 */ 13375 if (bp == un->un_rqs_bp) { 13376 bp = sd_mark_rqs_idle(un, xp); 13377 xp = SD_GET_XBUF(bp); 13378 } else { 13379 /* 13380 * Legacy behavior: do not update transport 13381 * error count for request sense commands. 13382 */ 13383 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13384 } 13385 13386 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13387 sd_print_transport_rejected_message(un, xp, rval); 13388 13389 /* 13390 * We must use sd_return_failed_command_no_restart() to 13391 * avoid a recursive call back into sd_start_cmds(). 13392 * However this also means that we must keep processing 13393 * the waitq here in order to avoid stalling. 13394 */ 13395 sd_return_failed_command_no_restart(un, bp, EIO); 13396 13397 /* 13398 * Notify any threads waiting in sd_ddi_suspend() that 13399 * a command completion has occurred. 13400 */ 13401 if (un->un_state == SD_STATE_SUSPENDED) { 13402 cv_broadcast(&un->un_disk_busy_cv); 13403 } 13404 13405 if (bp == immed_bp) { 13406 /* immed_bp is gone by now, so clear this */ 13407 immed_bp = NULL; 13408 } 13409 break; 13410 } 13411 13412 } while (immed_bp == NULL); 13413 13414 exit: 13415 ASSERT(mutex_owned(SD_MUTEX(un))); 13416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13417 } 13418 13419 13420 /* 13421 * Function: sd_return_command 13422 * 13423 * Description: Returns a command to its originator (with or without an 13424 * error). Also starts commands waiting to be transported 13425 * to the target. 
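 *		(Concretely: on completion this routine normally calls
 *		sd_start_cmds(un, NULL) to keep the wait queue draining;
 *		if the instance is suspended it instead just wakes any
 *		threads blocked in sd_ddi_suspend().)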
13426 * 13427 * Context: May be called from interrupt, kernel, or timeout context 13428 */ 13429 13430 static void 13431 sd_return_command(struct sd_lun *un, struct buf *bp) 13432 { 13433 struct sd_xbuf *xp; 13434 struct scsi_pkt *pktp; 13435 13436 ASSERT(bp != NULL); 13437 ASSERT(un != NULL); 13438 ASSERT(mutex_owned(SD_MUTEX(un))); 13439 ASSERT(bp != un->un_rqs_bp); 13440 xp = SD_GET_XBUF(bp); 13441 ASSERT(xp != NULL); 13442 13443 pktp = SD_GET_PKTP(bp); 13444 13445 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13446 13447 /* 13448 * Note: check for the "sdrestart failed" case. 13449 */ 13450 if ((un->un_partial_dma_supported == 1) && 13451 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13452 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13453 (xp->xb_pktp->pkt_resid == 0)) { 13454 13455 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13456 /* 13457 * Successfully set up next portion of cmd 13458 * transfer, try sending it 13459 */ 13460 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13461 NULL, NULL, 0, (clock_t)0, NULL); 13462 sd_start_cmds(un, NULL); 13463 return; /* Note:x86: need a return here? */ 13464 } 13465 } 13466 13467 /* 13468 * If this is the failfast bp, clear it from un_failfast_bp. This 13469 * can happen if upon being re-tried the failfast bp either 13470 * succeeded or encountered another error (possibly even a different 13471 * error than the one that precipitated the failfast state, but in 13472 * that case it would have had to exhaust retries as well). Regardless, 13473 * this should not occur whenever the instance is in the active 13474 * failfast state. 13475 */ 13476 if (bp == un->un_failfast_bp) { 13477 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13478 un->un_failfast_bp = NULL; 13479 } 13480 13481 /* 13482 * Clear the failfast state upon successful completion of ANY cmd. 13483 */ 13484 if (bp->b_error == 0) { 13485 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13486 } 13487 13488 /* 13489 * This is used if the command was retried one or more times. Show that 13490 * we are done with it, and allow processing of the waitq to resume. 13491 */ 13492 if (bp == un->un_retry_bp) { 13493 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13494 "sd_return_command: un:0x%p: " 13495 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13496 un->un_retry_bp = NULL; 13497 un->un_retry_statp = NULL; 13498 } 13499 13500 SD_UPDATE_RDWR_STATS(un, bp); 13501 SD_UPDATE_PARTITION_STATS(un, bp); 13502 13503 switch (un->un_state) { 13504 case SD_STATE_SUSPENDED: 13505 /* 13506 * Notify any threads waiting in sd_ddi_suspend() that 13507 * a command completion has occurred. 13508 */ 13509 cv_broadcast(&un->un_disk_busy_cv); 13510 break; 13511 default: 13512 sd_start_cmds(un, NULL); 13513 break; 13514 } 13515 13516 /* Return this command up the iodone chain to its originator. */ 13517 mutex_exit(SD_MUTEX(un)); 13518 13519 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13520 xp->xb_pktp = NULL; 13521 13522 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13523 13524 ASSERT(!mutex_owned(SD_MUTEX(un))); 13525 mutex_enter(SD_MUTEX(un)); 13526 13527 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13528 } 13529 13530 13531 /* 13532 * Function: sd_return_failed_command 13533 * 13534 * Description: Command completion when an error occurred. 
13535 * 13536 * Context: May be called from interrupt context 13537 */ 13538 13539 static void 13540 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13541 { 13542 ASSERT(bp != NULL); 13543 ASSERT(un != NULL); 13544 ASSERT(mutex_owned(SD_MUTEX(un))); 13545 13546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13547 "sd_return_failed_command: entry\n"); 13548 13549 /* 13550 * b_resid could already be nonzero due to a partial data 13551 * transfer, so do not change it here. 13552 */ 13553 SD_BIOERROR(bp, errcode); 13554 13555 sd_return_command(un, bp); 13556 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13557 "sd_return_failed_command: exit\n"); 13558 } 13559 13560 13561 /* 13562 * Function: sd_return_failed_command_no_restart 13563 * 13564 * Description: Same as sd_return_failed_command, but ensures that no 13565 * call back into sd_start_cmds will be issued. 13566 * 13567 * Context: May be called from interrupt context 13568 */ 13569 13570 static void 13571 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13572 int errcode) 13573 { 13574 struct sd_xbuf *xp; 13575 13576 ASSERT(bp != NULL); 13577 ASSERT(un != NULL); 13578 ASSERT(mutex_owned(SD_MUTEX(un))); 13579 xp = SD_GET_XBUF(bp); 13580 ASSERT(xp != NULL); 13581 ASSERT(errcode != 0); 13582 13583 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13584 "sd_return_failed_command_no_restart: entry\n"); 13585 13586 /* 13587 * b_resid could already be nonzero due to a partial data 13588 * transfer, so do not change it here. 13589 */ 13590 SD_BIOERROR(bp, errcode); 13591 13592 /* 13593 * If this is the failfast bp, clear it. This can happen if the 13594 * failfast bp encountered a fatal error when we attempted to 13595 * re-try it (such as a scsi_transport(9F) failure). However 13596 * we should NOT be in an active failfast state if the failfast 13597 * bp is not NULL. 13598 */ 13599 if (bp == un->un_failfast_bp) { 13600 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13601 un->un_failfast_bp = NULL; 13602 } 13603 13604 if (bp == un->un_retry_bp) { 13605 /* 13606 * This command was retried one or more times. Show that we are 13607 * done with it, and allow processing of the waitq to resume. 13608 */ 13609 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13610 "sd_return_failed_command_no_restart: " 13611 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13612 un->un_retry_bp = NULL; 13613 un->un_retry_statp = NULL; 13614 } 13615 13616 SD_UPDATE_RDWR_STATS(un, bp); 13617 SD_UPDATE_PARTITION_STATS(un, bp); 13618 13619 mutex_exit(SD_MUTEX(un)); 13620 13621 if (xp->xb_pktp != NULL) { 13622 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13623 xp->xb_pktp = NULL; 13624 } 13625 13626 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13627 13628 mutex_enter(SD_MUTEX(un)); 13629 13630 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13631 "sd_return_failed_command_no_restart: exit\n"); 13632 } 13633 13634 13635 /* 13636 * Function: sd_retry_command 13637 * 13638 * Description: Queue up a command for retry, or (optionally) fail it 13639 * if retry counts are exhausted. 13640 * 13641 * Arguments: un - Pointer to the sd_lun struct for the target. 13642 * 13643 * bp - Pointer to the buf for the command to be retried. 13644 * 13645 * retry_check_flag - Flag to see which (if any) of the retry 13646 * counts should be decremented/checked. If the indicated 13647 * retry count is exhausted, then the command will not be 13648 * retried; it will be failed instead. 
This should use a 13649 * value equal to one of the following: 13650 * 13651 * SD_RETRIES_NOCHECK 13652 * SD_RETRIES_STANDARD 13653 * SD_RETRIES_VICTIM 13654 * 13655 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13656 * if the check should be made to see if FLAG_ISOLATE is set 13657 * in the pkt. If FLAG_ISOLATE is set, then the command is 13658 * not retried; it is simply failed. 13659 * 13660 * user_funcp - Ptr to function to call before dispatching the 13661 * command. May be NULL if no action needs to be performed. 13662 * (Primarily intended for printing messages.) 13663 * 13664 * user_arg - Optional argument to be passed along to 13665 * the user_funcp call. 13666 * 13667 * failure_code - errno return code to set in the bp if the 13668 * command is going to be failed. 13669 * 13670 * retry_delay - Retry delay interval in (clock_t) units. May 13671 * be zero, which indicates that the command should be retried 13672 * immediately (i.e., without an intervening delay). 13673 * 13674 * statp - Ptr to kstat function to be updated if the command 13675 * is queued for a delayed retry. May be NULL if no kstat 13676 * update is desired. 13677 * 13678 * Context: May be called from interrupt context. 13679 */ 13680 13681 static void 13682 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13683 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13684 code), void *user_arg, int failure_code, clock_t retry_delay, 13685 void (*statp)(kstat_io_t *)) 13686 { 13687 struct sd_xbuf *xp; 13688 struct scsi_pkt *pktp; 13689 13690 ASSERT(un != NULL); 13691 ASSERT(mutex_owned(SD_MUTEX(un))); 13692 ASSERT(bp != NULL); 13693 xp = SD_GET_XBUF(bp); 13694 ASSERT(xp != NULL); 13695 pktp = SD_GET_PKTP(bp); 13696 ASSERT(pktp != NULL); 13697 13698 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13699 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13700 13701 /* 13702 * If we are syncing or dumping, fail the command to avoid 13703 * recursively calling back into scsi_transport(). 13704 */ 13705 if (ddi_in_panic()) { 13706 goto fail_command_no_log; 13707 } 13708 13709 /* 13710 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13711 * log an error and fail the command. 13712 */ 13713 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13714 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13715 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13716 sd_dump_memory(un, SD_LOG_IO, "CDB", 13717 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13718 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13719 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13720 goto fail_command; 13721 } 13722 13723 /* 13724 * If we are suspended, then put the command onto the head of the 13725 * wait queue since we don't want to start more commands, and 13726 * clear un_retry_bp. The command will be handled from the 13727 * wait queue when we are resumed. 
13728 */ 13729 switch (un->un_state) { 13730 case SD_STATE_SUSPENDED: 13731 case SD_STATE_DUMPING: 13732 bp->av_forw = un->un_waitq_headp; 13733 un->un_waitq_headp = bp; 13734 if (un->un_waitq_tailp == NULL) { 13735 un->un_waitq_tailp = bp; 13736 } 13737 if (bp == un->un_retry_bp) { 13738 un->un_retry_bp = NULL; 13739 un->un_retry_statp = NULL; 13740 } 13741 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13742 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13743 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13744 return; 13745 default: 13746 break; 13747 } 13748 13749 /* 13750 * If the caller wants us to check FLAG_ISOLATE, then see if that 13751 * is set; if it is then we do not want to retry the command. 13752 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13753 */ 13754 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13755 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13756 goto fail_command; 13757 } 13758 } 13759 13760 13761 /* 13762 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13763 * command timeout or a selection timeout has occurred. This means 13764 * that we were unable to establish any kind of communication with 13765 * the target, and subsequent retries and/or commands are likely 13766 * to encounter similar results and take a long time to complete. 13767 * 13768 * If this is a failfast error condition, we need to update the 13769 * failfast state, even if this bp does not have B_FAILFAST set. 13770 */ 13771 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13772 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13773 ASSERT(un->un_failfast_bp == NULL); 13774 /* 13775 * If we are already in the active failfast state, and 13776 * another failfast error condition has been detected, 13777 * then fail this command if it has B_FAILFAST set. 13778 * If B_FAILFAST is clear, then maintain the legacy 13779 * behavior of retrying heroically, even though this will 13780 * take a lot more time to fail the command. 13781 */ 13782 if (bp->b_flags & B_FAILFAST) { 13783 goto fail_command; 13784 } 13785 } else { 13786 /* 13787 * We're not in the active failfast state, but we 13788 * have a failfast error condition, so we must begin 13789 * transition to the next state. We do this regardless 13790 * of whether or not this bp has B_FAILFAST set. 13791 */ 13792 if (un->un_failfast_bp == NULL) { 13793 /* 13794 * This is the first bp to meet a failfast 13795 * condition so save it on un_failfast_bp & 13796 * do normal retry processing. Do not enter 13797 * active failfast state yet. This marks 13798 * entry into the "failfast pending" state. 13799 */ 13800 un->un_failfast_bp = bp; 13801 13802 } else if (un->un_failfast_bp == bp) { 13803 /* 13804 * This is the second time *this* bp has 13805 * encountered a failfast error condition, 13806 * so enter active failfast state & flush 13807 * queues as appropriate. 13808 */ 13809 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13810 un->un_failfast_bp = NULL; 13811 sd_failfast_flushq(un); 13812 13813 /* 13814 * Fail this bp now if B_FAILFAST set; 13815 * otherwise continue with retries. (It would 13816 * be pretty ironic if this bp succeeded on a 13817 * subsequent retry after we just flushed all 13818 * the queues). 
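 *
 * The failfast handling above amounts to a small state machine
 * (informal sketch; the "pending" condition is tracked via
 * un_failfast_bp rather than a distinct state value):
 *
 *	INACTIVE --failfast error--> pending (un_failfast_bp = bp)
 *	pending --same bp fails again--> ACTIVE (queues flushed)
 *	pending --different bp fails--> pending (normal retries)
 *	ACTIVE --successful completion--> INACTIVE
 *
 * A non-failfast error also clears the ACTIVE state (see the else
 * branch below) without touching the pending bp.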
13819 */ 13820 if (bp->b_flags & B_FAILFAST) { 13821 goto fail_command; 13822 } 13823 13824 #if !defined(lint) && !defined(__lint) 13825 } else { 13826 /* 13827 * If neither of the preceding conditionals 13828 * was true, it means that there is some 13829 * *other* bp that has met an initial failfast 13830 * condition and is currently either being 13831 * retried or is waiting to be retried. In 13832 * that case we should perform normal retry 13833 * processing on *this* bp, since there is a 13834 * chance that the current failfast condition 13835 * is transient and recoverable. If that does 13836 * not turn out to be the case, then retries 13837 * will be cleared when the wait queue is 13838 * flushed anyway. 13839 */ 13840 #endif 13841 } 13842 } 13843 } else { 13844 /* 13845 * SD_RETRIES_FAILFAST is clear, which indicates that we 13846 * likely were able to at least establish some level of 13847 * communication with the target and subsequent commands 13848 * and/or retries are likely to get through to the target. 13849 * In this case we want to be aggressive about clearing 13850 * the failfast state. Note that this does not affect 13851 * the "failfast pending" condition. 13852 */ 13853 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13854 } 13855 13856 13857 /* 13858 * Check the specified retry count to see if we can still do 13859 * any retries with this pkt before we should fail it. 13860 */ 13861 switch (retry_check_flag & SD_RETRIES_MASK) { 13862 case SD_RETRIES_VICTIM: 13863 /* 13864 * Check the victim retry count. If exhausted, then fall 13865 * thru & check against the standard retry count. 13866 */ 13867 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13868 /* Increment count & proceed with the retry */ 13869 xp->xb_victim_retry_count++; 13870 break; 13871 } 13872 /* Victim retries exhausted, fall back to std. retries... */ 13873 /* FALLTHRU */ 13874 13875 case SD_RETRIES_STANDARD: 13876 if (xp->xb_retry_count >= un->un_retry_count) { 13877 /* Retries exhausted, fail the command */ 13878 SD_TRACE(SD_LOG_IO_CORE, un, 13879 "sd_retry_command: retries exhausted!\n"); 13880 /* 13881 * Update b_resid for failed SCMD_READ & SCMD_WRITE 13882 * commands with nonzero pkt_resid. 13883 */ 13884 if ((pktp->pkt_reason == CMD_CMPLT) && 13885 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13886 (pktp->pkt_resid != 0)) { 13887 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13888 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13889 SD_UPDATE_B_RESID(bp, pktp); 13890 } 13891 } 13892 goto fail_command; 13893 } 13894 xp->xb_retry_count++; 13895 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13896 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13897 break; 13898 13899 case SD_RETRIES_UA: 13900 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13901 /* Retries exhausted, fail the command */ 13902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13903 "Unit Attention retries exhausted. 
" 13904 "Check the target.\n"); 13905 goto fail_command; 13906 } 13907 xp->xb_ua_retry_count++; 13908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13909 "sd_retry_command: retry count:%d\n", 13910 xp->xb_ua_retry_count); 13911 break; 13912 13913 case SD_RETRIES_BUSY: 13914 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13915 /* Retries exhausted, fail the command */ 13916 SD_TRACE(SD_LOG_IO_CORE, un, 13917 "sd_retry_command: retries exhausted!\n"); 13918 goto fail_command; 13919 } 13920 xp->xb_retry_count++; 13921 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13922 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13923 break; 13924 13925 case SD_RETRIES_NOCHECK: 13926 default: 13927 /* No retry count to check. Just proceed with the retry */ 13928 break; 13929 } 13930 13931 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13932 13933 /* 13934 * If we were given a zero timeout, we must attempt to retry the 13935 * command immediately (ie, without a delay). 13936 */ 13937 if (retry_delay == 0) { 13938 /* 13939 * Check some limiting conditions to see if we can actually 13940 * do the immediate retry. If we cannot, then we must 13941 * fall back to queueing up a delayed retry. 13942 */ 13943 if (un->un_ncmds_in_transport >= un->un_throttle) { 13944 /* 13945 * We are at the throttle limit for the target, 13946 * fall back to delayed retry. 13947 */ 13948 retry_delay = SD_BSY_TIMEOUT; 13949 statp = kstat_waitq_enter; 13950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13951 "sd_retry_command: immed. retry hit " 13952 "throttle!\n"); 13953 } else { 13954 /* 13955 * We're clear to proceed with the immediate retry. 13956 * First call the user-provided function (if any) 13957 */ 13958 if (user_funcp != NULL) { 13959 (*user_funcp)(un, bp, user_arg, 13960 SD_IMMEDIATE_RETRY_ISSUED); 13961 #ifdef __lock_lint 13962 sd_print_incomplete_msg(un, bp, user_arg, 13963 SD_IMMEDIATE_RETRY_ISSUED); 13964 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13965 SD_IMMEDIATE_RETRY_ISSUED); 13966 sd_print_sense_failed_msg(un, bp, user_arg, 13967 SD_IMMEDIATE_RETRY_ISSUED); 13968 #endif 13969 } 13970 13971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13972 "sd_retry_command: issuing immediate retry\n"); 13973 13974 /* 13975 * Call sd_start_cmds() to transport the command to 13976 * the target. 13977 */ 13978 sd_start_cmds(un, bp); 13979 13980 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13981 "sd_retry_command exit\n"); 13982 return; 13983 } 13984 } 13985 13986 /* 13987 * Set up to retry the command after a delay. 13988 * First call the user-provided function (if any) 13989 */ 13990 if (user_funcp != NULL) { 13991 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13992 } 13993 13994 sd_set_retry_bp(un, bp, retry_delay, statp); 13995 13996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13997 return; 13998 13999 fail_command: 14000 14001 if (user_funcp != NULL) { 14002 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14003 } 14004 14005 fail_command_no_log: 14006 14007 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14008 "sd_retry_command: returning failed command\n"); 14009 14010 sd_return_failed_command(un, bp, failure_code); 14011 14012 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14013 } 14014 14015 14016 /* 14017 * Function: sd_set_retry_bp 14018 * 14019 * Description: Set up the given bp for retry. 
14020 * 14021 * Arguments: un - ptr to associated softstate 14022 * bp - ptr to buf(9S) for the command 14023 * retry_delay - time interval before issuing retry (may be 0) 14024 * statp - optional pointer to kstat function 14025 * 14026 * Context: May be called under interrupt context 14027 */ 14028 14029 static void 14030 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14031 void (*statp)(kstat_io_t *)) 14032 { 14033 ASSERT(un != NULL); 14034 ASSERT(mutex_owned(SD_MUTEX(un))); 14035 ASSERT(bp != NULL); 14036 14037 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14038 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14039 14040 /* 14041 * Indicate that the command is being retried. This will not allow any 14042 * other commands on the wait queue to be transported to the target 14043 * until this command has been completed (success or failure). The 14044 * "retry command" is not transported to the target until the given 14045 * time delay expires, unless the user specified a 0 retry_delay. 14046 * 14047 * Note: the timeout(9F) callback routine is what actually calls 14048 * sd_start_cmds() to transport the command, with the exception of a 14049 * zero retry_delay. The only current implementor of a zero retry delay 14050 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14051 */ 14052 if (un->un_retry_bp == NULL) { 14053 ASSERT(un->un_retry_statp == NULL); 14054 un->un_retry_bp = bp; 14055 14056 /* 14057 * If the user has not specified a delay the command should 14058 * be queued and no timeout should be scheduled. 14059 */ 14060 if (retry_delay == 0) { 14061 /* 14062 * Save the kstat pointer that will be used in the 14063 * call to SD_UPDATE_KSTATS() below, so that 14064 * sd_start_cmds() can correctly decrement the waitq 14065 * count when it is time to transport this command. 14066 */ 14067 un->un_retry_statp = statp; 14068 goto done; 14069 } 14070 } 14071 14072 if (un->un_retry_bp == bp) { 14073 /* 14074 * Save the kstat pointer that will be used in the call to 14075 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14076 * correctly decrement the waitq count when it is time to 14077 * transport this command. 14078 */ 14079 un->un_retry_statp = statp; 14080 14081 /* 14082 * Schedule a timeout if: 14083 * 1) The user has specified a delay. 14084 * 2) There is not a START_STOP_UNIT callback pending. 14085 * 14086 * If no delay has been specified, then it is up to the caller 14087 * to ensure that IO processing continues without stalling. 14088 * Effectively, this means that the caller will issue the 14089 * required call to sd_start_cmds(). The START_STOP_UNIT 14090 * callback does this after the START STOP UNIT command has 14091 * completed. In either of these cases we should not schedule 14092 * a timeout callback here. Also don't schedule the timeout if 14093 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14094 */ 14095 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14096 (un->un_direct_priority_timeid == NULL)) { 14097 un->un_retry_timeid = 14098 timeout(sd_start_retry_command, un, retry_delay); 14099 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14100 "sd_set_retry_bp: setting timeout: un: 0x%p" 14101 " bp:0x%p un_retry_timeid:0x%p\n", 14102 un, bp, un->un_retry_timeid); 14103 } 14104 } else { 14105 /* 14106 * We only get in here if there is already another command 14107 * waiting to be retried. 
In this case, we just put the 14108 * given command onto the wait queue, so it can be transported 14109 * after the current retry command has completed. 14110 * 14111 * Also we have to make sure that if the command at the head 14112 * of the wait queue is un_failfast_bp, we do not put any 14113 * other commands to be retried ahead of it. 14114 */ 14115 if ((un->un_failfast_bp != NULL) && 14116 (un->un_failfast_bp == un->un_waitq_headp)) { 14117 /* 14118 * Enqueue this command AFTER the first command on 14119 * the wait queue (which is also un_failfast_bp). 14120 */ 14121 bp->av_forw = un->un_waitq_headp->av_forw; 14122 un->un_waitq_headp->av_forw = bp; 14123 if (un->un_waitq_headp == un->un_waitq_tailp) { 14124 un->un_waitq_tailp = bp; 14125 } 14126 } else { 14127 /* Enqueue this command at the head of the waitq. */ 14128 bp->av_forw = un->un_waitq_headp; 14129 un->un_waitq_headp = bp; 14130 if (un->un_waitq_tailp == NULL) { 14131 un->un_waitq_tailp = bp; 14132 } 14133 } 14134 14135 if (statp == NULL) { 14136 statp = kstat_waitq_enter; 14137 } 14138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14139 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14140 } 14141 14142 done: 14143 if (statp != NULL) { 14144 SD_UPDATE_KSTATS(un, statp, bp); 14145 } 14146 14147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14148 "sd_set_retry_bp: exit un:0x%p\n", un); 14149 } 14150 14151 14152 /* 14153 * Function: sd_start_retry_command 14154 * 14155 * Description: Start the command that has been waiting on the target's 14156 * retry queue. Called from timeout(9F) context after the 14157 * retry delay interval has expired. 14158 * 14159 * Arguments: arg - pointer to associated softstate for the device. 14160 * 14161 * Context: timeout(9F) thread context. May not sleep. 14162 */ 14163 14164 static void 14165 sd_start_retry_command(void *arg) 14166 { 14167 struct sd_lun *un = arg; 14168 14169 ASSERT(un != NULL); 14170 ASSERT(!mutex_owned(SD_MUTEX(un))); 14171 14172 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14173 "sd_start_retry_command: entry\n"); 14174 14175 mutex_enter(SD_MUTEX(un)); 14176 14177 un->un_retry_timeid = NULL; 14178 14179 if (un->un_retry_bp != NULL) { 14180 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14181 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14182 un, un->un_retry_bp); 14183 sd_start_cmds(un, un->un_retry_bp); 14184 } 14185 14186 mutex_exit(SD_MUTEX(un)); 14187 14188 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14189 "sd_start_retry_command: exit\n"); 14190 } 14191 14192 14193 /* 14194 * Function: sd_start_direct_priority_command 14195 * 14196 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14197 * received TRAN_BUSY when we called scsi_transport() to send it 14198 * to the underlying HBA. This function is called from timeout(9F) 14199 * context after the delay interval has expired. 14200 * 14201 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14202 * 14203 * Context: timeout(9F) thread context. May not sleep. 
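 *
 *		(This callback is armed from sd_start_cmds() when a
 *		DIRECT_PRIORITY command gets TRAN_BUSY, i.e.:
 *
 *			un->un_direct_priority_timeid =
 *			    timeout(sd_start_direct_priority_command,
 *			    bp, SD_BSY_TIMEOUT / 500);
 *
 *		as seen earlier in this file.)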
14204 */ 14205 14206 static void 14207 sd_start_direct_priority_command(void *arg) 14208 { 14209 struct buf *priority_bp = arg; 14210 struct sd_lun *un; 14211 14212 ASSERT(priority_bp != NULL); 14213 un = SD_GET_UN(priority_bp); 14214 ASSERT(un != NULL); 14215 ASSERT(!mutex_owned(SD_MUTEX(un))); 14216 14217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14218 "sd_start_direct_priority_command: entry\n"); 14219 14220 mutex_enter(SD_MUTEX(un)); 14221 un->un_direct_priority_timeid = NULL; 14222 sd_start_cmds(un, priority_bp); 14223 mutex_exit(SD_MUTEX(un)); 14224 14225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14226 "sd_start_direct_priority_command: exit\n"); 14227 } 14228 14229 14230 /* 14231 * Function: sd_send_request_sense_command 14232 * 14233 * Description: Sends a REQUEST SENSE command to the target 14234 * 14235 * Context: May be called from interrupt context. 14236 */ 14237 14238 static void 14239 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14240 struct scsi_pkt *pktp) 14241 { 14242 ASSERT(bp != NULL); 14243 ASSERT(un != NULL); 14244 ASSERT(mutex_owned(SD_MUTEX(un))); 14245 14246 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14247 "entry: buf:0x%p\n", bp); 14248 14249 /* 14250 * If we are syncing or dumping, then fail the command to avoid a 14251 * recursive callback into scsi_transport(). Also fail the command 14252 * if we are suspended (legacy behavior). 14253 */ 14254 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14255 (un->un_state == SD_STATE_DUMPING)) { 14256 sd_return_failed_command(un, bp, EIO); 14257 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14258 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14259 return; 14260 } 14261 14262 /* 14263 * Retry the failed command and don't issue the request sense if: 14264 * 1) the sense buf is busy 14265 * 2) we have 1 or more outstanding commands on the target 14266 * (the sense data will be cleared or invalidated anyway) 14267 * 14268 * Note: There could be an issue with not checking a retry limit here; 14269 * the problem is determining which retry limit to check. 14270 */ 14271 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14272 /* Don't retry if the command is flagged as non-retryable */ 14273 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14274 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14275 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14276 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14277 "sd_send_request_sense_command: " 14278 "at full throttle, retrying exit\n"); 14279 } else { 14280 sd_return_failed_command(un, bp, EIO); 14281 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14282 "sd_send_request_sense_command: " 14283 "at full throttle, non-retryable exit\n"); 14284 } 14285 return; 14286 } 14287 14288 sd_mark_rqs_busy(un, bp); 14289 sd_start_cmds(un, un->un_rqs_bp); 14290 14291 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14292 "sd_send_request_sense_command: exit\n"); 14293 } 14294 14295 14296 /* 14297 * Function: sd_mark_rqs_busy 14298 * 14299 * Description: Indicate that the request sense bp for this instance is 14300 * in use. 
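 *		(Paired with sd_mark_rqs_idle(); SD_MUTEX must be held
 *		across the busy/idle transitions so the rqs struct is
 *		not reused before its processing completes.)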
14301 * 14302 * Context: May be called under interrupt context 14303 */ 14304 14305 static void 14306 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14307 { 14308 struct sd_xbuf *sense_xp; 14309 14310 ASSERT(un != NULL); 14311 ASSERT(bp != NULL); 14312 ASSERT(mutex_owned(SD_MUTEX(un))); 14313 ASSERT(un->un_sense_isbusy == 0); 14314 14315 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14316 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14317 14318 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14319 ASSERT(sense_xp != NULL); 14320 14321 SD_INFO(SD_LOG_IO, un, 14322 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14323 14324 ASSERT(sense_xp->xb_pktp != NULL); 14325 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14326 == (FLAG_SENSING | FLAG_HEAD)); 14327 14328 un->un_sense_isbusy = 1; 14329 un->un_rqs_bp->b_resid = 0; 14330 sense_xp->xb_pktp->pkt_resid = 0; 14331 sense_xp->xb_pktp->pkt_reason = 0; 14332 14333 /* So we can get back the bp at interrupt time! */ 14334 sense_xp->xb_sense_bp = bp; 14335 14336 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14337 14338 /* 14339 * Mark this buf as awaiting sense data. (This is already set in 14340 * the pkt_flags for the RQS packet.) 14341 */ 14342 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14343 14344 sense_xp->xb_retry_count = 0; 14345 sense_xp->xb_victim_retry_count = 0; 14346 sense_xp->xb_ua_retry_count = 0; 14347 sense_xp->xb_nr_retry_count = 0; 14348 sense_xp->xb_dma_resid = 0; 14349 14350 /* Clean up the fields for auto-request sense */ 14351 sense_xp->xb_sense_status = 0; 14352 sense_xp->xb_sense_state = 0; 14353 sense_xp->xb_sense_resid = 0; 14354 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14355 14356 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14357 } 14358 14359 14360 /* 14361 * Function: sd_mark_rqs_idle 14362 * 14363 * Description: SD_MUTEX must be held continuously through this routine 14364 * to prevent reuse of the rqs struct before the caller can 14365 * complete its processing. 14366 * 14367 * Return Code: Pointer to the RQS buf 14368 * 14369 * Context: May be called under interrupt context 14370 */ 14371 14372 static struct buf * 14373 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14374 { 14375 struct buf *bp; 14376 ASSERT(un != NULL); 14377 ASSERT(sense_xp != NULL); 14378 ASSERT(mutex_owned(SD_MUTEX(un))); 14379 ASSERT(un->un_sense_isbusy != 0); 14380 14381 un->un_sense_isbusy = 0; 14382 bp = sense_xp->xb_sense_bp; 14383 sense_xp->xb_sense_bp = NULL; 14384 14385 /* This pkt is no longer interested in getting sense data */ 14386 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14387 14388 return (bp); 14389 } 14390 14391 14392 14393 /* 14394 * Function: sd_alloc_rqs 14395 * 14396 * Description: Set up the unit to receive auto request sense data 14397 * 14398 * Return Code: DDI_SUCCESS or DDI_FAILURE 14399 * 14400 * Context: Called under attach(9E) context 14401 */ 14402 14403 static int 14404 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14405 { 14406 struct sd_xbuf *xp; 14407 14408 ASSERT(un != NULL); 14409 ASSERT(!mutex_owned(SD_MUTEX(un))); 14410 ASSERT(un->un_rqs_bp == NULL); 14411 ASSERT(un->un_rqs_pktp == NULL); 14412 14413 /* 14414 * First allocate the required buf and scsi_pkt structs, then set up 14415 * the CDB in the scsi_pkt for a REQUEST SENSE command. 
14416 */ 14417 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14418 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14419 if (un->un_rqs_bp == NULL) { 14420 return (DDI_FAILURE); 14421 } 14422 14423 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14424 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14425 14426 if (un->un_rqs_pktp == NULL) { 14427 sd_free_rqs(un); 14428 return (DDI_FAILURE); 14429 } 14430 14431 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14432 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14433 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14434 14435 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14436 14437 /* Set up the other needed members in the ARQ scsi_pkt. */ 14438 un->un_rqs_pktp->pkt_comp = sdintr; 14439 un->un_rqs_pktp->pkt_time = sd_io_time; 14440 un->un_rqs_pktp->pkt_flags |= 14441 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14442 14443 /* 14444 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14445 * provide any initpkt, destroypkt routines as we take care of 14446 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14447 */ 14448 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14449 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14450 xp->xb_pktp = un->un_rqs_pktp; 14451 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14452 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14453 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14454 14455 /* 14456 * Save the pointer to the request sense private bp so it can 14457 * be retrieved in sdintr. 14458 */ 14459 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14460 ASSERT(un->un_rqs_bp->b_private == xp); 14461 14462 /* 14463 * See if the HBA supports auto-request sense for the specified 14464 * target/lun. If it does, then try to enable it (if not already 14465 * enabled). 14466 * 14467 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14468 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14469 * return success. However, in both of these cases ARQ is always 14470 * enabled and scsi_ifgetcap will always return true. The best approach 14471 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14472 * 14473 * The 3rd case is an HBA (adp) that always returns enabled on 14474 * scsi_ifgetcap even when ARQ is not enabled; there the best approach 14475 * is to issue a scsi_ifsetcap() and then a scsi_ifgetcap(). 14476 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14477 */ 14478 14479 if (un->un_f_is_fibre == TRUE) { 14480 un->un_f_arq_enabled = TRUE; 14481 } else { 14482 #if defined(__i386) || defined(__amd64) 14483 /* 14484 * Circumvent the Adaptec bug; remove this code when 14485 * the bug is fixed 14486 */ 14487 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14488 #endif 14489 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14490 case 0: 14491 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14492 "sd_alloc_rqs: HBA supports ARQ\n"); 14493 /* 14494 * ARQ is supported by this HBA but currently is not 14495 * enabled. Attempt to enable it and if successful then 14496 * mark this instance as ARQ enabled. 
14497 */ 14498 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14499 == 1) { 14500 /* Successfully enabled ARQ in the HBA */ 14501 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14502 "sd_alloc_rqs: ARQ enabled\n"); 14503 un->un_f_arq_enabled = TRUE; 14504 } else { 14505 /* Could not enable ARQ in the HBA */ 14506 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14507 "sd_alloc_rqs: failed ARQ enable\n"); 14508 un->un_f_arq_enabled = FALSE; 14509 } 14510 break; 14511 case 1: 14512 /* 14513 * ARQ is supported by this HBA and is already enabled. 14514 * Just mark ARQ as enabled for this instance. 14515 */ 14516 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14517 "sd_alloc_rqs: ARQ already enabled\n"); 14518 un->un_f_arq_enabled = TRUE; 14519 break; 14520 default: 14521 /* 14522 * ARQ is not supported by this HBA; disable it for this 14523 * instance. 14524 */ 14525 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14526 "sd_alloc_rqs: HBA does not support ARQ\n"); 14527 un->un_f_arq_enabled = FALSE; 14528 break; 14529 } 14530 } 14531 14532 return (DDI_SUCCESS); 14533 } 14534 14535 14536 /* 14537 * Function: sd_free_rqs 14538 * 14539 * Description: Cleanup for the per-instance RQS command. 14540 * 14541 * Context: Kernel thread context 14542 */ 14543 14544 static void 14545 sd_free_rqs(struct sd_lun *un) 14546 { 14547 ASSERT(un != NULL); 14548 14549 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14550 14551 /* 14552 * If consistent memory is bound to a scsi_pkt, the pkt 14553 * has to be destroyed *before* freeing the consistent memory. 14554 * Don't change the sequence of these operations. 14555 * scsi_destroy_pkt() might access memory, which isn't allowed, 14556 * after it was freed in scsi_free_consistent_buf(). 14557 */ 14558 if (un->un_rqs_pktp != NULL) { 14559 scsi_destroy_pkt(un->un_rqs_pktp); 14560 un->un_rqs_pktp = NULL; 14561 } 14562 14563 if (un->un_rqs_bp != NULL) { 14564 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14565 if (xp != NULL) { 14566 kmem_free(xp, sizeof (struct sd_xbuf)); 14567 } 14568 scsi_free_consistent_buf(un->un_rqs_bp); 14569 un->un_rqs_bp = NULL; 14570 } 14571 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14572 } 14573 14574 14575 14576 /* 14577 * Function: sd_reduce_throttle 14578 * 14579 * Description: Reduces the maximum # of outstanding commands on a 14580 * target to the current number of outstanding commands. 14581 * Queues a timeout(9F) callback to restore the limit 14582 * after a specified interval has elapsed. 14583 * Typically used when we get a TRAN_BUSY return code 14584 * back from scsi_transport().
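 *
 * Illustrative sketch of the expected calling pattern (a hypothetical
 * caller shown for clarity only; the driver's real logic lives in
 * sd_start_cmds()):
 *
 *	if (scsi_transport(xp->xb_pktp) == TRAN_BUSY) {
 *		mutex_enter(SD_MUTEX(un));
 *		sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
 *		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
 *		mutex_exit(SD_MUTEX(un));
 *	}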
14585 * 14586 * Arguments: un - ptr to the sd_lun softstate struct 14587 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14588 * 14589 * Context: May be called from interrupt context 14590 */ 14591 14592 static void 14593 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14594 { 14595 ASSERT(un != NULL); 14596 ASSERT(mutex_owned(SD_MUTEX(un))); 14597 ASSERT(un->un_ncmds_in_transport >= 0); 14598 14599 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14600 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14601 un, un->un_throttle, un->un_ncmds_in_transport); 14602 14603 if (un->un_throttle > 1) { 14604 if (un->un_f_use_adaptive_throttle == TRUE) { 14605 switch (throttle_type) { 14606 case SD_THROTTLE_TRAN_BUSY: 14607 if (un->un_busy_throttle == 0) { 14608 un->un_busy_throttle = un->un_throttle; 14609 } 14610 break; 14611 case SD_THROTTLE_QFULL: 14612 un->un_busy_throttle = 0; 14613 break; 14614 default: 14615 ASSERT(FALSE); 14616 } 14617 14618 if (un->un_ncmds_in_transport > 0) { 14619 un->un_throttle = un->un_ncmds_in_transport; 14620 } 14621 14622 } else { 14623 if (un->un_ncmds_in_transport == 0) { 14624 un->un_throttle = 1; 14625 } else { 14626 un->un_throttle = un->un_ncmds_in_transport; 14627 } 14628 } 14629 } 14630 14631 /* Reschedule the timeout if none is currently active */ 14632 if (un->un_reset_throttle_timeid == NULL) { 14633 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14634 un, SD_THROTTLE_RESET_INTERVAL); 14635 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14636 "sd_reduce_throttle: timeout scheduled!\n"); 14637 } 14638 14639 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14640 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14641 } 14642 14643 14644 14645 /* 14646 * Function: sd_restore_throttle 14647 * 14648 * Description: Callback function for timeout(9F). Resets the current 14649 * value of un->un_throttle to its default. 14650 * 14651 * Arguments: arg - pointer to associated softstate for the device. 14652 * 14653 * Context: May be called from interrupt context 14654 */ 14655 14656 static void 14657 sd_restore_throttle(void *arg) 14658 { 14659 struct sd_lun *un = arg; 14660 14661 ASSERT(un != NULL); 14662 ASSERT(!mutex_owned(SD_MUTEX(un))); 14663 14664 mutex_enter(SD_MUTEX(un)); 14665 14666 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14667 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14668 14669 un->un_reset_throttle_timeid = NULL; 14670 14671 if (un->un_f_use_adaptive_throttle == TRUE) { 14672 /* 14673 * If un_busy_throttle is nonzero, then it contains the 14674 * value that un_throttle had when we got a TRAN_BUSY back 14675 * from scsi_transport(). We want to revert to this 14676 * value. 14677 * 14678 * In the QFULL case, the throttle limit will incrementally 14679 * increase until it reaches max throttle. 14680 */ 14681 if (un->un_busy_throttle > 0) { 14682 un->un_throttle = un->un_busy_throttle; 14683 un->un_busy_throttle = 0; 14684 } else { 14685 /* 14686 * Increase the throttle by 10% to open the gate 14687 * slowly; schedule another restore if the saved 14688 * throttle has not been reached. 14689 */ 14690 short throttle; 14691 if (sd_qfull_throttle_enable) { 14692 throttle = un->un_throttle + 14693 max((un->un_throttle / 10), 1); 14694 un->un_throttle = 14695 (throttle < un->un_saved_throttle) ?
14696 throttle : un->un_saved_throttle; 14697 if (un->un_throttle < un->un_saved_throttle) { 14698 un->un_reset_throttle_timeid = 14699 timeout(sd_restore_throttle, 14700 un, 14701 SD_QFULL_THROTTLE_RESET_INTERVAL); 14702 } 14703 } 14704 } 14705 14706 /* 14707 * If un_throttle has fallen below the low-water mark, we 14708 * restore the maximum value here (and allow it to ratchet 14709 * down again if necessary). 14710 */ 14711 if (un->un_throttle < un->un_min_throttle) { 14712 un->un_throttle = un->un_saved_throttle; 14713 } 14714 } else { 14715 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14716 "restoring limit from 0x%x to 0x%x\n", 14717 un->un_throttle, un->un_saved_throttle); 14718 un->un_throttle = un->un_saved_throttle; 14719 } 14720 14721 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14722 "sd_restore_throttle: calling sd_start_cmds!\n"); 14723 14724 sd_start_cmds(un, NULL); 14725 14726 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14727 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14728 un, un->un_throttle); 14729 14730 mutex_exit(SD_MUTEX(un)); 14731 14732 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14733 } 14734 14735 /* 14736 * Function: sdrunout 14737 * 14738 * Description: Callback routine for scsi_init_pkt when a resource allocation 14739 * fails. 14740 * 14741 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14742 * soft state instance. 14743 * 14744 * Return Code: The scsi_init_pkt routine allows for the callback function to 14745 * return a 0 indicating the callback should be rescheduled or a 1 14746 * indicating not to reschedule. This routine always returns 1 14747 * because the driver always provides a callback function to 14748 * scsi_init_pkt. This results in a callback always being scheduled 14749 * (via the scsi_init_pkt callback implementation) if a resource 14750 * failure occurs. 14751 * 14752 * Context: This callback function may not block or call routines that block 14753 * 14754 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14755 * request persisting at the head of the list which cannot be 14756 * satisfied even after multiple retries. In the future the driver 14757 * may implement some type of maximum runout count before failing 14758 * an I/O. 14759 */ 14760 14761 static int 14762 sdrunout(caddr_t arg) 14763 { 14764 struct sd_lun *un = (struct sd_lun *)arg; 14765 14766 ASSERT(un != NULL); 14767 ASSERT(!mutex_owned(SD_MUTEX(un))); 14768 14769 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14770 14771 mutex_enter(SD_MUTEX(un)); 14772 sd_start_cmds(un, NULL); 14773 mutex_exit(SD_MUTEX(un)); 14774 /* 14775 * This callback routine always returns 1 (i.e. do not reschedule) 14776 * because we always specify sdrunout as the callback handler for 14777 * scsi_init_pkt inside the call to sd_start_cmds. 14778 */ 14779 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14780 return (1); 14781 } 14782 14783 14784 /* 14785 * Function: sdintr 14786 * 14787 * Description: Completion callback routine for scsi_pkt(9S) structs 14788 * sent to the HBA driver via scsi_transport(9F).
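 *
 * For reference, a completion callback is wired up when the packet is
 * prepared, before it is handed to scsi_transport(9F); a minimal
 * sketch (the driver's real setup is in sd_alloc_rqs() and the
 * initpkt routines):
 *
 *	pktp->pkt_comp    = sdintr;
 *	pktp->pkt_time    = sd_io_time;
 *	pktp->pkt_private = bp;
 *	(void) scsi_transport(pktp);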
14789 * 14790 * Context: Interrupt context 14791 */ 14792 14793 static void 14794 sdintr(struct scsi_pkt *pktp) 14795 { 14796 struct buf *bp; 14797 struct sd_xbuf *xp; 14798 struct sd_lun *un; 14799 size_t actual_len; 14800 14801 ASSERT(pktp != NULL); 14802 bp = (struct buf *)pktp->pkt_private; 14803 ASSERT(bp != NULL); 14804 xp = SD_GET_XBUF(bp); 14805 ASSERT(xp != NULL); 14806 ASSERT(xp->xb_pktp != NULL); 14807 un = SD_GET_UN(bp); 14808 ASSERT(un != NULL); 14809 ASSERT(!mutex_owned(SD_MUTEX(un))); 14810 14811 #ifdef SD_FAULT_INJECTION 14812 14813 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14814 /* SD FaultInjection */ 14815 sd_faultinjection(pktp); 14816 14817 #endif /* SD_FAULT_INJECTION */ 14818 14819 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14820 " xp:0x%p, un:0x%p\n", bp, xp, un); 14821 14822 mutex_enter(SD_MUTEX(un)); 14823 14824 /* Reduce the count of the #commands currently in transport */ 14825 un->un_ncmds_in_transport--; 14826 ASSERT(un->un_ncmds_in_transport >= 0); 14827 14828 /* Increment counter to indicate that the callback routine is active */ 14829 un->un_in_callback++; 14830 14831 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14832 14833 #ifdef SDDEBUG 14834 if (bp == un->un_retry_bp) { 14835 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14836 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14837 un, un->un_retry_bp, un->un_ncmds_in_transport); 14838 } 14839 #endif 14840 14841 /* 14842 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14843 * state if needed. 14844 */ 14845 if (pktp->pkt_reason == CMD_DEV_GONE) { 14846 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14847 "Command failed to complete...Device is gone\n"); 14848 if (un->un_mediastate != DKIO_DEV_GONE) { 14849 un->un_mediastate = DKIO_DEV_GONE; 14850 cv_broadcast(&un->un_state_cv); 14851 } 14852 sd_return_failed_command(un, bp, EIO); 14853 goto exit; 14854 } 14855 14856 if (pktp->pkt_state & STATE_XARQ_DONE) { 14857 SD_TRACE(SD_LOG_COMMON, un, 14858 "sdintr: extra sense data received. pkt=%p\n", pktp); 14859 } 14860 14861 /* 14862 * First see if the pkt has auto-request sense data with it.... 14863 * Look at the packet state first so we don't take a performance 14864 * hit looking at the arq enabled flag unless absolutely necessary. 14865 */ 14866 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14867 (un->un_f_arq_enabled == TRUE)) { 14868 /* 14869 * The HBA did an auto request sense for this command so check 14870 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14871 * driver command that should not be retried. 14872 */ 14873 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14874 /* 14875 * Save the relevant sense info into the xp for the 14876 * original cmd. 
14877 */ 14878 struct scsi_arq_status *asp; 14879 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14880 xp->xb_sense_status = 14881 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14882 xp->xb_sense_state = asp->sts_rqpkt_state; 14883 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14884 if (pktp->pkt_state & STATE_XARQ_DONE) { 14885 actual_len = MAX_SENSE_LENGTH - 14886 xp->xb_sense_resid; 14887 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14888 MAX_SENSE_LENGTH); 14889 } else { 14890 if (xp->xb_sense_resid > SENSE_LENGTH) { 14891 actual_len = MAX_SENSE_LENGTH - 14892 xp->xb_sense_resid; 14893 } else { 14894 actual_len = SENSE_LENGTH - 14895 xp->xb_sense_resid; 14896 } 14897 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14898 xp->xb_sense_resid = 14899 (int)(((struct uscsi_cmd *) 14900 (xp->xb_pktinfo))-> 14901 uscsi_rqlen) - actual_len; 14902 } 14903 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14904 SENSE_LENGTH); 14905 } 14906 14907 /* fail the command */ 14908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14909 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14910 sd_return_failed_command(un, bp, EIO); 14911 goto exit; 14912 } 14913 14914 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14915 /* 14916 * We want to either retry or fail this command, so free 14917 * the DMA resources here. If we retry the command then 14918 * the DMA resources will be reallocated in sd_start_cmds(). 14919 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14920 * causes the *entire* transfer to start over again from the 14921 * beginning of the request, even for PARTIAL chunks that 14922 * have already transferred successfully. 14923 */ 14924 if ((un->un_f_is_fibre == TRUE) && 14925 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14926 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14927 scsi_dmafree(pktp); 14928 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14929 } 14930 #endif 14931 14932 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14933 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14934 14935 sd_handle_auto_request_sense(un, bp, xp, pktp); 14936 goto exit; 14937 } 14938 14939 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14940 if (pktp->pkt_flags & FLAG_SENSING) { 14941 /* This pktp is from the unit's REQUEST_SENSE command */ 14942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14943 "sdintr: sd_handle_request_sense\n"); 14944 sd_handle_request_sense(un, bp, xp, pktp); 14945 goto exit; 14946 } 14947 14948 /* 14949 * Check to see if the command successfully completed as requested; 14950 * this is the most common case (and also the hot performance path). 14951 * 14952 * Requirements for successful completion are: 14953 * pkt_reason is CMD_CMPLT and packet status is status good. 14954 * In addition: 14955 * - A residual of zero indicates successful completion no matter what 14956 * the command is. 14957 * - If the residual is not zero and the command is not a read or 14958 * write, then it's still defined as successful completion. In other 14959 * words, if the command is a read or write the residual must be 14960 * zero for successful completion. 14961 * - If the residual is not zero and the command is a read or 14962 * write, and it's a USCSICMD, then it's still defined as 14963 * successful completion. 14964 */ 14965 if ((pktp->pkt_reason == CMD_CMPLT) && 14966 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14967 14968 /* 14969 * Since this command is returned with a good status, we 14970 * can reset the count for Sonoma failover. 
14971 */ 14972 un->un_sonoma_failure_count = 0; 14973 14974 /* 14975 * Return all USCSI commands on good status 14976 */ 14977 if (pktp->pkt_resid == 0) { 14978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14979 "sdintr: returning command for resid == 0\n"); 14980 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14981 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14982 SD_UPDATE_B_RESID(bp, pktp); 14983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14984 "sdintr: returning command for resid != 0\n"); 14985 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14986 SD_UPDATE_B_RESID(bp, pktp); 14987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14988 "sdintr: returning uscsi command\n"); 14989 } else { 14990 goto not_successful; 14991 } 14992 sd_return_command(un, bp); 14993 14994 /* 14995 * Decrement counter to indicate that the callback routine 14996 * is done. 14997 */ 14998 un->un_in_callback--; 14999 ASSERT(un->un_in_callback >= 0); 15000 mutex_exit(SD_MUTEX(un)); 15001 15002 return; 15003 } 15004 15005 not_successful: 15006 15007 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15008 /* 15009 * The following is based upon knowledge of the underlying transport 15010 * and its use of DMA resources. This code should be removed when 15011 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15012 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15013 * and sd_start_cmds(). 15014 * 15015 * Free any DMA resources associated with this command if there 15016 * is a chance it could be retried or enqueued for later retry. 15017 * If we keep the DMA binding then mpxio cannot reissue the 15018 * command on another path whenever a path failure occurs. 15019 * 15020 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15021 * causes the *entire* transfer to start over again from the 15022 * beginning of the request, even for PARTIAL chunks that 15023 * have already transferred successfully. 15024 * 15025 * This is only done for non-uscsi commands (and also skipped for the 15026 * driver's internal RQS command). Also just do this for Fibre Channel 15027 * devices as these are the only ones that support mpxio. 15028 */ 15029 if ((un->un_f_is_fibre == TRUE) && 15030 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15031 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15032 scsi_dmafree(pktp); 15033 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15034 } 15035 #endif 15036 15037 /* 15038 * The command did not successfully complete as requested so check 15039 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15040 * driver command that should not be retried so just return. If 15041 * FLAG_DIAGNOSE is not set the error will be processed below. 15042 */ 15043 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15044 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15045 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15046 /* 15047 * Issue a request sense if a check condition caused the error 15048 * (we handle the auto request sense case above), otherwise 15049 * just fail the command. 15050 */ 15051 if ((pktp->pkt_reason == CMD_CMPLT) && 15052 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15053 sd_send_request_sense_command(un, bp, pktp); 15054 } else { 15055 sd_return_failed_command(un, bp, EIO); 15056 } 15057 goto exit; 15058 } 15059 15060 /* 15061 * The command did not successfully complete as requested so process 15062 * the error, retry, and/or attempt recovery. 
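 *
 * To summarize the dispatch order implemented in sdintr (editorial
 * sketch of the checks above and the switch below):
 *
 *	1) STATE_ARQ_DONE		-> sd_handle_auto_request_sense()
 *	2) FLAG_SENSING			-> sd_handle_request_sense()
 *	3) CMD_CMPLT + STATUS_GOOD	-> sd_return_command() (hot path)
 *	4) FLAG_DIAGNOSE		-> request sense or fail with EIO
 *	5) everything else		-> pkt_reason/pkt_status switch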
15063 */ 15064 switch (pktp->pkt_reason) { 15065 case CMD_CMPLT: 15066 switch (SD_GET_PKT_STATUS(pktp)) { 15067 case STATUS_GOOD: 15068 /* 15069 * The command completed successfully with a non-zero 15070 * residual 15071 */ 15072 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15073 "sdintr: STATUS_GOOD \n"); 15074 sd_pkt_status_good(un, bp, xp, pktp); 15075 break; 15076 15077 case STATUS_CHECK: 15078 case STATUS_TERMINATED: 15079 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15080 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15081 sd_pkt_status_check_condition(un, bp, xp, pktp); 15082 break; 15083 15084 case STATUS_BUSY: 15085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15086 "sdintr: STATUS_BUSY\n"); 15087 sd_pkt_status_busy(un, bp, xp, pktp); 15088 break; 15089 15090 case STATUS_RESERVATION_CONFLICT: 15091 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15092 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15093 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15094 break; 15095 15096 case STATUS_QFULL: 15097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15098 "sdintr: STATUS_QFULL\n"); 15099 sd_pkt_status_qfull(un, bp, xp, pktp); 15100 break; 15101 15102 case STATUS_MET: 15103 case STATUS_INTERMEDIATE: 15104 case STATUS_SCSI2: 15105 case STATUS_INTERMEDIATE_MET: 15106 case STATUS_ACA_ACTIVE: 15107 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15108 "Unexpected SCSI status received: 0x%x\n", 15109 SD_GET_PKT_STATUS(pktp)); 15110 sd_return_failed_command(un, bp, EIO); 15111 break; 15112 15113 default: 15114 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15115 "Invalid SCSI status received: 0x%x\n", 15116 SD_GET_PKT_STATUS(pktp)); 15117 sd_return_failed_command(un, bp, EIO); 15118 break; 15119 15120 } 15121 break; 15122 15123 case CMD_INCOMPLETE: 15124 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15125 "sdintr: CMD_INCOMPLETE\n"); 15126 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15127 break; 15128 case CMD_TRAN_ERR: 15129 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15130 "sdintr: CMD_TRAN_ERR\n"); 15131 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15132 break; 15133 case CMD_RESET: 15134 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15135 "sdintr: CMD_RESET \n"); 15136 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15137 break; 15138 case CMD_ABORTED: 15139 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15140 "sdintr: CMD_ABORTED \n"); 15141 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15142 break; 15143 case CMD_TIMEOUT: 15144 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15145 "sdintr: CMD_TIMEOUT\n"); 15146 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15147 break; 15148 case CMD_UNX_BUS_FREE: 15149 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15150 "sdintr: CMD_UNX_BUS_FREE \n"); 15151 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15152 break; 15153 case CMD_TAG_REJECT: 15154 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15155 "sdintr: CMD_TAG_REJECT\n"); 15156 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15157 break; 15158 default: 15159 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15160 "sdintr: default\n"); 15161 sd_pkt_reason_default(un, bp, xp, pktp); 15162 break; 15163 } 15164 15165 exit: 15166 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15167 15168 /* Decrement counter to indicate that the callback routine is done. */ 15169 un->un_in_callback--; 15170 ASSERT(un->un_in_callback >= 0); 15171 15172 /* 15173 * At this point, the pkt has been dispatched, i.e., it is either 15174 * being retried or has been returned to its caller and should 15175 * not be referenced.
15176 */ 15177 15178 mutex_exit(SD_MUTEX(un)); 15179 } 15180 15181 15182 /* 15183 * Function: sd_print_incomplete_msg 15184 * 15185 * Description: Prints the error message for a CMD_INCOMPLETE error. 15186 * 15187 * Arguments: un - ptr to associated softstate for the device. 15188 * bp - ptr to the buf(9S) for the command. 15189 * arg - message string ptr 15190 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15191 * or SD_NO_RETRY_ISSUED. 15192 * 15193 * Context: May be called under interrupt context 15194 */ 15195 15196 static void 15197 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15198 { 15199 struct scsi_pkt *pktp; 15200 char *msgp; 15201 char *cmdp = arg; 15202 15203 ASSERT(un != NULL); 15204 ASSERT(mutex_owned(SD_MUTEX(un))); 15205 ASSERT(bp != NULL); 15206 ASSERT(arg != NULL); 15207 pktp = SD_GET_PKTP(bp); 15208 ASSERT(pktp != NULL); 15209 15210 switch (code) { 15211 case SD_DELAYED_RETRY_ISSUED: 15212 case SD_IMMEDIATE_RETRY_ISSUED: 15213 msgp = "retrying"; 15214 break; 15215 case SD_NO_RETRY_ISSUED: 15216 default: 15217 msgp = "giving up"; 15218 break; 15219 } 15220 15221 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15222 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15223 "incomplete %s- %s\n", cmdp, msgp); 15224 } 15225 } 15226 15227 15228 15229 /* 15230 * Function: sd_pkt_status_good 15231 * 15232 * Description: Processing for a STATUS_GOOD code in pkt_status. 15233 * 15234 * Context: May be called under interrupt context 15235 */ 15236 15237 static void 15238 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15239 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15240 { 15241 char *cmdp; 15242 15243 ASSERT(un != NULL); 15244 ASSERT(mutex_owned(SD_MUTEX(un))); 15245 ASSERT(bp != NULL); 15246 ASSERT(xp != NULL); 15247 ASSERT(pktp != NULL); 15248 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15249 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15250 ASSERT(pktp->pkt_resid != 0); 15251 15252 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15253 15254 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15255 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15256 case SCMD_READ: 15257 cmdp = "read"; 15258 break; 15259 case SCMD_WRITE: 15260 cmdp = "write"; 15261 break; 15262 default: 15263 SD_UPDATE_B_RESID(bp, pktp); 15264 sd_return_command(un, bp); 15265 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15266 return; 15267 } 15268 15269 /* 15270 * See if we can retry the read/write, preferably immediately. 15271 * If retries are exhausted, then sd_retry_command() will update 15272 * the b_resid count. 15273 */ 15274 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15275 cmdp, EIO, (clock_t)0, NULL); 15276 15277 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15278 } 15279 15280 15281 15282 15283 15284 /* 15285 * Function: sd_handle_request_sense 15286 * 15287 * Description: Processing for non-auto Request Sense command.
15288 * 15289 * Arguments: un - ptr to associated softstate 15290 * sense_bp - ptr to buf(9S) for the RQS command 15291 * sense_xp - ptr to the sd_xbuf for the RQS command 15292 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15293 * 15294 * Context: May be called under interrupt context 15295 */ 15296 15297 static void 15298 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15299 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15300 { 15301 struct buf *cmd_bp; /* buf for the original command */ 15302 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15303 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15304 size_t actual_len; /* actual sense data length */ 15305 15306 ASSERT(un != NULL); 15307 ASSERT(mutex_owned(SD_MUTEX(un))); 15308 ASSERT(sense_bp != NULL); 15309 ASSERT(sense_xp != NULL); 15310 ASSERT(sense_pktp != NULL); 15311 15312 /* 15313 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15314 * RQS command and not the original command. 15315 */ 15316 ASSERT(sense_pktp == un->un_rqs_pktp); 15317 ASSERT(sense_bp == un->un_rqs_bp); 15318 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15319 (FLAG_SENSING | FLAG_HEAD)); 15320 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15321 FLAG_SENSING) == FLAG_SENSING); 15322 15323 /* These are the bp, xp, and pktp for the original command */ 15324 cmd_bp = sense_xp->xb_sense_bp; 15325 cmd_xp = SD_GET_XBUF(cmd_bp); 15326 cmd_pktp = SD_GET_PKTP(cmd_bp); 15327 15328 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15329 /* 15330 * The REQUEST SENSE command failed. Release the REQUEST 15331 * SENSE command for re-use, get back the bp for the original 15332 * command, and attempt to re-try the original command if 15333 * FLAG_DIAGNOSE is not set in the original packet. 15334 */ 15335 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15336 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15337 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15338 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15339 NULL, NULL, EIO, (clock_t)0, NULL); 15340 return; 15341 } 15342 } 15343 15344 /* 15345 * Save the relevant sense info into the xp for the original cmd. 15346 * 15347 * Note: if the request sense failed the state info will be zero 15348 * as set in sd_mark_rqs_busy() 15349 */ 15350 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15351 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15352 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15353 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15354 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15355 SENSE_LENGTH)) { 15356 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15357 MAX_SENSE_LENGTH); 15358 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15359 } else { 15360 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15361 SENSE_LENGTH); 15362 if (actual_len < SENSE_LENGTH) { 15363 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15364 } else { 15365 cmd_xp->xb_sense_resid = 0; 15366 } 15367 } 15368 15369 /* 15370 * Free up the RQS command.... 15371 * NOTE: 15372 * Must do this BEFORE calling sd_validate_sense_data! 15373 * sd_validate_sense_data may return the original command in 15374 * which case the pkt will be freed and the flags can no 15375 * longer be touched. 15376 * SD_MUTEX is held through this process until the command 15377 * is dispatched based upon the sense data, so there are 15378 * no race conditions. 
15379 */ 15380 (void) sd_mark_rqs_idle(un, sense_xp); 15381 15382 /* 15383 * For a retryable command see if we have valid sense data, if so then 15384 * turn it over to sd_decode_sense() to figure out the right course of 15385 * action. Just fail a non-retryable command. 15386 */ 15387 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15388 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15389 SD_SENSE_DATA_IS_VALID) { 15390 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15391 } 15392 } else { 15393 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15394 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15395 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15396 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15397 sd_return_failed_command(un, cmd_bp, EIO); 15398 } 15399 } 15400 15401 15402 15403 15404 /* 15405 * Function: sd_handle_auto_request_sense 15406 * 15407 * Description: Processing for auto-request sense information. 15408 * 15409 * Arguments: un - ptr to associated softstate 15410 * bp - ptr to buf(9S) for the command 15411 * xp - ptr to the sd_xbuf for the command 15412 * pktp - ptr to the scsi_pkt(9S) for the command 15413 * 15414 * Context: May be called under interrupt context 15415 */ 15416 15417 static void 15418 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15419 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15420 { 15421 struct scsi_arq_status *asp; 15422 size_t actual_len; 15423 15424 ASSERT(un != NULL); 15425 ASSERT(mutex_owned(SD_MUTEX(un))); 15426 ASSERT(bp != NULL); 15427 ASSERT(xp != NULL); 15428 ASSERT(pktp != NULL); 15429 ASSERT(pktp != un->un_rqs_pktp); 15430 ASSERT(bp != un->un_rqs_bp); 15431 15432 /* 15433 * For auto-request sense, we get a scsi_arq_status back from 15434 * the HBA, with the sense data in the sts_sensedata member. 15435 * The pkt_scbp of the packet points to this scsi_arq_status. 15436 */ 15437 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15438 15439 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15440 /* 15441 * The auto REQUEST SENSE failed; see if we can re-try 15442 * the original command. 15443 */ 15444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15445 "auto request sense failed (reason=%s)\n", 15446 scsi_rname(asp->sts_rqpkt_reason)); 15447 15448 sd_reset_target(un, pktp); 15449 15450 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15451 NULL, NULL, EIO, (clock_t)0, NULL); 15452 return; 15453 } 15454 15455 /* Save the relevant sense info into the xp for the original cmd. */ 15456 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15457 xp->xb_sense_state = asp->sts_rqpkt_state; 15458 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15459 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15460 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15461 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15462 MAX_SENSE_LENGTH); 15463 } else { 15464 if (xp->xb_sense_resid > SENSE_LENGTH) { 15465 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15466 } else { 15467 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15468 } 15469 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15470 xp->xb_sense_resid = (int)(((struct uscsi_cmd *) 15471 (xp->xb_pktinfo))->uscsi_rqlen) - actual_len; 15472 } 15473 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15474 } 15475 15476 /* 15477 * See if we have valid sense data, if so then turn it over to 15478 * sd_decode_sense() to figure out the right course of action. 
15479 */ 15480 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15481 SD_SENSE_DATA_IS_VALID) { 15482 sd_decode_sense(un, bp, xp, pktp); 15483 } 15484 } 15485 15486 15487 /* 15488 * Function: sd_print_sense_failed_msg 15489 * 15490 * Description: Print log message when RQS has failed. 15491 * 15492 * Arguments: un - ptr to associated softstate 15493 * bp - ptr to buf(9S) for the command 15494 * arg - generic message string ptr 15495 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15496 * or SD_NO_RETRY_ISSUED 15497 * 15498 * Context: May be called from interrupt context 15499 */ 15500 15501 static void 15502 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15503 int code) 15504 { 15505 char *msgp = arg; 15506 15507 ASSERT(un != NULL); 15508 ASSERT(mutex_owned(SD_MUTEX(un))); 15509 ASSERT(bp != NULL); 15510 15511 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15512 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15513 } 15514 } 15515 15516 15517 /* 15518 * Function: sd_validate_sense_data 15519 * 15520 * Description: Check the given sense data for validity. 15521 * If the sense data is not valid, the command will 15522 * be either failed or retried! 15523 * 15524 * Return Code: SD_SENSE_DATA_IS_INVALID 15525 * SD_SENSE_DATA_IS_VALID 15526 * 15527 * Context: May be called from interrupt context 15528 */ 15529 15530 static int 15531 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15532 size_t actual_len) 15533 { 15534 struct scsi_extended_sense *esp; 15535 struct scsi_pkt *pktp; 15536 char *msgp = NULL; 15537 15538 ASSERT(un != NULL); 15539 ASSERT(mutex_owned(SD_MUTEX(un))); 15540 ASSERT(bp != NULL); 15541 ASSERT(bp != un->un_rqs_bp); 15542 ASSERT(xp != NULL); 15543 15544 pktp = SD_GET_PKTP(bp); 15545 ASSERT(pktp != NULL); 15546 15547 /* 15548 * Check the status of the RQS command (auto or manual). 15549 */ 15550 switch (xp->xb_sense_status & STATUS_MASK) { 15551 case STATUS_GOOD: 15552 break; 15553 15554 case STATUS_RESERVATION_CONFLICT: 15555 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15556 return (SD_SENSE_DATA_IS_INVALID); 15557 15558 case STATUS_BUSY: 15559 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15560 "Busy Status on REQUEST SENSE\n"); 15561 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15562 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15563 return (SD_SENSE_DATA_IS_INVALID); 15564 15565 case STATUS_QFULL: 15566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15567 "QFULL Status on REQUEST SENSE\n"); 15568 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15569 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15570 return (SD_SENSE_DATA_IS_INVALID); 15571 15572 case STATUS_CHECK: 15573 case STATUS_TERMINATED: 15574 msgp = "Check Condition on REQUEST SENSE\n"; 15575 goto sense_failed; 15576 15577 default: 15578 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15579 goto sense_failed; 15580 } 15581 15582 /* 15583 * See if we got the minimum required amount of sense data. 15584 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15585 * or less. 
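 *
 * For reference, the checks below assume the standard fixed-format
 * sense layout (illustrative byte map; see struct scsi_extended_sense):
 *
 *	byte  0:	es_valid bit, es_class (7) and es_code
 *	byte  2:	sense key (low nibble)
 *	byte  7:	additional sense length
 *	bytes 12-13:	ASC / ASCQ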
15586 */ 15587 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15588 (actual_len == 0)) { 15589 msgp = "Request Sense couldn't get sense data\n"; 15590 goto sense_failed; 15591 } 15592 15593 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15594 msgp = "Not enough sense information\n"; 15595 goto sense_failed; 15596 } 15597 15598 /* 15599 * We require the extended sense data 15600 */ 15601 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15602 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15603 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15604 static char tmp[8]; 15605 static char buf[148]; 15606 char *p = (char *)(xp->xb_sense_data); 15607 int i; 15608 15609 mutex_enter(&sd_sense_mutex); 15610 (void) strcpy(buf, "undecodable sense information:"); 15611 for (i = 0; i < actual_len; i++) { 15612 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15613 (void) strcpy(&buf[strlen(buf)], tmp); 15614 } 15615 i = strlen(buf); 15616 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15617 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15618 mutex_exit(&sd_sense_mutex); 15619 } 15620 /* Note: Legacy behavior, fail the command with no retry */ 15621 sd_return_failed_command(un, bp, EIO); 15622 return (SD_SENSE_DATA_IS_INVALID); 15623 } 15624 15625 /* 15626 * Check that es_code is valid (es_class concatenated with es_code 15627 * makes up the "response code" field). es_class will always be 7, so 15628 * make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the 15629 * format. 15630 */ 15631 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15632 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15633 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15634 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15635 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15636 goto sense_failed; 15637 } 15638 15639 return (SD_SENSE_DATA_IS_VALID); 15640 15641 sense_failed: 15642 /* 15643 * If the request sense failed (for whatever reason), attempt 15644 * to retry the original command. 15645 */ 15646 #if defined(__i386) || defined(__amd64) 15647 /* 15648 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15649 * sddef.h for the SPARC platform, while x86 uses one binary 15650 * for both SCSI and FC. 15651 * The SD_RETRY_DELAY value used here needs to be adjusted 15652 * whenever SD_RETRY_DELAY changes in sddef.h 15653 */ 15654 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15655 sd_print_sense_failed_msg, msgp, EIO, 15656 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15657 #else 15658 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15659 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15660 #endif 15661 15662 return (SD_SENSE_DATA_IS_INVALID); 15663 } 15664 15665 15666 15667 /* 15668 * Function: sd_decode_sense 15669 * 15670 * Description: Take recovery action(s) when SCSI Sense Data is received. 15671 * 15672 * Context: Interrupt context.
15673 */ 15674 15675 static void 15676 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15677 struct scsi_pkt *pktp) 15678 { 15679 uint8_t sense_key; 15680 15681 ASSERT(un != NULL); 15682 ASSERT(mutex_owned(SD_MUTEX(un))); 15683 ASSERT(bp != NULL); 15684 ASSERT(bp != un->un_rqs_bp); 15685 ASSERT(xp != NULL); 15686 ASSERT(pktp != NULL); 15687 15688 sense_key = scsi_sense_key(xp->xb_sense_data); 15689 15690 switch (sense_key) { 15691 case KEY_NO_SENSE: 15692 sd_sense_key_no_sense(un, bp, xp, pktp); 15693 break; 15694 case KEY_RECOVERABLE_ERROR: 15695 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15696 bp, xp, pktp); 15697 break; 15698 case KEY_NOT_READY: 15699 sd_sense_key_not_ready(un, xp->xb_sense_data, 15700 bp, xp, pktp); 15701 break; 15702 case KEY_MEDIUM_ERROR: 15703 case KEY_HARDWARE_ERROR: 15704 sd_sense_key_medium_or_hardware_error(un, 15705 xp->xb_sense_data, bp, xp, pktp); 15706 break; 15707 case KEY_ILLEGAL_REQUEST: 15708 sd_sense_key_illegal_request(un, bp, xp, pktp); 15709 break; 15710 case KEY_UNIT_ATTENTION: 15711 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15712 bp, xp, pktp); 15713 break; 15714 case KEY_WRITE_PROTECT: 15715 case KEY_VOLUME_OVERFLOW: 15716 case KEY_MISCOMPARE: 15717 sd_sense_key_fail_command(un, bp, xp, pktp); 15718 break; 15719 case KEY_BLANK_CHECK: 15720 sd_sense_key_blank_check(un, bp, xp, pktp); 15721 break; 15722 case KEY_ABORTED_COMMAND: 15723 sd_sense_key_aborted_command(un, bp, xp, pktp); 15724 break; 15725 case KEY_VENDOR_UNIQUE: 15726 case KEY_COPY_ABORTED: 15727 case KEY_EQUAL: 15728 case KEY_RESERVED: 15729 default: 15730 sd_sense_key_default(un, xp->xb_sense_data, 15731 bp, xp, pktp); 15732 break; 15733 } 15734 } 15735 15736 15737 /* 15738 * Function: sd_dump_memory 15739 * 15740 * Description: Debug logging routine to print the contents of a user-provided 15741 * buffer. The output of the buffer is broken up into 256-byte 15742 * segments due to a size constraint of the scsi_log 15743 * implementation. 15744 * 15745 * Arguments: un - ptr to softstate 15746 * comp - component mask 15747 * title - "title" string to precede data when printed 15748 * data - ptr to data block to be printed 15749 * len - size of data block to be printed 15750 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15751 * 15752 * Context: May be called from interrupt context 15753 */ 15754 15755 #define SD_DUMP_MEMORY_BUF_SIZE 256 15756 15757 static char *sd_dump_format_string[] = { 15758 " 0x%02x", 15759 " %c" 15760 }; 15761 15762 static void 15763 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15764 int len, int fmt) 15765 { 15766 int i, j; 15767 int avail_count; 15768 int start_offset; 15769 int end_offset; 15770 size_t entry_len; 15771 char *bufp; 15772 char *local_buf; 15773 char *format_string; 15774 15775 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15776 15777 /* 15778 * In the debug version of the driver, this function is called from a 15779 * number of places which are NOPs in the release driver. 15780 * The debug driver therefore has additional methods of filtering 15781 * debug output. 15782 */ 15783 #ifdef SDDEBUG 15784 /* 15785 * In the debug version of the driver we can reduce the amount of debug 15786 * messages by setting sd_error_level to something other than 15787 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15788 * sd_component_mask.
15789 */ 15790 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15791 (sd_error_level != SCSI_ERR_ALL)) { 15792 return; 15793 } 15794 if (((sd_component_mask & comp) == 0) || 15795 (sd_error_level != SCSI_ERR_ALL)) { 15796 return; 15797 } 15798 #else 15799 if (sd_error_level != SCSI_ERR_ALL) { 15800 return; 15801 } 15802 #endif 15803 15804 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15805 bufp = local_buf; 15806 /* 15807 * Available length is the length of local_buf[], minus the 15808 * length of the title string, minus one for the ":", minus 15809 * one for the newline, minus one for the NULL terminator. 15810 * This gives the #bytes available for holding the printed 15811 * values from the given data buffer. 15812 */ 15813 if (fmt == SD_LOG_HEX) { 15814 format_string = sd_dump_format_string[0]; 15815 } else /* SD_LOG_CHAR */ { 15816 format_string = sd_dump_format_string[1]; 15817 } 15818 /* 15819 * Available count is the number of elements from the given 15820 * data buffer that we can fit into the available length. 15821 * This is based upon the size of the format string used. 15822 * Make one entry and find its size. 15823 */ 15824 (void) sprintf(bufp, format_string, data[0]); 15825 entry_len = strlen(bufp); 15826 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15827 15828 j = 0; 15829 while (j < len) { 15830 bufp = local_buf; 15831 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15832 start_offset = j; 15833 15834 end_offset = start_offset + avail_count; 15835 15836 (void) sprintf(bufp, "%s:", title); 15837 bufp += strlen(bufp); 15838 for (i = start_offset; ((i < end_offset) && (j < len)); 15839 i++, j++) { 15840 (void) sprintf(bufp, format_string, data[i]); 15841 bufp += entry_len; 15842 } 15843 (void) sprintf(bufp, "\n"); 15844 15845 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15846 } 15847 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15848 } 15849 15850 /* 15851 * Function: sd_print_sense_msg 15852 * 15853 * Description: Log a message based upon the given sense data.
15854 * 15855 * Arguments: un - ptr to associated softstate 15856 * bp - ptr to buf(9S) for the command 15857 * arg - ptr to associated sd_sense_info struct 15858 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15859 * or SD_NO_RETRY_ISSUED 15860 * 15861 * Context: May be called from interrupt context 15862 */ 15863 15864 static void 15865 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15866 { 15867 struct sd_xbuf *xp; 15868 struct scsi_pkt *pktp; 15869 uint8_t *sensep; 15870 daddr_t request_blkno; 15871 diskaddr_t err_blkno; 15872 int severity; 15873 int pfa_flag; 15874 extern struct scsi_key_strings scsi_cmds[]; 15875 15876 ASSERT(un != NULL); 15877 ASSERT(mutex_owned(SD_MUTEX(un))); 15878 ASSERT(bp != NULL); 15879 xp = SD_GET_XBUF(bp); 15880 ASSERT(xp != NULL); 15881 pktp = SD_GET_PKTP(bp); 15882 ASSERT(pktp != NULL); 15883 ASSERT(arg != NULL); 15884 15885 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15886 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15887 15888 if ((code == SD_DELAYED_RETRY_ISSUED) || 15889 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15890 severity = SCSI_ERR_RETRYABLE; 15891 } 15892 15893 /* Use absolute block number for the request block number */ 15894 request_blkno = xp->xb_blkno; 15895 15896 /* 15897 * Now try to get the error block number from the sense data 15898 */ 15899 sensep = xp->xb_sense_data; 15900 15901 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15902 (uint64_t *)&err_blkno)) { 15903 /* 15904 * We retrieved the error block number from the information 15905 * portion of the sense data. 15906 * 15907 * For USCSI commands we are better off using the error 15908 * block no. as the requested block no. (This is the best 15909 * we can estimate.) 15910 */ 15911 if ((SD_IS_BUFIO(xp) == FALSE) && 15912 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15913 request_blkno = err_blkno; 15914 } 15915 } else { 15916 /* 15917 * Without the es_valid bit set (for fixed format) or an 15918 * information descriptor (for descriptor format) we cannot 15919 * be certain of the error blkno, so just use the 15920 * request_blkno. 15921 */ 15922 err_blkno = (diskaddr_t)request_blkno; 15923 } 15924 15925 /* 15926 * The following will log the buffer contents for the release driver 15927 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15928 * level is set to verbose.
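 *
 * Sample of the resulting output (illustrative only; the exact
 * prefix is produced by scsi_log and the device path will differ):
 *
 *	/pci@1f,4000/scsi@3/sd@0,0 (sd0):
 *		Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x10 0x00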
15929 */ 15930 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15931 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15932 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15933 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15934 15935 if (pfa_flag == FALSE) { 15936 /* This is normally only set for USCSI */ 15937 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15938 return; 15939 } 15940 15941 if ((SD_IS_BUFIO(xp) == TRUE) && 15942 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15943 (severity < sd_error_level))) { 15944 return; 15945 } 15946 } 15947 15948 /* 15949 * Check for Sonoma Failover and keep a count of how many failed I/O's 15950 */ 15951 if ((SD_IS_LSI(un)) && 15952 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15953 (scsi_sense_asc(sensep) == 0x94) && 15954 (scsi_sense_ascq(sensep) == 0x01)) { 15955 un->un_sonoma_failure_count++; 15956 if (un->un_sonoma_failure_count > 1) { 15957 return; 15958 } 15959 } 15960 15961 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15962 request_blkno, err_blkno, scsi_cmds, 15963 (struct scsi_extended_sense *)sensep, 15964 un->un_additional_codes, NULL); 15965 } 15966 15967 /* 15968 * Function: sd_sense_key_no_sense 15969 * 15970 * Description: Recovery action when sense data was not received. 15971 * 15972 * Context: May be called from interrupt context 15973 */ 15974 15975 static void 15976 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15977 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15978 { 15979 struct sd_sense_info si; 15980 15981 ASSERT(un != NULL); 15982 ASSERT(mutex_owned(SD_MUTEX(un))); 15983 ASSERT(bp != NULL); 15984 ASSERT(xp != NULL); 15985 ASSERT(pktp != NULL); 15986 15987 si.ssi_severity = SCSI_ERR_FATAL; 15988 si.ssi_pfa_flag = FALSE; 15989 15990 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15991 15992 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15993 &si, EIO, (clock_t)0, NULL); 15994 } 15995 15996 15997 /* 15998 * Function: sd_sense_key_recoverable_error 15999 * 16000 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16001 * 16002 * Context: May be called from interrupt context 16003 */ 16004 16005 static void 16006 sd_sense_key_recoverable_error(struct sd_lun *un, 16007 uint8_t *sense_datap, 16008 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16009 { 16010 struct sd_sense_info si; 16011 uint8_t asc = scsi_sense_asc(sense_datap); 16012 16013 ASSERT(un != NULL); 16014 ASSERT(mutex_owned(SD_MUTEX(un))); 16015 ASSERT(bp != NULL); 16016 ASSERT(xp != NULL); 16017 ASSERT(pktp != NULL); 16018 16019 /* 16020 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16021 */ 16022 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16023 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16024 si.ssi_severity = SCSI_ERR_INFO; 16025 si.ssi_pfa_flag = TRUE; 16026 } else { 16027 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16028 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16029 si.ssi_severity = SCSI_ERR_RECOVERED; 16030 si.ssi_pfa_flag = FALSE; 16031 } 16032 16033 if (pktp->pkt_resid == 0) { 16034 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16035 sd_return_command(un, bp); 16036 return; 16037 } 16038 16039 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16040 &si, EIO, (clock_t)0, NULL); 16041 } 16042 16043 16044 16045 16046 /* 16047 * Function: sd_sense_key_not_ready 16048 * 16049 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
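 *
 * Overview of the ASC/ASCQ handling below (editorial summary):
 *
 *	0x04/xx	LUN not ready; depending on the ASCQ this retries,
 *		resets the target, or schedules a START STOP UNIT
 *	0x05	LUN does not respond to selection
 *	0x3A	medium not present; fail the command immediately
 *	other	log at verbose error levels, then retry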
16050 * 16051 * Context: May be called from interrupt context 16052 */ 16053 16054 static void 16055 sd_sense_key_not_ready(struct sd_lun *un, 16056 uint8_t *sense_datap, 16057 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16058 { 16059 struct sd_sense_info si; 16060 uint8_t asc = scsi_sense_asc(sense_datap); 16061 uint8_t ascq = scsi_sense_ascq(sense_datap); 16062 16063 ASSERT(un != NULL); 16064 ASSERT(mutex_owned(SD_MUTEX(un))); 16065 ASSERT(bp != NULL); 16066 ASSERT(xp != NULL); 16067 ASSERT(pktp != NULL); 16068 16069 si.ssi_severity = SCSI_ERR_FATAL; 16070 si.ssi_pfa_flag = FALSE; 16071 16072 /* 16073 * Update error stats after first NOT READY error. Disks may have 16074 * been powered down and may need to be restarted. For CDROMs, 16075 * report NOT READY errors only if media is present. 16076 */ 16077 if ((ISCD(un) && (asc == 0x3A)) || 16078 (xp->xb_nr_retry_count > 0)) { 16079 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16080 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16081 } 16082 16083 /* 16084 * Just fail if the "not ready" retry limit has been reached. 16085 */ 16086 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16087 /* Special check for error message printing for removables. */ 16088 if (un->un_f_has_removable_media && (asc == 0x04) && 16089 (ascq >= 0x04)) { 16090 si.ssi_severity = SCSI_ERR_ALL; 16091 } 16092 goto fail_command; 16093 } 16094 16095 /* 16096 * Check the ASC and ASCQ in the sense data as needed, to determine 16097 * what to do. 16098 */ 16099 switch (asc) { 16100 case 0x04: /* LOGICAL UNIT NOT READY */ 16101 /* 16102 * Disk drives that don't spin up result in a very long delay 16103 * in format without warning messages. We will log a message 16104 * if the error level is set to verbose. 16105 */ 16106 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16108 "logical unit not ready, resetting disk\n"); 16109 } 16110 16111 /* 16112 * There are different requirements for CDROMs and disks for 16113 * the number of retries. If a CD-ROM is reporting this, it is 16114 * probably reading TOC and is in the process of getting 16115 * ready, so we should keep on trying for a long time to make 16116 * sure that all types of media are taken into account (for 16117 * some media the drive takes a long time to read TOC). For 16118 * disks we do not want to retry this too many times as this 16119 * can cause a long hang in format when the drive refuses to 16120 * spin up (a very common failure). 16121 */ 16122 switch (ascq) { 16123 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16124 /* 16125 * Disk drives frequently refuse to spin up which 16126 * results in a very long hang in format without 16127 * warning messages. 16128 * 16129 * Note: This code preserves the legacy behavior of 16130 * comparing xb_nr_retry_count against zero for fibre 16131 * channel targets instead of comparing against the 16132 * un_reset_retry_count value. The reason for this 16133 * discrepancy has been so utterly lost beneath the 16134 * Sands of Time that even Indiana Jones could not 16135 * find it.
16136 */ 16137 if (un->un_f_is_fibre == TRUE) { 16138 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16139 (xp->xb_nr_retry_count > 0)) && 16140 (un->un_startstop_timeid == NULL)) { 16141 scsi_log(SD_DEVINFO(un), sd_label, 16142 CE_WARN, "logical unit not ready, " 16143 "resetting disk\n"); 16144 sd_reset_target(un, pktp); 16145 } 16146 } else { 16147 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16148 (xp->xb_nr_retry_count > 16149 un->un_reset_retry_count)) && 16150 (un->un_startstop_timeid == NULL)) { 16151 scsi_log(SD_DEVINFO(un), sd_label, 16152 CE_WARN, "logical unit not ready, " 16153 "resetting disk\n"); 16154 sd_reset_target(un, pktp); 16155 } 16156 } 16157 break; 16158 16159 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16160 /* 16161 * If the target is in the process of becoming 16162 * ready, just proceed with the retry. This can 16163 * happen with CD-ROMs that take a long time to 16164 * read TOC after a power cycle or reset. 16165 */ 16166 goto do_retry; 16167 16168 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16169 break; 16170 16171 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16172 /* 16173 * Retries cannot help here so just fail right away. 16174 */ 16175 goto fail_command; 16176 16177 case 0x88: 16178 /* 16179 * Vendor-unique code for T3/T4: it indicates a 16180 * path problem in a multipathed config, but as far as 16181 * the target driver is concerned it equates to a fatal 16182 * error, so we should just fail the command right away 16183 * (without printing anything to the console). If this 16184 * is not a T3/T4, fall thru to the default recovery 16185 * action. 16186 * T3/T4 is FC only, so there is no need to check is_fibre. 16187 */ 16188 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16189 sd_return_failed_command(un, bp, EIO); 16190 return; 16191 } 16192 /* FALLTHRU */ 16193 16194 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16195 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16196 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16197 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16198 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16199 default: /* Possible future codes in SCSI spec? */ 16200 /* 16201 * For removable-media devices, do not retry if 16202 * ASCQ > 2 as these result mostly from USCSI commands 16203 * on MMC devices issued to check status of an 16204 * operation initiated in immediate mode. Also for 16205 * ASCQ >= 4 do not print console messages as these 16206 * mainly represent a user-initiated operation 16207 * instead of a system failure. 16208 */ 16209 if (un->un_f_has_removable_media) { 16210 si.ssi_severity = SCSI_ERR_ALL; 16211 goto fail_command; 16212 } 16213 break; 16214 } 16215 16216 /* 16217 * As part of our recovery attempt for the NOT READY 16218 * condition, we issue a START STOP UNIT command. However 16219 * we want to wait for a short delay before attempting this 16220 * as there may still be more commands coming back from the 16221 * target with the check condition. To do this we use 16222 * timeout(9F) to call sd_start_stop_unit_callback() after 16223 * the delay interval expires. (sd_start_stop_unit_callback() 16224 * dispatches sd_start_stop_unit_task(), which will issue 16225 * the actual START STOP UNIT command.) The delay interval 16226 * is one-half of the delay that we will use to retry the 16227 * command that generated the NOT READY condition.
16228 * 16229 * Note that we could just dispatch sd_start_stop_unit_task() 16230 * from here and allow it to sleep for the delay interval, 16231 * but then we would be tying up the taskq thread 16232 * unnecessarily for the duration of the delay. 16233 * 16234 * Do not issue the START STOP UNIT if the current command 16235 * is already a START STOP UNIT. 16236 */ 16237 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16238 break; 16239 } 16240 16241 /* 16242 * Do not schedule the timeout if one is already pending. 16243 */ 16244 if (un->un_startstop_timeid != NULL) { 16245 SD_INFO(SD_LOG_ERROR, un, 16246 "sd_sense_key_not_ready: restart already issued to" 16247 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16248 ddi_get_instance(SD_DEVINFO(un))); 16249 break; 16250 } 16251 16252 /* 16253 * Schedule the START STOP UNIT command, then queue the command 16254 * for a retry. 16255 * 16256 * Note: A timeout is not scheduled for this retry because we 16257 * want the retry to be serial with the START_STOP_UNIT. The 16258 * retry will be started when the START_STOP_UNIT is completed 16259 * in sd_start_stop_unit_task. 16260 */ 16261 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16262 un, SD_BSY_TIMEOUT / 2); 16263 xp->xb_nr_retry_count++; 16264 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16265 return; 16266 16267 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16268 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16269 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16270 "unit does not respond to selection\n"); 16271 } 16272 break; 16273 16274 case 0x3A: /* MEDIUM NOT PRESENT */ 16275 if (sd_error_level >= SCSI_ERR_FATAL) { 16276 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16277 "Caddy not inserted in drive\n"); 16278 } 16279 16280 sr_ejected(un); 16281 un->un_mediastate = DKIO_EJECTED; 16282 /* The state has changed, inform the media watch routines */ 16283 cv_broadcast(&un->un_state_cv); 16284 /* Just fail if no media is present in the drive. */ 16285 goto fail_command; 16286 16287 default: 16288 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16289 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16290 "Unit not Ready. Additional sense code 0x%x\n", 16291 asc); 16292 } 16293 break; 16294 } 16295 16296 do_retry: 16297 16298 /* 16299 * Retry the command, as some targets may report NOT READY for 16300 * several seconds after being reset. 16301 */ 16302 xp->xb_nr_retry_count++; 16303 si.ssi_severity = SCSI_ERR_RETRYABLE; 16304 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16305 &si, EIO, SD_BSY_TIMEOUT, NULL); 16306 16307 return; 16308 16309 fail_command: 16310 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16311 sd_return_failed_command(un, bp, EIO); 16312 } 16313 16314 16315 16316 /* 16317 * Function: sd_sense_key_medium_or_hardware_error 16318 * 16319 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16320 * sense key.
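 *
 * (Editorial note on the recovery below: once xb_retry_count reaches
 * un_reset_retry_count and bus/device resets are allowed, the code
 * escalates roughly as sketched here:
 *
 *	if (un->un_f_lun_reset_enabled)
 *		reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *	if (reset_retval == 0)
 *		(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *
 * so a LUN-level reset is attempted first to avoid victimizing other
 * LUNs on the same target.)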
16321 * 16322 * Context: May be called from interrupt context 16323 */ 16324 16325 static void 16326 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16327 uint8_t *sense_datap, 16328 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16329 { 16330 struct sd_sense_info si; 16331 uint8_t sense_key = scsi_sense_key(sense_datap); 16332 uint8_t asc = scsi_sense_asc(sense_datap); 16333 16334 ASSERT(un != NULL); 16335 ASSERT(mutex_owned(SD_MUTEX(un))); 16336 ASSERT(bp != NULL); 16337 ASSERT(xp != NULL); 16338 ASSERT(pktp != NULL); 16339 16340 si.ssi_severity = SCSI_ERR_FATAL; 16341 si.ssi_pfa_flag = FALSE; 16342 16343 if (sense_key == KEY_MEDIUM_ERROR) { 16344 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16345 } 16346 16347 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16348 16349 if ((un->un_reset_retry_count != 0) && 16350 (xp->xb_retry_count == un->un_reset_retry_count)) { 16351 mutex_exit(SD_MUTEX(un)); 16352 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16353 if (un->un_f_allow_bus_device_reset == TRUE) { 16354 16355 boolean_t try_resetting_target = B_TRUE; 16356 16357 /* 16358 * We need to be able to handle specific ASC values when 16359 * we are handling a KEY_HARDWARE_ERROR. In particular, 16360 * taking the default action of resetting the target may 16361 * not be the appropriate way to attempt recovery. 16362 * Resetting a target because of a single LUN failure 16363 * victimizes all LUNs on that target. 16364 * 16365 * This is true for the LSI arrays: if an LSI 16366 * array controller returns an ASC of 0x84 (LUN Dead), 16367 * we should trust it. 16368 */ 16369 16370 if (sense_key == KEY_HARDWARE_ERROR) { 16371 switch (asc) { 16372 case 0x84: 16373 if (SD_IS_LSI(un)) { 16374 try_resetting_target = B_FALSE; 16375 } 16376 break; 16377 default: 16378 break; 16379 } 16380 } 16381 16382 if (try_resetting_target == B_TRUE) { 16383 int reset_retval = 0; 16384 if (un->un_f_lun_reset_enabled == TRUE) { 16385 SD_TRACE(SD_LOG_IO_CORE, un, 16386 "sd_sense_key_medium_or_hardware_" 16387 "error: issuing RESET_LUN\n"); 16388 reset_retval = 16389 scsi_reset(SD_ADDRESS(un), 16390 RESET_LUN); 16391 } 16392 if (reset_retval == 0) { 16393 SD_TRACE(SD_LOG_IO_CORE, un, 16394 "sd_sense_key_medium_or_hardware_" 16395 "error: issuing RESET_TARGET\n"); 16396 (void) scsi_reset(SD_ADDRESS(un), 16397 RESET_TARGET); 16398 } 16399 } 16400 } 16401 mutex_enter(SD_MUTEX(un)); 16402 } 16403 16404 /* 16405 * This really ought to be a fatal error, but we will retry anyway 16406 * as some drives report this as a spurious error. 16407 */ 16408 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16409 &si, EIO, (clock_t)0, NULL); 16410 } 16411 16412 16413 16414 /* 16415 * Function: sd_sense_key_illegal_request 16416 * 16417 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16418 * 16419 * Context: May be called from interrupt context 16420 */ 16421 16422 static void 16423 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16424 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16425 { 16426 struct sd_sense_info si; 16427 16428 ASSERT(un != NULL); 16429 ASSERT(mutex_owned(SD_MUTEX(un))); 16430 ASSERT(bp != NULL); 16431 ASSERT(xp != NULL); 16432 ASSERT(pktp != NULL); 16433 16434 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16435 16436 si.ssi_severity = SCSI_ERR_INFO; 16437 si.ssi_pfa_flag = FALSE; 16438 16439 /* Pointless to retry if the target thinks it's an illegal request */ 16440 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16441 sd_return_failed_command(un, bp, EIO); 16442 } 16443 16444 16445 16446 16447 /* 16448 * Function: sd_sense_key_unit_attention 16449 * 16450 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16451 * 16452 * Context: May be called from interrupt context 16453 */ 16454 16455 static void 16456 sd_sense_key_unit_attention(struct sd_lun *un, 16457 uint8_t *sense_datap, 16458 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16459 { 16460 /* 16461 * For UNIT ATTENTION we allow retries for one minute. Devices 16462 * like Sonoma can keep returning UNIT ATTENTION for close to a 16463 * minute under certain conditions. 16464 */ 16465 int retry_check_flag = SD_RETRIES_UA; 16466 boolean_t kstat_updated = B_FALSE; 16467 struct sd_sense_info si; 16468 uint8_t asc = scsi_sense_asc(sense_datap); 16469 16470 ASSERT(un != NULL); 16471 ASSERT(mutex_owned(SD_MUTEX(un))); 16472 ASSERT(bp != NULL); 16473 ASSERT(xp != NULL); 16474 ASSERT(pktp != NULL); 16475 16476 si.ssi_severity = SCSI_ERR_INFO; 16477 si.ssi_pfa_flag = FALSE; 16478 16479 16480 switch (asc) { 16481 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16482 if (sd_report_pfa != 0) { 16483 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16484 si.ssi_pfa_flag = TRUE; 16485 retry_check_flag = SD_RETRIES_STANDARD; 16486 goto do_retry; 16487 } 16488 16489 break; 16490 16491 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16492 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16493 un->un_resvd_status |= 16494 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16495 } 16496 #ifdef _LP64 16497 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16498 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16499 un, KM_NOSLEEP) == 0) { 16500 /* 16501 * If we can't dispatch the task we'll just 16502 * live without descriptor sense. We can 16503 * try again on the next "unit attention". 16504 */ 16505 SD_ERROR(SD_LOG_ERROR, un, 16506 "sd_sense_key_unit_attention: " 16507 "Could not dispatch " 16508 "sd_reenable_dsense_task\n"); 16509 } 16510 } 16511 #endif /* _LP64 */ 16512 /* FALLTHRU */ 16513 16514 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16515 if (!un->un_f_has_removable_media) { 16516 break; 16517 } 16518 16519 /* 16520 * When we get a unit attention from a removable-media device, 16521 * it may be in a state that will take a long time to recover 16522 * (e.g., from a reset). Since we are executing in interrupt 16523 * context here, we cannot wait around for the device to come 16524 * back. So hand this command off to sd_media_change_task() 16525 * for deferred processing under taskq thread context. (Note 16526 * that the command still may be failed if a problem is 16527 * encountered at a later time.)
16528 */ 16529 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16530 KM_NOSLEEP) == 0) { 16531 /* 16532 * Cannot dispatch the request so fail the command. 16533 */ 16534 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16535 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16536 si.ssi_severity = SCSI_ERR_FATAL; 16537 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16538 sd_return_failed_command(un, bp, EIO); 16539 } 16540 16541 /* 16542 * If we failed to dispatch sd_media_change_task(), the kstat 16543 * was already updated above. If the dispatch succeeded, the 16544 * kstat will be updated later if an error is encountered. 16545 * Either way, set the kstat_updated flag here. 16546 */ 16547 kstat_updated = B_TRUE; 16548 16549 /* 16550 * Either the command has been successfully dispatched to a 16551 * task Q for retrying, or the dispatch failed. In either case 16552 * do NOT retry again by calling sd_retry_command. This would set 16553 * up two retries of the same command, and when one completes and 16554 * frees the resources, the other would access freed memory, 16555 * which is a bad thing. 16556 */ 16557 return; 16558 16559 default: 16560 break; 16561 } 16562 16563 /* 16564 * Update the kstat if we haven't done so already. 16565 */ 16566 if (!kstat_updated) { 16567 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16568 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16569 } 16570 16571 do_retry: 16572 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16573 EIO, SD_UA_RETRY_DELAY, NULL); 16574 } 16575 16576 16577 16578 /* 16579 * Function: sd_sense_key_fail_command 16580 * 16581 * Description: Used to fail a command when we don't like the sense key that 16582 * was returned. 16583 * 16584 * Context: May be called from interrupt context 16585 */ 16586 16587 static void 16588 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16589 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16590 { 16591 struct sd_sense_info si; 16592 16593 ASSERT(un != NULL); 16594 ASSERT(mutex_owned(SD_MUTEX(un))); 16595 ASSERT(bp != NULL); 16596 ASSERT(xp != NULL); 16597 ASSERT(pktp != NULL); 16598 16599 si.ssi_severity = SCSI_ERR_FATAL; 16600 si.ssi_pfa_flag = FALSE; 16601 16602 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16603 sd_return_failed_command(un, bp, EIO); 16604 } 16605 16606 16607 16608 /* 16609 * Function: sd_sense_key_blank_check 16610 * 16611 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16612 * Has no monetary connotation. 16613 * 16614 * Context: May be called from interrupt context 16615 */ 16616 16617 static void 16618 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16619 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16620 { 16621 struct sd_sense_info si; 16622 16623 ASSERT(un != NULL); 16624 ASSERT(mutex_owned(SD_MUTEX(un))); 16625 ASSERT(bp != NULL); 16626 ASSERT(xp != NULL); 16627 ASSERT(pktp != NULL); 16628 16629 /* 16630 * Blank check is not fatal for removable devices, therefore 16631 * it does not require a console message. 16632 */ 16633 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16634 SCSI_ERR_FATAL; 16635 si.ssi_pfa_flag = FALSE; 16636 16637 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16638 sd_return_failed_command(un, bp, EIO); 16639 } 16640 16641 16642 16643 16644 /* 16645 * Function: sd_sense_key_aborted_command 16646 * 16647 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16648 * 16649 * Context: May be called from interrupt context 16650 */ 16651 16652 static void 16653 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16654 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16655 { 16656 struct sd_sense_info si; 16657 16658 ASSERT(un != NULL); 16659 ASSERT(mutex_owned(SD_MUTEX(un))); 16660 ASSERT(bp != NULL); 16661 ASSERT(xp != NULL); 16662 ASSERT(pktp != NULL); 16663 16664 si.ssi_severity = SCSI_ERR_FATAL; 16665 si.ssi_pfa_flag = FALSE; 16666 16667 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16668 16669 /* 16670 * This really ought to be a fatal error, but we will retry anyway 16671 * as some drives report this as a spurious error. 16672 */ 16673 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16674 &si, EIO, drv_usectohz(100000), NULL); 16675 } 16676 16677 16678 16679 /* 16680 * Function: sd_sense_key_default 16681 * 16682 * Description: Default recovery action for several SCSI sense keys (basically 16683 * attempts a retry). 16684 * 16685 * Context: May be called from interrupt context 16686 */ 16687 16688 static void 16689 sd_sense_key_default(struct sd_lun *un, 16690 uint8_t *sense_datap, 16691 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16692 { 16693 struct sd_sense_info si; 16694 uint8_t sense_key = scsi_sense_key(sense_datap); 16695 16696 ASSERT(un != NULL); 16697 ASSERT(mutex_owned(SD_MUTEX(un))); 16698 ASSERT(bp != NULL); 16699 ASSERT(xp != NULL); 16700 ASSERT(pktp != NULL); 16701 16702 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16703 16704 /* 16705 * Undecoded sense key. Attempt retries and hope that will fix 16706 * the problem. Otherwise, we're dead. 16707 */ 16708 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16709 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16710 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16711 } 16712 16713 si.ssi_severity = SCSI_ERR_FATAL; 16714 si.ssi_pfa_flag = FALSE; 16715 16716 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16717 &si, EIO, (clock_t)0, NULL); 16718 } 16719 16720 16721 16722 /* 16723 * Function: sd_print_retry_msg 16724 * 16725 * Description: Print a message indicating the retry action being taken. 16726 * 16727 * Arguments: un - ptr to associated softstate 16728 * bp - ptr to buf(9S) for the command 16729 * arg - not used. 16730 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16731 * or SD_NO_RETRY_ISSUED 16732 * 16733 * Context: May be called from interrupt context 16734 */ 16735 /* ARGSUSED */ 16736 static void 16737 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16738 { 16739 struct sd_xbuf *xp; 16740 struct scsi_pkt *pktp; 16741 char *reasonp; 16742 char *msgp; 16743 16744 ASSERT(un != NULL); 16745 ASSERT(mutex_owned(SD_MUTEX(un))); 16746 ASSERT(bp != NULL); 16747 pktp = SD_GET_PKTP(bp); 16748 ASSERT(pktp != NULL); 16749 xp = SD_GET_XBUF(bp); 16750 ASSERT(xp != NULL); 16751 16752 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16753 mutex_enter(&un->un_pm_mutex); 16754 if ((un->un_state == SD_STATE_SUSPENDED) || 16755 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16756 (pktp->pkt_flags & FLAG_SILENT)) { 16757 mutex_exit(&un->un_pm_mutex); 16758 goto update_pkt_reason; 16759 } 16760 mutex_exit(&un->un_pm_mutex); 16761 16762 /* 16763 * Suppress messages if they are all the same pkt_reason; with 16764 * TQ, many (up to 256) are returned with the same pkt_reason. 16765 * If we are in panic, then suppress the retry messages. 
16766 */ 16767 switch (flag) { 16768 case SD_NO_RETRY_ISSUED: 16769 msgp = "giving up"; 16770 break; 16771 case SD_IMMEDIATE_RETRY_ISSUED: 16772 case SD_DELAYED_RETRY_ISSUED: 16773 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16774 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16775 (sd_error_level != SCSI_ERR_ALL))) { 16776 return; 16777 } 16778 msgp = "retrying command"; 16779 break; 16780 default: 16781 goto update_pkt_reason; 16782 } 16783 16784 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16785 scsi_rname(pktp->pkt_reason)); 16786 16787 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16788 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16789 16790 update_pkt_reason: 16791 /* 16792 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16793 * This is to prevent multiple console messages for the same failure 16794 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16795 * when the command is retried successfully because there still may be 16796 * more commands coming back with the same value of pktp->pkt_reason. 16797 */ 16798 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16799 un->un_last_pkt_reason = pktp->pkt_reason; 16800 } 16801 } 16802 16803 16804 /* 16805 * Function: sd_print_cmd_incomplete_msg 16806 * 16807 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16808 * 16809 * Arguments: un - ptr to associated softstate 16810 * bp - ptr to buf(9S) for the command 16811 * arg - passed to sd_print_retry_msg() 16812 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16813 * or SD_NO_RETRY_ISSUED 16814 * 16815 * Context: May be called from interrupt context 16816 */ 16817 16818 static void 16819 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16820 int code) 16821 { 16822 dev_info_t *dip; 16823 16824 ASSERT(un != NULL); 16825 ASSERT(mutex_owned(SD_MUTEX(un))); 16826 ASSERT(bp != NULL); 16827 16828 switch (code) { 16829 case SD_NO_RETRY_ISSUED: 16830 /* Command was failed. Someone turned off this target? */ 16831 if (un->un_state != SD_STATE_OFFLINE) { 16832 /* 16833 * Suppress message if we are detaching and 16834 * device has been disconnected 16835 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16836 * private interface and not part of the DDI 16837 */ 16838 dip = un->un_sd->sd_dev; 16839 if (!(DEVI_IS_DETACHING(dip) && 16840 DEVI_IS_DEVICE_REMOVED(dip))) { 16841 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16842 "disk not responding to selection\n"); 16843 } 16844 New_state(un, SD_STATE_OFFLINE); 16845 } 16846 break; 16847 16848 case SD_DELAYED_RETRY_ISSUED: 16849 case SD_IMMEDIATE_RETRY_ISSUED: 16850 default: 16851 /* Command was successfully queued for retry */ 16852 sd_print_retry_msg(un, bp, arg, code); 16853 break; 16854 } 16855 } 16856 16857 16858 /* 16859 * Function: sd_pkt_reason_cmd_incomplete 16860 * 16861 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
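 *
 * [Editor's note with an illustrative fragment, prompted by the
 * "Note: Should this not just check the bit?" questions in the bodies
 * below: pkt_state is a bit mask, so the two tests are not equivalent.]
 *
 *	pktp->pkt_state != STATE_GOT_BUS	true if the bus was never
 *						obtained, but also if any
 *						additional state bit is set
 *	(pktp->pkt_state & STATE_GOT_BUS) == 0	true only if the bus was
 *						never obtained at all
 *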
16862 * 16863 * Context: May be called from interrupt context 16864 */ 16865 16866 static void 16867 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16868 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16869 { 16870 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16871 16872 ASSERT(un != NULL); 16873 ASSERT(mutex_owned(SD_MUTEX(un))); 16874 ASSERT(bp != NULL); 16875 ASSERT(xp != NULL); 16876 ASSERT(pktp != NULL); 16877 16878 /* Do not do a reset if selection did not complete */ 16879 /* Note: Should this not just check the bit? */ 16880 if (pktp->pkt_state != STATE_GOT_BUS) { 16881 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16882 sd_reset_target(un, pktp); 16883 } 16884 16885 /* 16886 * If the target was not successfully selected, then set 16887 * SD_RETRIES_FAILFAST to indicate that we lost communication 16888 * with the target, and further retries and/or commands are 16889 * likely to take a long time. 16890 */ 16891 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16892 flag |= SD_RETRIES_FAILFAST; 16893 } 16894 16895 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16896 16897 sd_retry_command(un, bp, flag, 16898 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16899 } 16900 16901 16902 16903 /* 16904 * Function: sd_pkt_reason_cmd_tran_err 16905 * 16906 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16907 * 16908 * Context: May be called from interrupt context 16909 */ 16910 16911 static void 16912 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16913 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16914 { 16915 ASSERT(un != NULL); 16916 ASSERT(mutex_owned(SD_MUTEX(un))); 16917 ASSERT(bp != NULL); 16918 ASSERT(xp != NULL); 16919 ASSERT(pktp != NULL); 16920 16921 /* 16922 * Do not reset if we got a parity error, or if 16923 * selection did not complete. 16924 */ 16925 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16926 /* Note: Should this not just check the bit for pkt_state? */ 16927 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16928 (pktp->pkt_state != STATE_GOT_BUS)) { 16929 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16930 sd_reset_target(un, pktp); 16931 } 16932 16933 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16934 16935 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16936 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16937 } 16938 16939 16940 16941 /* 16942 * Function: sd_pkt_reason_cmd_reset 16943 * 16944 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16945 * 16946 * Context: May be called from interrupt context 16947 */ 16948 16949 static void 16950 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16951 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16952 { 16953 ASSERT(un != NULL); 16954 ASSERT(mutex_owned(SD_MUTEX(un))); 16955 ASSERT(bp != NULL); 16956 ASSERT(xp != NULL); 16957 ASSERT(pktp != NULL); 16958 16959 /* The target may still be running the command, so try to reset. */ 16960 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16961 sd_reset_target(un, pktp); 16962 16963 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16964 16965 /* 16966 * If pkt_reason is CMD_RESET chances are that this pkt got 16967 * reset because another target on this bus caused it. The target 16968 * that caused it should get CMD_TIMEOUT with pkt_statistics 16969 * of STAT_TIMEOUT/STAT_DEV_RESET. 
16970 */ 16971 16972 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16973 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16974 } 16975 16976 16977 16978 16979 /* 16980 * Function: sd_pkt_reason_cmd_aborted 16981 * 16982 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16983 * 16984 * Context: May be called from interrupt context 16985 */ 16986 16987 static void 16988 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16989 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16990 { 16991 ASSERT(un != NULL); 16992 ASSERT(mutex_owned(SD_MUTEX(un))); 16993 ASSERT(bp != NULL); 16994 ASSERT(xp != NULL); 16995 ASSERT(pktp != NULL); 16996 16997 /* The target may still be running the command, so try to reset. */ 16998 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16999 sd_reset_target(un, pktp); 17000 17001 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17002 17003 /* 17004 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17005 * aborted because another target on this bus caused it. The target 17006 * that caused it should get CMD_TIMEOUT with pkt_statistics 17007 * of STAT_TIMEOUT/STAT_DEV_RESET. 17008 */ 17009 17010 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17011 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17012 } 17013 17014 17015 17016 /* 17017 * Function: sd_pkt_reason_cmd_timeout 17018 * 17019 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17020 * 17021 * Context: May be called from interrupt context 17022 */ 17023 17024 static void 17025 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17026 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17027 { 17028 ASSERT(un != NULL); 17029 ASSERT(mutex_owned(SD_MUTEX(un))); 17030 ASSERT(bp != NULL); 17031 ASSERT(xp != NULL); 17032 ASSERT(pktp != NULL); 17033 17034 17035 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17036 sd_reset_target(un, pktp); 17037 17038 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17039 17040 /* 17041 * A command timeout indicates that we could not establish 17042 * communication with the target, so set SD_RETRIES_FAILFAST 17043 * as further retries/commands are likely to take a long time. 17044 */ 17045 sd_retry_command(un, bp, 17046 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17047 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17048 } 17049 17050 17051 17052 /* 17053 * Function: sd_pkt_reason_cmd_unx_bus_free 17054 * 17055 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 17056 * 17057 * Context: May be called from interrupt context 17058 */ 17059 17060 static void 17061 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17062 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17063 { 17064 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17065 17066 ASSERT(un != NULL); 17067 ASSERT(mutex_owned(SD_MUTEX(un))); 17068 ASSERT(bp != NULL); 17069 ASSERT(xp != NULL); 17070 ASSERT(pktp != NULL); 17071 17072 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17073 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17074 17075 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17076 sd_print_retry_msg : NULL; 17077 17078 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17079 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17080 } 17081 17082 17083 /* 17084 * Function: sd_pkt_reason_cmd_tag_reject 17085 * 17086 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
17087 * 17088 * Context: May be called from interrupt context 17089 */ 17090 17091 static void 17092 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17093 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17094 { 17095 ASSERT(un != NULL); 17096 ASSERT(mutex_owned(SD_MUTEX(un))); 17097 ASSERT(bp != NULL); 17098 ASSERT(xp != NULL); 17099 ASSERT(pktp != NULL); 17100 17101 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17102 pktp->pkt_flags = 0; 17103 un->un_tagflags = 0; 17104 if (un->un_f_opt_queueing == TRUE) { 17105 un->un_throttle = min(un->un_throttle, 3); 17106 } else { 17107 un->un_throttle = 1; 17108 } 17109 mutex_exit(SD_MUTEX(un)); 17110 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17111 mutex_enter(SD_MUTEX(un)); 17112 17113 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17114 17115 /* Legacy behavior not to check retry counts here. */ 17116 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17117 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17118 } 17119 17120 17121 /* 17122 * Function: sd_pkt_reason_default 17123 * 17124 * Description: Default recovery actions for SCSA pkt_reason values that 17125 * do not have more explicit recovery actions. 17126 * 17127 * Context: May be called from interrupt context 17128 */ 17129 17130 static void 17131 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17132 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17133 { 17134 ASSERT(un != NULL); 17135 ASSERT(mutex_owned(SD_MUTEX(un))); 17136 ASSERT(bp != NULL); 17137 ASSERT(xp != NULL); 17138 ASSERT(pktp != NULL); 17139 17140 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17141 sd_reset_target(un, pktp); 17142 17143 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17144 17145 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17146 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17147 } 17148 17149 17150 17151 /* 17152 * Function: sd_pkt_status_check_condition 17153 * 17154 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17155 * 17156 * Context: May be called from interrupt context 17157 */ 17158 17159 static void 17160 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17161 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17162 { 17163 ASSERT(un != NULL); 17164 ASSERT(mutex_owned(SD_MUTEX(un))); 17165 ASSERT(bp != NULL); 17166 ASSERT(xp != NULL); 17167 ASSERT(pktp != NULL); 17168 17169 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17170 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17171 17172 /* 17173 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17174 * command will be retried after the request sense). Otherwise, retry 17175 * the command. Note: we are issuing the request sense even though the 17176 * retry limit may have been reached for the failed command. 
17177 */ 17178 if (un->un_f_arq_enabled == FALSE) { 17179 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17180 "no ARQ, sending request sense command\n"); 17181 sd_send_request_sense_command(un, bp, pktp); 17182 } else { 17183 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17184 "ARQ, retrying request sense command\n"); 17185 #if defined(__i386) || defined(__amd64) 17186 /* 17187 * The SD_RETRY_DELAY value needs to be adjusted here 17188 * when SD_RETRY_DELAY changes in sddef.h 17189 */ 17190 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17191 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 17192 NULL); 17193 #else 17194 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17195 EIO, SD_RETRY_DELAY, NULL); 17196 #endif 17197 } 17198 17199 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17200 } 17201 17202 17203 /* 17204 * Function: sd_pkt_status_busy 17205 * 17206 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17207 * 17208 * Context: May be called from interrupt context 17209 */ 17210 17211 static void 17212 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17213 struct scsi_pkt *pktp) 17214 { 17215 ASSERT(un != NULL); 17216 ASSERT(mutex_owned(SD_MUTEX(un))); 17217 ASSERT(bp != NULL); 17218 ASSERT(xp != NULL); 17219 ASSERT(pktp != NULL); 17220 17221 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17222 "sd_pkt_status_busy: entry\n"); 17223 17224 /* If retries are exhausted, just fail the command. */ 17225 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17226 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17227 "device busy too long\n"); 17228 sd_return_failed_command(un, bp, EIO); 17229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17230 "sd_pkt_status_busy: exit\n"); 17231 return; 17232 } 17233 xp->xb_retry_count++; 17234 17235 /* 17236 * Try to reset the target. However, we do not want to perform 17237 * more than one reset if the device continues to fail. The reset 17238 * will be performed when the retry count reaches the reset 17239 * threshold. This threshold should be set such that at least 17240 * one retry is issued before the reset is performed. 17241 */ 17242 if (xp->xb_retry_count == 17243 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17244 int rval = 0; 17245 mutex_exit(SD_MUTEX(un)); 17246 if (un->un_f_allow_bus_device_reset == TRUE) { 17247 /* 17248 * First try to reset the LUN; if we cannot then 17249 * try to reset the target. 17250 */ 17251 if (un->un_f_lun_reset_enabled == TRUE) { 17252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17253 "sd_pkt_status_busy: RESET_LUN\n"); 17254 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17255 } 17256 if (rval == 0) { 17257 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17258 "sd_pkt_status_busy: RESET_TARGET\n"); 17259 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17260 } 17261 } 17262 if (rval == 0) { 17263 /* 17264 * If the RESET_LUN and/or RESET_TARGET failed, 17265 * try RESET_ALL 17266 */ 17267 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17268 "sd_pkt_status_busy: RESET_ALL\n"); 17269 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17270 } 17271 mutex_enter(SD_MUTEX(un)); 17272 if (rval == 0) { 17273 /* 17274 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17275 * At this point we give up & fail the command.
17276 */ 17277 sd_return_failed_command(un, bp, EIO); 17278 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17279 "sd_pkt_status_busy: exit (failed cmd)\n"); 17280 return; 17281 } 17282 } 17283 17284 /* 17285 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17286 * we have already checked the retry counts above. 17287 */ 17288 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17289 EIO, SD_BSY_TIMEOUT, NULL); 17290 17291 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17292 "sd_pkt_status_busy: exit\n"); 17293 } 17294 17295 17296 /* 17297 * Function: sd_pkt_status_reservation_conflict 17298 * 17299 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17300 * command status. 17301 * 17302 * Context: May be called from interrupt context 17303 */ 17304 17305 static void 17306 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17307 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17308 { 17309 ASSERT(un != NULL); 17310 ASSERT(mutex_owned(SD_MUTEX(un))); 17311 ASSERT(bp != NULL); 17312 ASSERT(xp != NULL); 17313 ASSERT(pktp != NULL); 17314 17315 /* 17316 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17317 * conflict could be due to various reasons like incorrect keys, not 17318 * registered or not reserved etc. So, we return EACCES to the caller. 17319 */ 17320 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17321 int cmd = SD_GET_PKT_OPCODE(pktp); 17322 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17323 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17324 sd_return_failed_command(un, bp, EACCES); 17325 return; 17326 } 17327 } 17328 17329 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17330 17331 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17332 if (sd_failfast_enable != 0) { 17333 /* By definition, we must panic here.... */ 17334 sd_panic_for_res_conflict(un); 17335 /*NOTREACHED*/ 17336 } 17337 SD_ERROR(SD_LOG_IO, un, 17338 "sd_handle_resv_conflict: Disk Reserved\n"); 17339 sd_return_failed_command(un, bp, EACCES); 17340 return; 17341 } 17342 17343 /* 17344 * 1147670: retry only if sd_retry_on_reservation_conflict 17345 * property is set (default is 1). Retries will not succeed 17346 * on a disk reserved by another initiator. HA systems 17347 * may reset this via sd.conf to avoid these retries. 17348 * 17349 * Note: The legacy return code for this failure is EIO, however EACCES 17350 * seems more appropriate for a reservation conflict. 17351 */ 17352 if (sd_retry_on_reservation_conflict == 0) { 17353 SD_ERROR(SD_LOG_IO, un, 17354 "sd_handle_resv_conflict: Device Reserved\n"); 17355 sd_return_failed_command(un, bp, EIO); 17356 return; 17357 } 17358 17359 /* 17360 * Retry the command if we can. 17361 * 17362 * Note: The legacy return code for this failure is EIO, however EACCES 17363 * seems more appropriate for a reservation conflict. 17364 */ 17365 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17366 (clock_t)2, NULL); 17367 } 17368 17369 17370 17371 /* 17372 * Function: sd_pkt_status_qfull 17373 * 17374 * Description: Handle a QUEUE FULL condition from the target. This can 17375 * occur if the HBA does not handle the queue full condition. 17376 * (Basically this means third-party HBAs as Sun HBAs will 17377 * handle the queue full condition.) Note that if there are 17378 * some commands already in the transport, then the queue full 17379 * has occurred because the queue for this nexus is actually 17380 * full. 
If there are no commands in the transport, then the 17381 * queue full is resulting from some other initiator or lun 17382 * consuming all the resources at the target. 17383 * 17384 * Context: May be called from interrupt context 17385 */ 17386 17387 static void 17388 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17389 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17390 { 17391 ASSERT(un != NULL); 17392 ASSERT(mutex_owned(SD_MUTEX(un))); 17393 ASSERT(bp != NULL); 17394 ASSERT(xp != NULL); 17395 ASSERT(pktp != NULL); 17396 17397 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17398 "sd_pkt_status_qfull: entry\n"); 17399 17400 /* 17401 * Just lower the QFULL throttle and retry the command. Note that 17402 * we do not limit the number of retries here. 17403 */ 17404 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17405 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17406 SD_RESTART_TIMEOUT, NULL); 17407 17408 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17409 "sd_pkt_status_qfull: exit\n"); 17410 } 17411 17412 17413 /* 17414 * Function: sd_reset_target 17415 * 17416 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17417 * RESET_TARGET, or RESET_ALL. 17418 * 17419 * Context: May be called under interrupt context. 17420 */ 17421 17422 static void 17423 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17424 { 17425 int rval = 0; 17426 17427 ASSERT(un != NULL); 17428 ASSERT(mutex_owned(SD_MUTEX(un))); 17429 ASSERT(pktp != NULL); 17430 17431 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17432 17433 /* 17434 * No need to reset if the transport layer has already done so. 17435 */ 17436 if ((pktp->pkt_statistics & 17437 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17438 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17439 "sd_reset_target: no reset\n"); 17440 return; 17441 } 17442 17443 mutex_exit(SD_MUTEX(un)); 17444 17445 if (un->un_f_allow_bus_device_reset == TRUE) { 17446 if (un->un_f_lun_reset_enabled == TRUE) { 17447 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17448 "sd_reset_target: RESET_LUN\n"); 17449 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17450 } 17451 if (rval == 0) { 17452 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17453 "sd_reset_target: RESET_TARGET\n"); 17454 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17455 } 17456 } 17457 17458 if (rval == 0) { 17459 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17460 "sd_reset_target: RESET_ALL\n"); 17461 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17462 } 17463 17464 mutex_enter(SD_MUTEX(un)); 17465 17466 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17467 } 17468 17469 17470 /* 17471 * Function: sd_media_change_task 17472 * 17473 * Description: Recovery action for CDROM to become available. 
17474 * 17475 * Context: Executes in a taskq() thread context 17476 */ 17477 17478 static void 17479 sd_media_change_task(void *arg) 17480 { 17481 struct scsi_pkt *pktp = arg; 17482 struct sd_lun *un; 17483 struct buf *bp; 17484 struct sd_xbuf *xp; 17485 int err = 0; 17486 int retry_count = 0; 17487 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17488 struct sd_sense_info si; 17489 17490 ASSERT(pktp != NULL); 17491 bp = (struct buf *)pktp->pkt_private; 17492 ASSERT(bp != NULL); 17493 xp = SD_GET_XBUF(bp); 17494 ASSERT(xp != NULL); 17495 un = SD_GET_UN(bp); 17496 ASSERT(un != NULL); 17497 ASSERT(!mutex_owned(SD_MUTEX(un))); 17498 ASSERT(un->un_f_monitor_media_state); 17499 17500 si.ssi_severity = SCSI_ERR_INFO; 17501 si.ssi_pfa_flag = FALSE; 17502 17503 /* 17504 * When a reset is issued on a CDROM, it takes a long time to 17505 * recover. The first few attempts to read the capacity and other 17506 * things related to handling the unit attention fail (with an ASC 17507 * of 0x4 and an ASCQ of 0x1). In that case we want to allow enough 17508 * retries, while limiting the retries in other cases of genuine 17509 * failure, such as no media in the drive. 17510 */ 17511 while (retry_count++ < retry_limit) { 17512 if ((err = sd_handle_mchange(un)) == 0) { 17513 break; 17514 } 17515 if (err == EAGAIN) { 17516 retry_limit = SD_UNIT_ATTENTION_RETRY; 17517 } 17518 /* Sleep for 0.5 sec. & try again */ 17519 delay(drv_usectohz(500000)); 17520 } 17521 17522 /* 17523 * Dispatch (retry or fail) the original command here, 17524 * along with appropriate console messages.... 17525 * 17526 * Must grab the mutex before calling sd_retry_command, 17527 * sd_print_sense_msg and sd_return_failed_command. 17528 */ 17529 mutex_enter(SD_MUTEX(un)); 17530 if (err != SD_CMD_SUCCESS) { 17531 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17532 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17533 si.ssi_severity = SCSI_ERR_FATAL; 17534 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17535 sd_return_failed_command(un, bp, EIO); 17536 } else { 17537 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17538 &si, EIO, (clock_t)0, NULL); 17539 } 17540 mutex_exit(SD_MUTEX(un)); 17541 } 17542 17543 17544 17545 /* 17546 * Function: sd_handle_mchange 17547 * 17548 * Description: Perform geometry validation & other recovery when a CDROM 17549 * has been removed from the drive.
17550 * 17551 * Return Code: 0 for success 17552 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17553 * sd_send_scsi_READ_CAPACITY() 17554 * 17555 * Context: Executes in a taskq() thread context 17556 */ 17557 17558 static int 17559 sd_handle_mchange(struct sd_lun *un) 17560 { 17561 uint64_t capacity; 17562 uint32_t lbasize; 17563 int rval; 17564 17565 ASSERT(!mutex_owned(SD_MUTEX(un))); 17566 ASSERT(un->un_f_monitor_media_state); 17567 17568 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17569 SD_PATH_DIRECT_PRIORITY)) != 0) { 17570 return (rval); 17571 } 17572 17573 mutex_enter(SD_MUTEX(un)); 17574 sd_update_block_info(un, lbasize, capacity); 17575 17576 if (un->un_errstats != NULL) { 17577 struct sd_errstats *stp = 17578 (struct sd_errstats *)un->un_errstats->ks_data; 17579 stp->sd_capacity.value.ui64 = (uint64_t) 17580 ((uint64_t)un->un_blockcount * 17581 (uint64_t)un->un_tgt_blocksize); 17582 } 17583 17584 17585 /* 17586 * Check if the media in the device is writable or not 17587 */ 17588 if (ISCD(un)) 17589 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17590 17591 /* 17592 * Note: Maybe let the strategy/partitioning chain worry about getting 17593 * valid geometry. 17594 */ 17595 mutex_exit(SD_MUTEX(un)); 17596 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17597 17598 17599 if (cmlb_validate(un->un_cmlbhandle, 0, 17600 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17601 return (EIO); 17602 } else { 17603 if (un->un_f_pkstats_enabled) { 17604 sd_set_pstats(un); 17605 SD_TRACE(SD_LOG_IO_PARTITION, un, 17606 "sd_handle_mchange: un:0x%p pstats created and " 17607 "set\n", un); 17608 } 17609 } 17610 17611 17612 /* 17613 * Try to lock the door 17614 */ 17615 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17616 SD_PATH_DIRECT_PRIORITY)); 17617 } 17618 17619 17620 /* 17621 * Function: sd_send_scsi_DOORLOCK 17622 * 17623 * Description: Issue the scsi DOOR LOCK command 17624 * 17625 * Arguments: un - pointer to driver soft state (unit) structure for 17626 * this target. 17627 * flag - SD_REMOVAL_ALLOW 17628 * SD_REMOVAL_PREVENT 17629 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17630 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17631 * to use the USCSI "direct" chain and bypass the normal 17632 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17633 * command is issued as part of an error recovery action. 17634 * 17635 * Return Code: 0 - Success 17636 * errno return code from sd_send_scsi_cmd() 17637 * 17638 * Context: Can sleep. 17639 */ 17640 17641 static int 17642 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17643 { 17644 union scsi_cdb cdb; 17645 struct uscsi_cmd ucmd_buf; 17646 struct scsi_extended_sense sense_buf; 17647 int status; 17648 17649 ASSERT(un != NULL); 17650 ASSERT(!mutex_owned(SD_MUTEX(un))); 17651 17652 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17653 17654 /* already determined doorlock is not supported, fake success */ 17655 if (un->un_f_doorlock_supported == FALSE) { 17656 return (0); 17657 } 17658 17659 /* 17660 * If we are ejecting and see an SD_REMOVAL_PREVENT 17661 * ignore the command so we can complete the eject 17662 * operation. 
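 *
 * [Editor's note, assuming the standard SPC PREVENT ALLOW MEDIUM
 * REMOVAL encoding of CDB byte 4 (bit 0 = PREVENT); shown for
 * illustration, since the flag argument is placed directly into
 * cdb_opaque[4] below:]
 *
 *	0x00	allow medium removal (unlock)
 *	0x01	prevent medium removal (lock)
 *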
17663 */ 17664 if (flag == SD_REMOVAL_PREVENT) { 17665 mutex_enter(SD_MUTEX(un)); 17666 if (un->un_f_ejecting == TRUE) { 17667 mutex_exit(SD_MUTEX(un)); 17668 return (EAGAIN); 17669 } 17670 mutex_exit(SD_MUTEX(un)); 17671 } 17672 17673 bzero(&cdb, sizeof (cdb)); 17674 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17675 17676 cdb.scc_cmd = SCMD_DOORLOCK; 17677 cdb.cdb_opaque[4] = (uchar_t)flag; 17678 17679 ucmd_buf.uscsi_cdb = (char *)&cdb; 17680 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17681 ucmd_buf.uscsi_bufaddr = NULL; 17682 ucmd_buf.uscsi_buflen = 0; 17683 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17684 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17685 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17686 ucmd_buf.uscsi_timeout = 15; 17687 17688 SD_TRACE(SD_LOG_IO, un, 17689 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17690 17691 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17692 UIO_SYSSPACE, path_flag); 17693 17694 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17695 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17696 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17697 /* fake success and skip subsequent doorlock commands */ 17698 un->un_f_doorlock_supported = FALSE; 17699 return (0); 17700 } 17701 17702 return (status); 17703 } 17704 17705 /* 17706 * Function: sd_send_scsi_READ_CAPACITY 17707 * 17708 * Description: This routine uses the scsi READ CAPACITY command to determine 17709 * the device capacity in number of blocks and the device native 17710 * block size. If this function returns a failure, then the 17711 * values in *capp and *lbap are undefined. If the capacity 17712 * returned is 0xffffffff then the lun is too large for a 17713 * normal READ CAPACITY command and the results of a 17714 * READ CAPACITY 16 will be used instead. 17715 * 17716 * Arguments: un - ptr to soft state struct for the target 17717 * capp - ptr to unsigned 64-bit variable to receive the 17718 * capacity value from the command. 17719 * lbap - ptr to unsigned 32-bit variable to receive the 17720 * block size value from the command 17721 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17722 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17723 * to use the USCSI "direct" chain and bypass the normal 17724 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17725 * command is issued as part of an error recovery action. 17726 * 17727 * Return Code: 0 - Success 17728 * EIO - IO error 17729 * EACCES - Reservation conflict detected 17730 * EAGAIN - Device is becoming ready 17731 * errno return code from sd_send_scsi_cmd() 17732 * 17733 * Context: Can sleep. Blocks until command completes. 17734 */ 17735 17736 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17737 17738 static int 17739 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17740 int path_flag) 17741 { 17742 struct scsi_extended_sense sense_buf; 17743 struct uscsi_cmd ucmd_buf; 17744 union scsi_cdb cdb; 17745 uint32_t *capacity_buf; 17746 uint64_t capacity; 17747 uint32_t lbasize; 17748 int status; 17749 17750 ASSERT(un != NULL); 17751 ASSERT(!mutex_owned(SD_MUTEX(un))); 17752 ASSERT(capp != NULL); 17753 ASSERT(lbap != NULL); 17754 17755 SD_TRACE(SD_LOG_IO, un, 17756 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17757 17758 /* 17759 * First send a READ_CAPACITY command to the target. 17760 * (This command is mandatory under SCSI-2.) 17761 * 17762 * Set up the CDB for the READ_CAPACITY command.
The Partial 17763 * Medium Indicator bit is cleared. The address field must be 17764 * zero if the PMI bit is zero. 17765 */ 17766 bzero(&cdb, sizeof (cdb)); 17767 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17768 17769 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17770 17771 cdb.scc_cmd = SCMD_READ_CAPACITY; 17772 17773 ucmd_buf.uscsi_cdb = (char *)&cdb; 17774 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17775 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17776 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17777 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17778 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17779 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17780 ucmd_buf.uscsi_timeout = 60; 17781 17782 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17783 UIO_SYSSPACE, path_flag); 17784 17785 switch (status) { 17786 case 0: 17787 /* Return failure if we did not get valid capacity data. */ 17788 if (ucmd_buf.uscsi_resid != 0) { 17789 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17790 return (EIO); 17791 } 17792 17793 /* 17794 * Read capacity and block size from the READ CAPACITY 10 data. 17795 * This data may be adjusted later due to device specific 17796 * issues. 17797 * 17798 * According to the SCSI spec, the READ CAPACITY 10 17799 * command returns the following: 17800 * 17801 * bytes 0-3: Maximum logical block address available. 17802 * (MSB in byte:0 & LSB in byte:3) 17803 * 17804 * bytes 4-7: Block length in bytes 17805 * (MSB in byte:4 & LSB in byte:7) 17806 * 17807 */ 17808 capacity = BE_32(capacity_buf[0]); 17809 lbasize = BE_32(capacity_buf[1]); 17810 17811 /* 17812 * Done with capacity_buf 17813 */ 17814 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17815 17816 /* 17817 * if the reported capacity is set to all 0xf's, then 17818 * this disk is too large and requires SBC-2 commands. 17819 * Reissue the request using READ CAPACITY 16. 17820 */ 17821 if (capacity == 0xffffffff) { 17822 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17823 &lbasize, path_flag); 17824 if (status != 0) { 17825 return (status); 17826 } 17827 } 17828 break; /* Success! */ 17829 case EIO: 17830 switch (ucmd_buf.uscsi_status) { 17831 case STATUS_RESERVATION_CONFLICT: 17832 status = EACCES; 17833 break; 17834 case STATUS_CHECK: 17835 /* 17836 * Check condition; look for ASC/ASCQ of 0x04/0x01 17837 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17838 */ 17839 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17840 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17841 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17842 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17843 return (EAGAIN); 17844 } 17845 break; 17846 default: 17847 break; 17848 } 17849 /* FALLTHRU */ 17850 default: 17851 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17852 return (status); 17853 } 17854 17855 /* 17856 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17857 * (2352 and 0 are common) so for these devices always force the value 17858 * to 2048 as required by the ATAPI specs. 17859 */ 17860 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17861 lbasize = 2048; 17862 } 17863 17864 /* 17865 * Get the maximum LBA value from the READ CAPACITY data. 17866 * Here we assume that the Partial Medium Indicator (PMI) bit 17867 * was cleared when issuing the command. This means that the LBA 17868 * returned from the device is the LBA of the last logical block 17869 * on the logical unit. The actual logical block count will be 17870 * this value plus one. 
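 *
 * [Editor's sketch of the decode just described; illustrative only,
 * assuming a hypothetical 512-byte un_sys_blocksize:]
 *
 *	uint32_t rc10[2];			data from READ CAPACITY(10)
 *	uint64_t max_lba = BE_32(rc10[0]);	last addressable LBA
 *	uint32_t lbasize = BE_32(rc10[1]);	block length, in bytes
 *	uint64_t nblocks = max_lba + 1;		block count = last LBA + 1
 *	uint64_t capacity = nblocks * (lbasize / 512);
 *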
17871 * 17872 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17873 * so scale the capacity value to reflect this. 17874 */ 17875 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17876 17877 /* 17878 * Copy the values from the READ CAPACITY command into the space 17879 * provided by the caller. 17880 */ 17881 *capp = capacity; 17882 *lbap = lbasize; 17883 17884 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17885 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17886 17887 /* 17888 * Both the lbasize and capacity from the device must be nonzero, 17889 * otherwise we assume that the values are not valid and return 17890 * failure to the caller. (4203735) 17891 */ 17892 if ((capacity == 0) || (lbasize == 0)) { 17893 return (EIO); 17894 } 17895 17896 return (0); 17897 } 17898 17899 /* 17900 * Function: sd_send_scsi_READ_CAPACITY_16 17901 * 17902 * Description: This routine uses the scsi READ CAPACITY 16 command to 17903 * determine the device capacity in number of blocks and the 17904 * device native block size. If this function returns a failure, 17905 * then the values in *capp and *lbap are undefined. 17906 * This routine should always be called by 17907 * sd_send_scsi_READ_CAPACITY which will apply any device 17908 * specific adjustments to capacity and lbasize. 17909 * 17910 * Arguments: un - ptr to soft state struct for the target 17911 * capp - ptr to unsigned 64-bit variable to receive the 17912 * capacity value from the command. 17913 * lbap - ptr to unsigned 32-bit variable to receive the 17914 * block size value from the command 17915 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17916 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17917 * to use the USCSI "direct" chain and bypass the normal 17918 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17919 * this command is issued as part of an error recovery 17920 * action. 17921 * 17922 * Return Code: 0 - Success 17923 * EIO - IO error 17924 * EACCES - Reservation conflict detected 17925 * EAGAIN - Device is becoming ready 17926 * errno return code from sd_send_scsi_cmd() 17927 * 17928 * Context: Can sleep. Blocks until command completes. 17929 */ 17930 17931 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17932 17933 static int 17934 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17935 uint32_t *lbap, int path_flag) 17936 { 17937 struct scsi_extended_sense sense_buf; 17938 struct uscsi_cmd ucmd_buf; 17939 union scsi_cdb cdb; 17940 uint64_t *capacity16_buf; 17941 uint64_t capacity; 17942 uint32_t lbasize; 17943 int status; 17944 17945 ASSERT(un != NULL); 17946 ASSERT(!mutex_owned(SD_MUTEX(un))); 17947 ASSERT(capp != NULL); 17948 ASSERT(lbap != NULL); 17949 17950 SD_TRACE(SD_LOG_IO, un, 17951 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17952 17953 /* 17954 * First send a READ_CAPACITY_16 command to the target. 17955 * 17956 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17957 * Medium Indicator bit is cleared. The address field must be 17958 * zero if the PMI bit is zero.
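 *
 * [Editor's sketch of the SERVICE ACTION IN(16) CDB layout used here,
 * per the standard SPC/SBC definition; for illustration only:]
 *
 *	byte 0:		0x9E	(SCMD_SVC_ACTION_IN_G4)
 *	byte 1:		0x10	(SSVC_ACTION_READ_CAPACITY_G4)
 *	bytes 2-9:	logical block address (zero, since PMI is zero)
 *	bytes 10-13:	allocation length (filled in via FORMG4COUNT)
 *	byte 14:	PMI bit (zero here)
 *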
17959 */ 17960 bzero(&cdb, sizeof (cdb)); 17961 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17962 17963 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17964 17965 ucmd_buf.uscsi_cdb = (char *)&cdb; 17966 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17967 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17968 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17969 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17970 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17971 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17972 ucmd_buf.uscsi_timeout = 60; 17973 17974 /* 17975 * Read Capacity (16) is a Service Action In command. One 17976 * command byte (0x9E) is overloaded for multiple operations, 17977 * with the second CDB byte specifying the desired operation. 17978 */ 17979 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17980 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17981 17982 /* 17983 * Fill in allocation length field 17984 */ 17985 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17986 17987 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17988 UIO_SYSSPACE, path_flag); 17989 17990 switch (status) { 17991 case 0: 17992 /* Return failure if we did not get valid capacity data. */ 17993 if (ucmd_buf.uscsi_resid > 20) { 17994 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17995 return (EIO); 17996 } 17997 17998 /* 17999 * Read the capacity and block size from the READ CAPACITY 16 18000 * data. This data may be adjusted later due to device specific 18001 * issues. 18002 * 18003 * According to the SCSI spec, the READ CAPACITY 16 18004 * command returns the following: 18005 * 18006 * bytes 0-7: Maximum logical block address available. 18007 * (MSB in byte:0 & LSB in byte:7) 18008 * 18009 * bytes 8-11: Block length in bytes 18010 * (MSB in byte:8 & LSB in byte:11) 18011 * 18012 */ 18013 capacity = BE_64(capacity16_buf[0]); 18014 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 18015 18016 /* 18017 * Done with capacity16_buf 18018 */ 18019 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18020 18021 /* 18022 * if the reported capacity is set to all 0xf's, then 18023 * this disk is too large. This could only happen with 18024 * a device that supports LBAs larger than 64 bits, which 18025 * are not defined by any current T10 standards. 18026 */ 18027 if (capacity == 0xffffffffffffffff) { 18028 return (EIO); 18029 } 18030 break; /* Success! */ 18031 case EIO: 18032 switch (ucmd_buf.uscsi_status) { 18033 case STATUS_RESERVATION_CONFLICT: 18034 status = EACCES; 18035 break; 18036 case STATUS_CHECK: 18037 /* 18038 * Check condition; look for ASC/ASCQ of 0x04/0x01 18039 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18040 */ 18041 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18042 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18043 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18044 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18045 return (EAGAIN); 18046 } 18047 break; 18048 default: 18049 break; 18050 } 18051 /* FALLTHRU */ 18052 default: 18053 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18054 return (status); 18055 } 18056 18057 *capp = capacity; 18058 *lbap = lbasize; 18059 18060 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 18061 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 18062 18063 return (0); 18064 } 18065 18066 18067 /* 18068 * Function: sd_send_scsi_START_STOP_UNIT 18069 * 18070 * Description: Issue a scsi START STOP UNIT command to the target.
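 *
 * [Editor's note, assuming the standard SBC START STOP UNIT encoding
 * of CDB byte 4 (bit 0 = START, bit 1 = LOEJ); the flag argument below
 * is placed directly into cdb_opaque[4], so for illustration:]
 *
 *	0x00	START=0:	 stop the unit (spin down)
 *	0x01	START=1:	 start the unit (spin up)
 *	0x02	LOEJ=1, START=0: eject the medium
 *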
18071 * 18072 * Arguments: un - pointer to driver soft state (unit) structure for 18073 * this target. 18074 * flag - SD_TARGET_START 18075 * SD_TARGET_STOP 18076 * SD_TARGET_EJECT 18077 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18078 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18079 * to use the USCSI "direct" chain and bypass the normal 18080 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18081 * command is issued as part of an error recovery action. 18082 * 18083 * Return Code: 0 - Success 18084 * EIO - IO error 18085 * EACCES - Reservation conflict detected 18086 * ENXIO - Not Ready, medium not present 18087 * errno return code from sd_send_scsi_cmd() 18088 * 18089 * Context: Can sleep. 18090 */ 18091 18092 static int 18093 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18094 { 18095 struct scsi_extended_sense sense_buf; 18096 union scsi_cdb cdb; 18097 struct uscsi_cmd ucmd_buf; 18098 int status; 18099 18100 ASSERT(un != NULL); 18101 ASSERT(!mutex_owned(SD_MUTEX(un))); 18102 18103 SD_TRACE(SD_LOG_IO, un, 18104 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18105 18106 if (un->un_f_check_start_stop && 18107 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18108 (un->un_f_start_stop_supported != TRUE)) { 18109 return (0); 18110 } 18111 18112 /* 18113 * If we are performing an eject operation and 18114 * we receive any command other than SD_TARGET_EJECT 18115 * we should immediately return. 18116 */ 18117 if (flag != SD_TARGET_EJECT) { 18118 mutex_enter(SD_MUTEX(un)); 18119 if (un->un_f_ejecting == TRUE) { 18120 mutex_exit(SD_MUTEX(un)); 18121 return (EAGAIN); 18122 } 18123 mutex_exit(SD_MUTEX(un)); 18124 } 18125 18126 bzero(&cdb, sizeof (cdb)); 18127 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18128 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18129 18130 cdb.scc_cmd = SCMD_START_STOP; 18131 cdb.cdb_opaque[4] = (uchar_t)flag; 18132 18133 ucmd_buf.uscsi_cdb = (char *)&cdb; 18134 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18135 ucmd_buf.uscsi_bufaddr = NULL; 18136 ucmd_buf.uscsi_buflen = 0; 18137 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18138 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18139 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18140 ucmd_buf.uscsi_timeout = 200; 18141 18142 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18143 UIO_SYSSPACE, path_flag); 18144 18145 switch (status) { 18146 case 0: 18147 break; /* Success! */ 18148 case EIO: 18149 switch (ucmd_buf.uscsi_status) { 18150 case STATUS_RESERVATION_CONFLICT: 18151 status = EACCES; 18152 break; 18153 case STATUS_CHECK: 18154 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18155 switch (scsi_sense_key( 18156 (uint8_t *)&sense_buf)) { 18157 case KEY_ILLEGAL_REQUEST: 18158 status = ENOTSUP; 18159 break; 18160 case KEY_NOT_READY: 18161 if (scsi_sense_asc( 18162 (uint8_t *)&sense_buf) 18163 == 0x3A) { 18164 status = ENXIO; 18165 } 18166 break; 18167 default: 18168 break; 18169 } 18170 } 18171 break; 18172 default: 18173 break; 18174 } 18175 break; 18176 default: 18177 break; 18178 } 18179 18180 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18181 18182 return (status); 18183 } 18184 18185 18186 /* 18187 * Function: sd_start_stop_unit_callback 18188 * 18189 * Description: timeout(9F) callback to begin recovery process for a 18190 * device that has spun down. 18191 * 18192 * Arguments: arg - pointer to associated softstate struct. 
18193 * 18194 * Context: Executes in a timeout(9F) thread context 18195 */ 18196 18197 static void 18198 sd_start_stop_unit_callback(void *arg) 18199 { 18200 struct sd_lun *un = arg; 18201 ASSERT(un != NULL); 18202 ASSERT(!mutex_owned(SD_MUTEX(un))); 18203 18204 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18205 18206 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18207 } 18208 18209 18210 /* 18211 * Function: sd_start_stop_unit_task 18212 * 18213 * Description: Recovery procedure when a drive is spun down. 18214 * 18215 * Arguments: arg - pointer to associated softstate struct. 18216 * 18217 * Context: Executes in a taskq() thread context 18218 */ 18219 18220 static void 18221 sd_start_stop_unit_task(void *arg) 18222 { 18223 struct sd_lun *un = arg; 18224 18225 ASSERT(un != NULL); 18226 ASSERT(!mutex_owned(SD_MUTEX(un))); 18227 18228 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18229 18230 /* 18231 * Some unformatted drives report not ready error, no need to 18232 * restart if format has been initiated. 18233 */ 18234 mutex_enter(SD_MUTEX(un)); 18235 if (un->un_f_format_in_progress == TRUE) { 18236 mutex_exit(SD_MUTEX(un)); 18237 return; 18238 } 18239 mutex_exit(SD_MUTEX(un)); 18240 18241 /* 18242 * When a START STOP command is issued from here, it is part of a 18243 * failure recovery operation and must be issued before any other 18244 * commands, including any pending retries. Thus it must be sent 18245 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18246 * succeeds or not, we will start I/O after the attempt. 18247 */ 18248 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18249 SD_PATH_DIRECT_PRIORITY); 18250 18251 /* 18252 * The above call blocks until the START_STOP_UNIT command completes. 18253 * Now that it has completed, we must re-try the original IO that 18254 * received the NOT READY condition in the first place. There are 18255 * three possible conditions here: 18256 * 18257 * (1) The original IO is on un_retry_bp. 18258 * (2) The original IO is on the regular wait queue, and un_retry_bp 18259 * is NULL. 18260 * (3) The original IO is on the regular wait queue, and un_retry_bp 18261 * points to some other, unrelated bp. 18262 * 18263 * For each case, we must call sd_start_cmds() with un_retry_bp 18264 * as the argument. If un_retry_bp is NULL, this will initiate 18265 * processing of the regular wait queue. If un_retry_bp is not NULL, 18266 * then this will process the bp on un_retry_bp. That may or may not 18267 * be the original IO, but that does not matter: the important thing 18268 * is to keep the IO processing going at this point. 18269 * 18270 * Note: This is a very specific error recovery sequence associated 18271 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18272 * serialize the I/O with completion of the spin-up. 18273 */ 18274 mutex_enter(SD_MUTEX(un)); 18275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18276 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18277 un, un->un_retry_bp); 18278 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18279 sd_start_cmds(un, un->un_retry_bp); 18280 mutex_exit(SD_MUTEX(un)); 18281 18282 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18283 } 18284 18285 18286 /* 18287 * Function: sd_send_scsi_INQUIRY 18288 * 18289 * Description: Issue the scsi INQUIRY command. 
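 *
 *		A minimal usage sketch (hedged; inq80 and resid are
 *		hypothetical caller locals): fetching the Unit Serial
 *		Number VPD page would look like
 *
 *			uchar_t	inq80[0xF0];
 *			size_t	resid;
 *
 *			rval = sd_send_scsi_INQUIRY(un, inq80,
 *			    sizeof (inq80), 0x01, 0x80, &resid);
 *
 *		where evpd (0x01) sets the EVPD bit in CDB byte 1 and
 *		page_code (0x80) lands in CDB byte 2 below.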
18290 * 18291 * Arguments: un 18292 * bufaddr 18293 * buflen 18294 * evpd 18295 * page_code 18296 * residp 18297 * 18298 * Return Code: 0 - Success 18299 * errno return code from sd_send_scsi_cmd() 18300 * 18301 * Context: Can sleep. Does not return until command is completed. 18302 */ 18303 18304 static int 18305 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18306 uchar_t evpd, uchar_t page_code, size_t *residp) 18307 { 18308 union scsi_cdb cdb; 18309 struct uscsi_cmd ucmd_buf; 18310 int status; 18311 18312 ASSERT(un != NULL); 18313 ASSERT(!mutex_owned(SD_MUTEX(un))); 18314 ASSERT(bufaddr != NULL); 18315 18316 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18317 18318 bzero(&cdb, sizeof (cdb)); 18319 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18320 bzero(bufaddr, buflen); 18321 18322 cdb.scc_cmd = SCMD_INQUIRY; 18323 cdb.cdb_opaque[1] = evpd; 18324 cdb.cdb_opaque[2] = page_code; 18325 FORMG0COUNT(&cdb, buflen); 18326 18327 ucmd_buf.uscsi_cdb = (char *)&cdb; 18328 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18329 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18330 ucmd_buf.uscsi_buflen = buflen; 18331 ucmd_buf.uscsi_rqbuf = NULL; 18332 ucmd_buf.uscsi_rqlen = 0; 18333 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18334 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18335 18336 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18337 UIO_SYSSPACE, SD_PATH_DIRECT); 18338 18339 if ((status == 0) && (residp != NULL)) { 18340 *residp = ucmd_buf.uscsi_resid; 18341 } 18342 18343 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18344 18345 return (status); 18346 } 18347 18348 18349 /* 18350 * Function: sd_send_scsi_TEST_UNIT_READY 18351 * 18352 * Description: Issue the scsi TEST UNIT READY command. 18353 * This routine can be told to set the flag USCSI_DIAGNOSE to 18354 * prevent retrying failed commands. Use this when the intent 18355 * is either to check for device readiness, to clear a Unit 18356 * Attention, or to clear any outstanding sense data. 18357 * However under specific conditions the expected behavior 18358 * is for retries to bring a device ready, so use the flag 18359 * with caution. 18360 * 18361 * Arguments: un 18362 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18363 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18364 * 0: don't check for media present, do retries on cmd. 18365 * 18366 * Return Code: 0 - Success 18367 * EIO - IO error 18368 * EACCES - Reservation conflict detected 18369 * ENXIO - Not Ready, medium not present 18370 * errno return code from sd_send_scsi_cmd() 18371 * 18372 * Context: Can sleep. Does not return until command is completed. 18373 */ 18374 18375 static int 18376 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18377 { 18378 struct scsi_extended_sense sense_buf; 18379 union scsi_cdb cdb; 18380 struct uscsi_cmd ucmd_buf; 18381 int status; 18382 18383 ASSERT(un != NULL); 18384 ASSERT(!mutex_owned(SD_MUTEX(un))); 18385 18386 SD_TRACE(SD_LOG_IO, un, 18387 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18388 18389 /* 18390 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18391 * timeouts when they receive a TUR and the queue is not empty. Check 18392 * the configuration flag set during attach (indicating the drive has 18393 * this firmware bug) and un_ncmds_in_transport before issuing the 18394 * TUR. If there are 18395 * pending commands, return success; this is a bit arbitrary but is ok 18396 * for non-removables (i.e.
the eliteI disks) and non-clustering 18397 * configurations. 18398 */ 18399 if (un->un_f_cfg_tur_check == TRUE) { 18400 mutex_enter(SD_MUTEX(un)); 18401 if (un->un_ncmds_in_transport != 0) { 18402 mutex_exit(SD_MUTEX(un)); 18403 return (0); 18404 } 18405 mutex_exit(SD_MUTEX(un)); 18406 } 18407 18408 bzero(&cdb, sizeof (cdb)); 18409 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18410 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18411 18412 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18413 18414 ucmd_buf.uscsi_cdb = (char *)&cdb; 18415 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18416 ucmd_buf.uscsi_bufaddr = NULL; 18417 ucmd_buf.uscsi_buflen = 0; 18418 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18419 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18420 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18421 18422 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18423 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18424 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18425 } 18426 ucmd_buf.uscsi_timeout = 60; 18427 18428 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18429 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18430 SD_PATH_STANDARD)); 18431 18432 switch (status) { 18433 case 0: 18434 break; /* Success! */ 18435 case EIO: 18436 switch (ucmd_buf.uscsi_status) { 18437 case STATUS_RESERVATION_CONFLICT: 18438 status = EACCES; 18439 break; 18440 case STATUS_CHECK: 18441 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18442 break; 18443 } 18444 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18445 (scsi_sense_key((uint8_t *)&sense_buf) == 18446 KEY_NOT_READY) && 18447 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18448 status = ENXIO; 18449 } 18450 break; 18451 default: 18452 break; 18453 } 18454 break; 18455 default: 18456 break; 18457 } 18458 18459 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18460 18461 return (status); 18462 } 18463 18464 18465 /* 18466 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18467 * 18468 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18469 * 18470 * Arguments: un, usr_cmd (SD_READ_KEYS or SD_READ_RESV), data_len, 18471 * data_bufp (optional; a buffer is allocated if NULL) 18472 * Return Code: 0 - Success 18473 * EACCES 18474 * ENOTSUP 18475 * errno return code from sd_send_scsi_cmd() 18476 * 18477 * Context: Can sleep. Does not return until command is completed.
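 *
 *		For reference, a hedged sketch of the SPC-3 PRIN READ KEYS
 *		payload that lands in data_bufp on success:
 *
 *			bytes 0-3 : PRGENERATION (big-endian)
 *			bytes 4-7 : ADDITIONAL LENGTH, i.e. 8 * (number of
 *				    registered keys)
 *			bytes 8.. : the 8-byte reservation keys themselves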
18478 */ 18479 18480 static int 18481 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18482 uint16_t data_len, uchar_t *data_bufp) 18483 { 18484 struct scsi_extended_sense sense_buf; 18485 union scsi_cdb cdb; 18486 struct uscsi_cmd ucmd_buf; 18487 int status; 18488 int no_caller_buf = FALSE; 18489 18490 ASSERT(un != NULL); 18491 ASSERT(!mutex_owned(SD_MUTEX(un))); 18492 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18493 18494 SD_TRACE(SD_LOG_IO, un, 18495 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18496 18497 bzero(&cdb, sizeof (cdb)); 18498 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18499 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18500 if (data_bufp == NULL) { 18501 /* Allocate a default buf if the caller did not give one */ 18502 ASSERT(data_len == 0); 18503 data_len = MHIOC_RESV_KEY_SIZE; 18504 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18505 no_caller_buf = TRUE; 18506 } 18507 18508 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18509 cdb.cdb_opaque[1] = usr_cmd; 18510 FORMG1COUNT(&cdb, data_len); 18511 18512 ucmd_buf.uscsi_cdb = (char *)&cdb; 18513 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18514 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18515 ucmd_buf.uscsi_buflen = data_len; 18516 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18517 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18518 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18519 ucmd_buf.uscsi_timeout = 60; 18520 18521 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18522 UIO_SYSSPACE, SD_PATH_STANDARD); 18523 18524 switch (status) { 18525 case 0: 18526 break; /* Success! */ 18527 case EIO: 18528 switch (ucmd_buf.uscsi_status) { 18529 case STATUS_RESERVATION_CONFLICT: 18530 status = EACCES; 18531 break; 18532 case STATUS_CHECK: 18533 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18534 (scsi_sense_key((uint8_t *)&sense_buf) == 18535 KEY_ILLEGAL_REQUEST)) { 18536 status = ENOTSUP; 18537 } 18538 break; 18539 default: 18540 break; 18541 } 18542 break; 18543 default: 18544 break; 18545 } 18546 18547 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18548 18549 if (no_caller_buf == TRUE) { 18550 kmem_free(data_bufp, data_len); 18551 } 18552 18553 return (status); 18554 } 18555 18556 18557 /* 18558 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18559 * 18560 * Description: This routine is the driver entry point for handling 18561 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 18562 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT and 18563 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending SCSI-3 PROUT commands to the device. 18564 * 18565 * Arguments: un - Pointer to soft state struct for the target. 18566 * usr_cmd SCSI-3 reservation facility command (one of 18567 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18568 * SD_SCSI3_PREEMPTANDABORT or SD_SCSI3_REGISTERANDIGNOREKEY) 18569 * usr_bufp - user provided pointer to a register, reserve descriptor or 18570 * preempt and abort structure (mhioc_register_t, 18571 * mhioc_resv_desc_t, mhioc_preemptandabort_t, mhioc_registerandignorekey_t) 18572 * 18573 * Return Code: 0 - Success 18574 * EACCES 18575 * ENOTSUP 18576 * errno return code from sd_send_scsi_cmd() 18577 * 18578 * Context: Can sleep. Does not return until command is completed.
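 *
 *		For reference, a hedged sketch of the 24-byte SPC-3 PROUT
 *		parameter list built below (field names per sd_prout_t):
 *
 *			bytes  0-7  : reservation key (res_key)
 *			bytes  8-15 : service action key (service_key)
 *			bytes 16-19 : scope-specific address (scope_address)
 *			byte   20   : APTPL in bit 0 (aptpl)
 *			bytes 21-23 : reserved/obsolete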
18579 */ 18580 18581 static int 18582 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18583 uchar_t *usr_bufp) 18584 { 18585 struct scsi_extended_sense sense_buf; 18586 union scsi_cdb cdb; 18587 struct uscsi_cmd ucmd_buf; 18588 int status; 18589 uchar_t data_len = sizeof (sd_prout_t); 18590 sd_prout_t *prp; 18591 18592 ASSERT(un != NULL); 18593 ASSERT(!mutex_owned(SD_MUTEX(un))); 18594 ASSERT(data_len == 24); /* required by scsi spec */ 18595 18596 SD_TRACE(SD_LOG_IO, un, 18597 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18598 18599 if (usr_bufp == NULL) { 18600 return (EINVAL); 18601 } 18602 18603 bzero(&cdb, sizeof (cdb)); 18604 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18605 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18606 prp = kmem_zalloc(data_len, KM_SLEEP); 18607 18608 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18609 cdb.cdb_opaque[1] = usr_cmd; 18610 FORMG1COUNT(&cdb, data_len); 18611 18612 ucmd_buf.uscsi_cdb = (char *)&cdb; 18613 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18614 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18615 ucmd_buf.uscsi_buflen = data_len; 18616 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18617 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18618 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18619 ucmd_buf.uscsi_timeout = 60; 18620 18621 switch (usr_cmd) { 18622 case SD_SCSI3_REGISTER: { 18623 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18624 18625 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18626 bcopy(ptr->newkey.key, prp->service_key, 18627 MHIOC_RESV_KEY_SIZE); 18628 prp->aptpl = ptr->aptpl; 18629 break; 18630 } 18631 case SD_SCSI3_RESERVE: 18632 case SD_SCSI3_RELEASE: { 18633 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18634 18635 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18636 prp->scope_address = BE_32(ptr->scope_specific_addr); 18637 cdb.cdb_opaque[2] = ptr->type; 18638 break; 18639 } 18640 case SD_SCSI3_PREEMPTANDABORT: { 18641 mhioc_preemptandabort_t *ptr = 18642 (mhioc_preemptandabort_t *)usr_bufp; 18643 18644 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18645 bcopy(ptr->victim_key.key, prp->service_key, 18646 MHIOC_RESV_KEY_SIZE); 18647 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18648 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18649 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18650 break; 18651 } 18652 case SD_SCSI3_REGISTERANDIGNOREKEY: 18653 { 18654 mhioc_registerandignorekey_t *ptr; 18655 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18656 bcopy(ptr->newkey.key, 18657 prp->service_key, MHIOC_RESV_KEY_SIZE); 18658 prp->aptpl = ptr->aptpl; 18659 break; 18660 } 18661 default: 18662 ASSERT(FALSE); 18663 break; 18664 } 18665 18666 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18667 UIO_SYSSPACE, SD_PATH_STANDARD); 18668 18669 switch (status) { 18670 case 0: 18671 break; /* Success! 
*/ 18672 case EIO: 18673 switch (ucmd_buf.uscsi_status) { 18674 case STATUS_RESERVATION_CONFLICT: 18675 status = EACCES; 18676 break; 18677 case STATUS_CHECK: 18678 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18679 (scsi_sense_key((uint8_t *)&sense_buf) == 18680 KEY_ILLEGAL_REQUEST)) { 18681 status = ENOTSUP; 18682 } 18683 break; 18684 default: 18685 break; 18686 } 18687 break; 18688 default: 18689 break; 18690 } 18691 18692 kmem_free(prp, data_len); 18693 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18694 return (status); 18695 } 18696 18697 18698 /* 18699 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18700 * 18701 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18702 * 18703 * Arguments: un - pointer to the target's soft state struct 18704 * dkc - pointer to the callback structure 18705 * 18706 * Return Code: 0 - success 18707 * errno-type error code 18708 * 18709 * Context: kernel thread context only. 18710 * 18711 * _______________________________________________________________ 18712 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18713 * |FLUSH_VOLATILE| | operation | 18714 * |______________|______________|_________________________________| 18715 * | 0 | NULL | Synchronous flush on both | 18716 * | | | volatile and non-volatile cache | 18717 * |______________|______________|_________________________________| 18718 * | 1 | NULL | Synchronous flush on volatile | 18719 * | | | cache; disk drivers may suppress| 18720 * | | | flush if disk table indicates | 18721 * | | | non-volatile cache | 18722 * |______________|______________|_________________________________| 18723 * | 0 | !NULL | Asynchronous flush on both | 18724 * | | | volatile and non-volatile cache;| 18725 * |______________|______________|_________________________________| 18726 * | 1 | !NULL | Asynchronous flush on volatile | 18727 * | | | cache; disk drivers may suppress| 18728 * | | | flush if disk table indicates | 18729 * | | | non-volatile cache | 18730 * |______________|______________|_________________________________| 18731 * 18732 */ 18733 18734 static int 18735 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18736 { 18737 struct sd_uscsi_info *uip; 18738 struct uscsi_cmd *uscmd; 18739 union scsi_cdb *cdb; 18740 struct buf *bp; 18741 int rval = 0; 18742 int is_async; 18743 18744 SD_TRACE(SD_LOG_IO, un, 18745 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18746 18747 ASSERT(un != NULL); 18748 ASSERT(!mutex_owned(SD_MUTEX(un))); 18749 18750 if (dkc == NULL || dkc->dkc_callback == NULL) { 18751 is_async = FALSE; 18752 } else { 18753 is_async = TRUE; 18754 } 18755 18756 mutex_enter(SD_MUTEX(un)); 18757 /* check whether cache flush should be suppressed */ 18758 if (un->un_f_suppress_cache_flush == TRUE) { 18759 mutex_exit(SD_MUTEX(un)); 18760 /* 18761 * suppress the cache flush if the device is told to do 18762 * so by sd.conf or disk table 18763 */ 18764 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18765 skip the cache flush since suppress_cache_flush is %d!\n", 18766 un->un_f_suppress_cache_flush); 18767 18768 if (is_async == TRUE) { 18769 /* invoke callback for asynchronous flush */ 18770 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18771 } 18772 return (rval); 18773 } 18774 mutex_exit(SD_MUTEX(un)); 18775 18776 /* 18777 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18778 * set properly 18779 */ 18780 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18781 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18782 18783 
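	/*
	 * Hedged reference sketch of the SYNCHRONIZE CACHE(10) CDB built
	 * here; cdb_un.tag aliases CDB byte 1, which is where the SYNC_NV
	 * bit (bit 2) lives, hence the OR of SD_SYNC_NV_BIT below:
	 *
	 *	byte  0  : 0x35 (SCMD_SYNCHRONIZE_CACHE)
	 *	byte  1  : IMMED (bit 1) | SYNC_NV (bit 2)
	 *	bytes 2-5: starting LBA, bytes 7-8: block count (0 == all)
	 */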
mutex_enter(SD_MUTEX(un)); 18784 if (dkc != NULL && un->un_f_sync_nv_supported && 18785 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18786 /* 18787 * if the device supports SYNC_NV bit, turn on 18788 * the SYNC_NV bit to only flush volatile cache 18789 */ 18790 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18791 } 18792 mutex_exit(SD_MUTEX(un)); 18793 18794 /* 18795 * First get some memory for the uscsi_cmd struct and cdb 18796 * and initialize for SYNCHRONIZE_CACHE cmd. 18797 */ 18798 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18799 uscmd->uscsi_cdblen = CDB_GROUP1; 18800 uscmd->uscsi_cdb = (caddr_t)cdb; 18801 uscmd->uscsi_bufaddr = NULL; 18802 uscmd->uscsi_buflen = 0; 18803 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18804 uscmd->uscsi_rqlen = SENSE_LENGTH; 18805 uscmd->uscsi_rqresid = SENSE_LENGTH; 18806 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18807 uscmd->uscsi_timeout = sd_io_time; 18808 18809 /* 18810 * Allocate an sd_uscsi_info struct and fill it with the info 18811 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18812 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18813 * since we allocate the buf here in this function, we do not 18814 * need to preserve the prior contents of b_private. 18815 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18816 */ 18817 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18818 uip->ui_flags = SD_PATH_DIRECT; 18819 uip->ui_cmdp = uscmd; 18820 18821 bp = getrbuf(KM_SLEEP); 18822 bp->b_private = uip; 18823 18824 /* 18825 * Set up the buffer to carry the uscsi request. 18826 */ 18827 bp->b_flags = B_BUSY; 18828 bp->b_bcount = 0; 18829 bp->b_blkno = 0; 18830 18831 if (is_async == TRUE) { 18832 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18833 uip->ui_dkc = *dkc; 18834 } 18835 18836 bp->b_edev = SD_GET_DEV(un); 18837 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18838 18839 (void) sd_uscsi_strategy(bp); 18840 18841 /* 18842 * If synchronous request, wait for completion. 18843 * If async, just return and let the b_iodone callback 18844 * clean up. 18845 * NOTE: On return, un_ncmds_in_driver will be decremented, 18846 * but it was also incremented in sd_uscsi_strategy(), so 18847 * we should be ok. 18848 */ 18849 if (is_async == FALSE) { 18850 (void) biowait(bp); 18851 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18852 } 18853 18854 return (rval); 18855 } 18856 18857 18858 static int 18859 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18860 { 18861 struct sd_uscsi_info *uip; 18862 struct uscsi_cmd *uscmd; 18863 uint8_t *sense_buf; 18864 struct sd_lun *un; 18865 int status; 18866 union scsi_cdb *cdb; 18867 18868 uip = (struct sd_uscsi_info *)(bp->b_private); 18869 ASSERT(uip != NULL); 18870 18871 uscmd = uip->ui_cmdp; 18872 ASSERT(uscmd != NULL); 18873 18874 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18875 ASSERT(sense_buf != NULL); 18876 18877 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18878 ASSERT(un != NULL); 18879 18880 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 18881 18882 status = geterror(bp); 18883 switch (status) { 18884 case 0: 18885 break; /* Success!
*/ 18886 case EIO: 18887 switch (uscmd->uscsi_status) { 18888 case STATUS_RESERVATION_CONFLICT: 18889 /* Ignore reservation conflict */ 18890 status = 0; 18891 goto done; 18892 18893 case STATUS_CHECK: 18894 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18895 (scsi_sense_key(sense_buf) == 18896 KEY_ILLEGAL_REQUEST)) { 18897 /* Ignore Illegal Request error */ 18898 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 18899 mutex_enter(SD_MUTEX(un)); 18900 un->un_f_sync_nv_supported = FALSE; 18901 mutex_exit(SD_MUTEX(un)); 18902 status = 0; 18903 SD_TRACE(SD_LOG_IO, un, 18904 "un_f_sync_nv_supported \ 18905 is set to false.\n"); 18906 goto done; 18907 } 18908 18909 mutex_enter(SD_MUTEX(un)); 18910 un->un_f_sync_cache_supported = FALSE; 18911 mutex_exit(SD_MUTEX(un)); 18912 SD_TRACE(SD_LOG_IO, un, 18913 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 18914 un_f_sync_cache_supported set to false \ 18915 with asc = %x, ascq = %x\n", 18916 scsi_sense_asc(sense_buf), 18917 scsi_sense_ascq(sense_buf)); 18918 status = ENOTSUP; 18919 goto done; 18920 } 18921 break; 18922 default: 18923 break; 18924 } 18925 /* FALLTHRU */ 18926 default: 18927 /* 18928 * Don't log an error message if this device 18929 * has removable media. 18930 */ 18931 if (!un->un_f_has_removable_media) { 18932 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18933 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18934 } 18935 break; 18936 } 18937 18938 done: 18939 if (uip->ui_dkc.dkc_callback != NULL) { 18940 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18941 } 18942 18943 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18944 freerbuf(bp); 18945 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18946 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18947 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18948 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18949 18950 return (status); 18951 } 18952 18953 18954 /* 18955 * Function: sd_send_scsi_GET_CONFIGURATION 18956 * 18957 * Description: Issues the get configuration command to the device. 18958 * Called from sd_check_for_writable_cd & sd_get_media_info; 18959 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 18960 * Arguments: un 18961 * ucmdbuf 18962 * rqbuf 18963 * rqbuflen 18964 * bufaddr 18965 * buflen 18966 * path_flag 18967 * 18968 * Return Code: 0 - Success 18969 * errno return code from sd_send_scsi_cmd() 18970 * 18971 * Context: Can sleep. Does not return until command is completed. 18972 * 18973 */ 18974 18975 static int 18976 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18977 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18978 int path_flag) 18979 { 18980 char cdb[CDB_GROUP1]; 18981 int status; 18982 18983 ASSERT(un != NULL); 18984 ASSERT(!mutex_owned(SD_MUTEX(un))); 18985 ASSERT(bufaddr != NULL); 18986 ASSERT(ucmdbuf != NULL); 18987 ASSERT(rqbuf != NULL); 18988 18989 SD_TRACE(SD_LOG_IO, un, 18990 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18991 18992 bzero(cdb, sizeof (cdb)); 18993 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18994 bzero(rqbuf, rqbuflen); 18995 bzero(bufaddr, buflen); 18996 18997 /* 18998 * Set up cdb field for the get configuration command.
18999 */ 19000 cdb[0] = SCMD_GET_CONFIGURATION; 19001 cdb[1] = 0x02; /* Requested Type */ 19002 cdb[8] = SD_PROFILE_HEADER_LEN; 19003 ucmdbuf->uscsi_cdb = cdb; 19004 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19005 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19006 ucmdbuf->uscsi_buflen = buflen; 19007 ucmdbuf->uscsi_timeout = sd_io_time; 19008 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19009 ucmdbuf->uscsi_rqlen = rqbuflen; 19010 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19011 19012 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19013 UIO_SYSSPACE, path_flag); 19014 19015 switch (status) { 19016 case 0: 19017 break; /* Success! */ 19018 case EIO: 19019 switch (ucmdbuf->uscsi_status) { 19020 case STATUS_RESERVATION_CONFLICT: 19021 status = EACCES; 19022 break; 19023 default: 19024 break; 19025 } 19026 break; 19027 default: 19028 break; 19029 } 19030 19031 if (status == 0) { 19032 SD_DUMP_MEMORY(un, SD_LOG_IO, 19033 "sd_send_scsi_GET_CONFIGURATION: data", 19034 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19035 } 19036 19037 SD_TRACE(SD_LOG_IO, un, 19038 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19039 19040 return (status); 19041 } 19042 19043 /* 19044 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19045 * 19046 * Description: Issues the get configuration command to the device to 19047 * retrieve a specific feature. Called from 19048 * sd_check_for_writable_cd & sd_set_mmc_caps. 19049 * Arguments: un 19050 * ucmdbuf 19051 * rqbuf 19052 * rqbuflen 19053 * bufaddr 19054 * buflen 19055 * feature 19056 * path_flag 19057 * Return Code: 0 - Success 19058 * errno return code from sd_send_scsi_cmd() 19059 * 19060 * Context: Can sleep. Does not return until command is completed. 19061 * 19062 */ 19063 static int 19064 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19065 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19066 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19067 { 19068 char cdb[CDB_GROUP1]; 19069 int status; 19070 19071 ASSERT(un != NULL); 19072 ASSERT(!mutex_owned(SD_MUTEX(un))); 19073 ASSERT(bufaddr != NULL); 19074 ASSERT(ucmdbuf != NULL); 19075 ASSERT(rqbuf != NULL); 19076 19077 SD_TRACE(SD_LOG_IO, un, 19078 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19079 19080 bzero(cdb, sizeof (cdb)); 19081 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19082 bzero(rqbuf, rqbuflen); 19083 bzero(bufaddr, buflen); 19084 19085 /* 19086 * Set up cdb field for the get configuration command. 19087 */ 19088 cdb[0] = SCMD_GET_CONFIGURATION; 19089 cdb[1] = 0x02; /* Requested Type */ 19090 cdb[3] = feature; 19091 cdb[8] = buflen; 19092 ucmdbuf->uscsi_cdb = cdb; 19093 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19094 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19095 ucmdbuf->uscsi_buflen = buflen; 19096 ucmdbuf->uscsi_timeout = sd_io_time; 19097 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19098 ucmdbuf->uscsi_rqlen = rqbuflen; 19099 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19100 19101 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19102 UIO_SYSSPACE, path_flag); 19103 19104 switch (status) { 19105 case 0: 19106 break; /* Success!
*/ 19107 case EIO: 19108 switch (ucmdbuf->uscsi_status) { 19109 case STATUS_RESERVATION_CONFLICT: 19110 status = EACCES; 19111 break; 19112 default: 19113 break; 19114 } 19115 break; 19116 default: 19117 break; 19118 } 19119 19120 if (status == 0) { 19121 SD_DUMP_MEMORY(un, SD_LOG_IO, 19122 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19123 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19124 } 19125 19126 SD_TRACE(SD_LOG_IO, un, 19127 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19128 19129 return (status); 19130 } 19131 19132 19133 /* 19134 * Function: sd_send_scsi_MODE_SENSE 19135 * 19136 * Description: Utility function for issuing a scsi MODE SENSE command. 19137 * Note: This routine uses a consistent implementation for Group0, 19138 * Group1, and Group2 commands across all platforms. ATAPI devices 19139 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19140 * 19141 * Arguments: un - pointer to the softstate struct for the target. 19142 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19143 * CDB_GROUP[1|2] (10 byte)). 19144 * bufaddr - buffer for page data retrieved from the target. 19145 * buflen - size of page to be retrieved. 19146 * page_code - page code of data to be retrieved from the target. 19147 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19148 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19149 * to use the USCSI "direct" chain and bypass the normal 19150 * command waitq. 19151 * 19152 * Return Code: 0 - Success 19153 * errno return code from sd_send_scsi_cmd() 19154 * 19155 * Context: Can sleep. Does not return until command is completed. 19156 */ 19157 19158 static int 19159 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19160 size_t buflen, uchar_t page_code, int path_flag) 19161 { 19162 struct scsi_extended_sense sense_buf; 19163 union scsi_cdb cdb; 19164 struct uscsi_cmd ucmd_buf; 19165 int status; 19166 int headlen; 19167 19168 ASSERT(un != NULL); 19169 ASSERT(!mutex_owned(SD_MUTEX(un))); 19170 ASSERT(bufaddr != NULL); 19171 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19172 (cdbsize == CDB_GROUP2)); 19173 19174 SD_TRACE(SD_LOG_IO, un, 19175 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19176 19177 bzero(&cdb, sizeof (cdb)); 19178 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19179 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19180 bzero(bufaddr, buflen); 19181 19182 if (cdbsize == CDB_GROUP0) { 19183 cdb.scc_cmd = SCMD_MODE_SENSE; 19184 cdb.cdb_opaque[2] = page_code; 19185 FORMG0COUNT(&cdb, buflen); 19186 headlen = MODE_HEADER_LENGTH; 19187 } else { 19188 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19189 cdb.cdb_opaque[2] = page_code; 19190 FORMG1COUNT(&cdb, buflen); 19191 headlen = MODE_HEADER_LENGTH_GRP2; 19192 } 19193 19194 ASSERT(headlen <= buflen); 19195 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19196 19197 ucmd_buf.uscsi_cdb = (char *)&cdb; 19198 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19199 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19200 ucmd_buf.uscsi_buflen = buflen; 19201 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19202 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19203 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19204 ucmd_buf.uscsi_timeout = 60; 19205 19206 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19207 UIO_SYSSPACE, path_flag); 19208 19209 switch (status) { 19210 case 0: 19211 /* 19212 * sr_check_wp() uses the 0x3f page code and checks the header 19213 * of the mode page to determine if the target device is
write-protected. 19214 * But some USB devices return 0 bytes for the 0x3f page code. For 19215 * this case, make sure that at least the mode page header is 19216 * returned. 19217 */ 19218 if (buflen - ucmd_buf.uscsi_resid < headlen) 19219 status = EIO; 19220 break; /* Success! */ 19221 case EIO: 19222 switch (ucmd_buf.uscsi_status) { 19223 case STATUS_RESERVATION_CONFLICT: 19224 status = EACCES; 19225 break; 19226 default: 19227 break; 19228 } 19229 break; 19230 default: 19231 break; 19232 } 19233 19234 if (status == 0) { 19235 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 19236 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19237 } 19238 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19239 19240 return (status); 19241 } 19242 19243 19244 /* 19245 * Function: sd_send_scsi_MODE_SELECT 19246 * 19247 * Description: Utility function for issuing a scsi MODE SELECT command. 19248 * Note: This routine uses a consistent implementation for Group0, 19249 * Group1, and Group2 commands across all platforms. ATAPI devices 19250 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19251 * 19252 * Arguments: un - pointer to the softstate struct for the target. 19253 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19254 * CDB_GROUP[1|2] (10 byte)). 19255 * bufaddr - buffer for page data retrieved from the target. 19256 * buflen - size of page to be retrieved. 19257 * save_page - boolean to determine if SP bit should be set. 19258 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19259 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19260 * to use the USCSI "direct" chain and bypass the normal 19261 * command waitq. 19262 * 19263 * Return Code: 0 - Success 19264 * errno return code from sd_send_scsi_cmd() 19265 * 19266 * Context: Can sleep. Does not return until command is completed.
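 *
 *		A hedged usage sketch (hdr_and_page and its length are
 *		hypothetical caller values; the buffer must begin with a
 *		mode parameter header followed by the page being written):
 *
 *			rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1,
 *			    hdr_and_page, hdr_and_page_len, SD_SAVE_PAGE,
 *			    SD_PATH_DIRECT);
 *
 *		In the CDB built below, byte 1 carries PF (page format,
 *		bit 4, the 0x10) and SP (save pages, bit 0, the 0x01).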
19267 */ 19268 19269 static int 19270 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19271 size_t buflen, uchar_t save_page, int path_flag) 19272 { 19273 struct scsi_extended_sense sense_buf; 19274 union scsi_cdb cdb; 19275 struct uscsi_cmd ucmd_buf; 19276 int status; 19277 19278 ASSERT(un != NULL); 19279 ASSERT(!mutex_owned(SD_MUTEX(un))); 19280 ASSERT(bufaddr != NULL); 19281 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19282 (cdbsize == CDB_GROUP2)); 19283 19284 SD_TRACE(SD_LOG_IO, un, 19285 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 19286 19287 bzero(&cdb, sizeof (cdb)); 19288 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19289 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19290 19291 /* Set the PF bit for many third party drives */ 19292 cdb.cdb_opaque[1] = 0x10; 19293 19294 /* Set the savepage(SP) bit if given */ 19295 if (save_page == SD_SAVE_PAGE) { 19296 cdb.cdb_opaque[1] |= 0x01; 19297 } 19298 19299 if (cdbsize == CDB_GROUP0) { 19300 cdb.scc_cmd = SCMD_MODE_SELECT; 19301 FORMG0COUNT(&cdb, buflen); 19302 } else { 19303 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 19304 FORMG1COUNT(&cdb, buflen); 19305 } 19306 19307 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19308 19309 ucmd_buf.uscsi_cdb = (char *)&cdb; 19310 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19311 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19312 ucmd_buf.uscsi_buflen = buflen; 19313 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19314 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19315 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19316 ucmd_buf.uscsi_timeout = 60; 19317 19318 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19319 UIO_SYSSPACE, path_flag); 19320 19321 switch (status) { 19322 case 0: 19323 break; /* Success! */ 19324 case EIO: 19325 switch (ucmd_buf.uscsi_status) { 19326 case STATUS_RESERVATION_CONFLICT: 19327 status = EACCES; 19328 break; 19329 default: 19330 break; 19331 } 19332 break; 19333 default: 19334 break; 19335 } 19336 19337 if (status == 0) { 19338 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 19339 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19340 } 19341 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 19342 19343 return (status); 19344 } 19345 19346 19347 /* 19348 * Function: sd_send_scsi_RDWR 19349 * 19350 * Description: Issue a scsi READ or WRITE command with the given parameters. 19351 * 19352 * Arguments: un: Pointer to the sd_lun struct for the target. 19353 * cmd: SCMD_READ or SCMD_WRITE 19354 * bufaddr: Address of caller's buffer for the RDWR data. 19355 * buflen: Length of caller's buffer for the RDWR data. 19356 * start_block: Block number for the start of the RDWR operation. 19357 * (Assumes target-native block size.) 19358 * (Note: this routine does not return a residual; the 19359 * transport residual is not passed back to the caller.) 19360 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19361 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19362 * to use the USCSI "direct" chain and bypass the normal 19363 * command waitq. 19364 * 19365 * Return Code: 0 - Success 19366 * errno return code from sd_send_scsi_cmd() 19367 * 19368 * Context: Can sleep. Does not return until command is completed.
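 *
 *		Worked example (hedged): start_block 0x1FFFFF still fits
 *		the 21-bit Group 0 address field, so a 6-byte CDB is used;
 *		start_block 0x200000 trips the 0xFFE00000 test below and
 *		selects a 10-byte Group 1 CDB; anything above 0xFFFFFFFF
 *		forces the 16-byte Group 4 CDB.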
19369 */ 19370 19371 static int 19372 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19373 size_t buflen, daddr_t start_block, int path_flag) 19374 { 19375 struct scsi_extended_sense sense_buf; 19376 union scsi_cdb cdb; 19377 struct uscsi_cmd ucmd_buf; 19378 uint32_t block_count; 19379 int status; 19380 int cdbsize; 19381 uchar_t flag; 19382 19383 ASSERT(un != NULL); 19384 ASSERT(!mutex_owned(SD_MUTEX(un))); 19385 ASSERT(bufaddr != NULL); 19386 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19387 19388 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19389 19390 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19391 return (EINVAL); 19392 } 19393 19394 mutex_enter(SD_MUTEX(un)); 19395 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19396 mutex_exit(SD_MUTEX(un)); 19397 19398 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19399 19400 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19401 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19402 bufaddr, buflen, start_block, block_count); 19403 19404 bzero(&cdb, sizeof (cdb)); 19405 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19406 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19407 19408 /* Compute CDB size to use */ 19409 if (start_block > 0xffffffff) 19410 cdbsize = CDB_GROUP4; 19411 else if ((start_block & 0xFFE00000) || 19412 (un->un_f_cfg_is_atapi == TRUE)) 19413 cdbsize = CDB_GROUP1; 19414 else 19415 cdbsize = CDB_GROUP0; 19416 19417 switch (cdbsize) { 19418 case CDB_GROUP0: /* 6-byte CDBs */ 19419 cdb.scc_cmd = cmd; 19420 FORMG0ADDR(&cdb, start_block); 19421 FORMG0COUNT(&cdb, block_count); 19422 break; 19423 case CDB_GROUP1: /* 10-byte CDBs */ 19424 cdb.scc_cmd = cmd | SCMD_GROUP1; 19425 FORMG1ADDR(&cdb, start_block); 19426 FORMG1COUNT(&cdb, block_count); 19427 break; 19428 case CDB_GROUP4: /* 16-byte CDBs */ 19429 cdb.scc_cmd = cmd | SCMD_GROUP4; 19430 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19431 FORMG4COUNT(&cdb, block_count); 19432 break; 19433 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19434 default: 19435 /* All others reserved */ 19436 return (EINVAL); 19437 } 19438 19439 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19440 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19441 19442 ucmd_buf.uscsi_cdb = (char *)&cdb; 19443 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19444 ucmd_buf.uscsi_bufaddr = bufaddr; 19445 ucmd_buf.uscsi_buflen = buflen; 19446 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19447 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19448 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19449 ucmd_buf.uscsi_timeout = 60; 19450 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19451 UIO_SYSSPACE, path_flag); 19452 switch (status) { 19453 case 0: 19454 break; /* Success! */ 19455 case EIO: 19456 switch (ucmd_buf.uscsi_status) { 19457 case STATUS_RESERVATION_CONFLICT: 19458 status = EACCES; 19459 break; 19460 default: 19461 break; 19462 } 19463 break; 19464 default: 19465 break; 19466 } 19467 19468 if (status == 0) { 19469 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19470 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19471 } 19472 19473 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19474 19475 return (status); 19476 } 19477 19478 19479 /* 19480 * Function: sd_send_scsi_LOG_SENSE 19481 * 19482 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19483 * 19484 * Arguments: un: Pointer to the sd_lun struct for the target. 
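 *		bufaddr: buffer to receive the log page data.
 *		buflen: allocation length for the returned data.
 *		page_code: log page to be fetched.
 *		page_control: placed in the upper two bits of CDB byte 2.
 *		param_ptr: parameter pointer, CDB bytes 5-6.
 *		path_flag: SD_PATH_* chain selection, as for the other
 *			sd_send_scsi_* routines above.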
19485 * 19486 * Return Code: 0 - Success 19487 * errno return code from sd_send_scsi_cmd() 19488 * 19489 * Context: Can sleep. Does not return until command is completed. 19490 */ 19491 19492 static int 19493 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19494 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19495 int path_flag) 19496 19497 { 19498 struct scsi_extended_sense sense_buf; 19499 union scsi_cdb cdb; 19500 struct uscsi_cmd ucmd_buf; 19501 int status; 19502 19503 ASSERT(un != NULL); 19504 ASSERT(!mutex_owned(SD_MUTEX(un))); 19505 19506 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19507 19508 bzero(&cdb, sizeof (cdb)); 19509 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19510 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19511 19512 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19513 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19514 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19515 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19516 FORMG1COUNT(&cdb, buflen); 19517 19518 ucmd_buf.uscsi_cdb = (char *)&cdb; 19519 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19520 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19521 ucmd_buf.uscsi_buflen = buflen; 19522 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19523 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19524 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19525 ucmd_buf.uscsi_timeout = 60; 19526 19527 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19528 UIO_SYSSPACE, path_flag); 19529 19530 switch (status) { 19531 case 0: 19532 break; 19533 case EIO: 19534 switch (ucmd_buf.uscsi_status) { 19535 case STATUS_RESERVATION_CONFLICT: 19536 status = EACCES; 19537 break; 19538 case STATUS_CHECK: 19539 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19540 (scsi_sense_key((uint8_t *)&sense_buf) == 19541 KEY_ILLEGAL_REQUEST) && 19542 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19543 /* 19544 * ASC 0x24: INVALID FIELD IN CDB 19545 */ 19546 switch (page_code) { 19547 case START_STOP_CYCLE_PAGE: 19548 /* 19549 * The start stop cycle counter is 19550 * implemented as page 0x31 in earlier 19551 * generation disks. In new generation 19552 * disks the start stop cycle counter is 19553 * implemented as page 0xE. To properly 19554 * handle this case, if an attempt for 19555 * log page 0xE is made and fails, we 19556 * will try again using page 0x31. 19557 * 19558 * Network storage BU committed to 19559 * maintain the page 0x31 for this 19560 * purpose and will not have any other 19561 * page implemented with page code 0x31 19562 * until all disks transition to the 19563 * standard page.
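 *
 *				Hedged sketch of the retry below: only
 *				the page code byte of the CDB is
 *				rewritten before the command is
 *				reissued:
 *
 *				    cdb.cdb_opaque[2] =
 *					(page_control << 6) | 0x31;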
19564 */ 19565 mutex_enter(SD_MUTEX(un)); 19566 un->un_start_stop_cycle_page = 19567 START_STOP_CYCLE_VU_PAGE; 19568 cdb.cdb_opaque[2] = 19569 (char)(page_control << 6) | 19570 un->un_start_stop_cycle_page; 19571 mutex_exit(SD_MUTEX(un)); 19572 status = sd_send_scsi_cmd( 19573 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19574 UIO_SYSSPACE, path_flag); 19575 19576 break; 19577 case TEMPERATURE_PAGE: 19578 status = ENOTTY; 19579 break; 19580 default: 19581 break; 19582 } 19583 } 19584 break; 19585 default: 19586 break; 19587 } 19588 break; 19589 default: 19590 break; 19591 } 19592 19593 if (status == 0) { 19594 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19595 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19596 } 19597 19598 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19599 19600 return (status); 19601 } 19602 19603 19604 /* 19605 * Function: sdioctl 19606 * 19607 * Description: Driver's ioctl(9e) entry point function. 19608 * 19609 * Arguments: dev - device number 19610 * cmd - ioctl operation to be performed 19611 * arg - user argument, contains data to be set or reference 19612 * parameter for get 19613 * flag - bit flag, indicating open settings, 32/64 bit type 19614 * cred_p - user credential pointer 19615 * rval_p - calling process return value (OPT) 19616 * 19617 * Return Code: EINVAL 19618 * ENOTTY 19619 * ENXIO 19620 * EIO 19621 * EFAULT 19622 * ENOTSUP 19623 * EPERM 19624 * 19625 * Context: Called from the device switch at normal priority. 19626 */ 19627 19628 static int 19629 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19630 { 19631 struct sd_lun *un = NULL; 19632 int err = 0; 19633 int i = 0; 19634 cred_t *cr; 19635 int tmprval = EINVAL; 19636 int is_valid; 19637 19638 /* 19639 * All device accesses go through sdstrategy, where we check on suspend 19640 * status. 19641 */ 19642 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19643 return (ENXIO); 19644 } 19645 19646 ASSERT(!mutex_owned(SD_MUTEX(un))); 19647 19648 19649 is_valid = SD_IS_VALID_LABEL(un); 19650 19651 /* 19652 * Moved this wait from sd_uscsi_strategy to here for 19653 * reasons of deadlock prevention. Internal driver commands, 19654 * specifically those to change a device's power level, result 19655 * in a call to sd_uscsi_strategy. 19656 */ 19657 mutex_enter(SD_MUTEX(un)); 19658 while ((un->un_state == SD_STATE_SUSPENDED) || 19659 (un->un_state == SD_STATE_PM_CHANGING)) { 19660 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19661 } 19662 /* 19663 * Twiddling the counter here protects commands from now 19664 * through to the top of sd_uscsi_strategy. Without the 19665 * counter increment, a power down, for example, could get in 19666 * after the above check for state is made and before 19667 * execution gets to the top of sd_uscsi_strategy. 19668 * That would cause problems.
19669 */ 19670 un->un_ncmds_in_driver++; 19671 19672 if (!is_valid && 19673 (flag & (FNDELAY | FNONBLOCK))) { 19674 switch (cmd) { 19675 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19676 case DKIOCGVTOC: 19677 case DKIOCGAPART: 19678 case DKIOCPARTINFO: 19679 case DKIOCSGEOM: 19680 case DKIOCSAPART: 19681 case DKIOCGETEFI: 19682 case DKIOCPARTITION: 19683 case DKIOCSVTOC: 19684 case DKIOCSETEFI: 19685 case DKIOCGMBOOT: 19686 case DKIOCSMBOOT: 19687 case DKIOCG_PHYGEOM: 19688 case DKIOCG_VIRTGEOM: 19689 /* let cmlb handle it */ 19690 goto skip_ready_valid; 19691 19692 case CDROMPAUSE: 19693 case CDROMRESUME: 19694 case CDROMPLAYMSF: 19695 case CDROMPLAYTRKIND: 19696 case CDROMREADTOCHDR: 19697 case CDROMREADTOCENTRY: 19698 case CDROMSTOP: 19699 case CDROMSTART: 19700 case CDROMVOLCTRL: 19701 case CDROMSUBCHNL: 19702 case CDROMREADMODE2: 19703 case CDROMREADMODE1: 19704 case CDROMREADOFFSET: 19705 case CDROMSBLKMODE: 19706 case CDROMGBLKMODE: 19707 case CDROMGDRVSPEED: 19708 case CDROMSDRVSPEED: 19709 case CDROMCDDA: 19710 case CDROMCDXA: 19711 case CDROMSUBCODE: 19712 if (!ISCD(un)) { 19713 un->un_ncmds_in_driver--; 19714 ASSERT(un->un_ncmds_in_driver >= 0); 19715 mutex_exit(SD_MUTEX(un)); 19716 return (ENOTTY); 19717 } 19718 break; 19719 case FDEJECT: 19720 case DKIOCEJECT: 19721 case CDROMEJECT: 19722 if (!un->un_f_eject_media_supported) { 19723 un->un_ncmds_in_driver--; 19724 ASSERT(un->un_ncmds_in_driver >= 0); 19725 mutex_exit(SD_MUTEX(un)); 19726 return (ENOTTY); 19727 } 19728 break; 19729 case DKIOCFLUSHWRITECACHE: 19730 mutex_exit(SD_MUTEX(un)); 19731 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19732 if (err != 0) { 19733 mutex_enter(SD_MUTEX(un)); 19734 un->un_ncmds_in_driver--; 19735 ASSERT(un->un_ncmds_in_driver >= 0); 19736 mutex_exit(SD_MUTEX(un)); 19737 return (EIO); 19738 } 19739 mutex_enter(SD_MUTEX(un)); 19740 /* FALLTHROUGH */ 19741 case DKIOCREMOVABLE: 19742 case DKIOCHOTPLUGGABLE: 19743 case DKIOCINFO: 19744 case DKIOCGMEDIAINFO: 19745 case MHIOCENFAILFAST: 19746 case MHIOCSTATUS: 19747 case MHIOCTKOWN: 19748 case MHIOCRELEASE: 19749 case MHIOCGRP_INKEYS: 19750 case MHIOCGRP_INRESV: 19751 case MHIOCGRP_REGISTER: 19752 case MHIOCGRP_RESERVE: 19753 case MHIOCGRP_PREEMPTANDABORT: 19754 case MHIOCGRP_REGISTERANDIGNOREKEY: 19755 case CDROMCLOSETRAY: 19756 case USCSICMD: 19757 goto skip_ready_valid; 19758 default: 19759 break; 19760 } 19761 19762 mutex_exit(SD_MUTEX(un)); 19763 err = sd_ready_and_valid(un); 19764 mutex_enter(SD_MUTEX(un)); 19765 19766 if (err != SD_READY_VALID) { 19767 switch (cmd) { 19768 case DKIOCSTATE: 19769 case CDROMGDRVSPEED: 19770 case CDROMSDRVSPEED: 19771 case FDEJECT: /* for eject command */ 19772 case DKIOCEJECT: 19773 case CDROMEJECT: 19774 case DKIOCREMOVABLE: 19775 case DKIOCHOTPLUGGABLE: 19776 break; 19777 default: 19778 if (un->un_f_has_removable_media) { 19779 err = ENXIO; 19780 } else { 19781 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19782 if (err == SD_RESERVED_BY_OTHERS) { 19783 err = EACCES; 19784 } else { 19785 err = EIO; 19786 } 19787 } 19788 un->un_ncmds_in_driver--; 19789 ASSERT(un->un_ncmds_in_driver >= 0); 19790 mutex_exit(SD_MUTEX(un)); 19791 return (err); 19792 } 19793 } 19794 } 19795 19796 skip_ready_valid: 19797 mutex_exit(SD_MUTEX(un)); 19798 19799 switch (cmd) { 19800 case DKIOCINFO: 19801 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19802 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19803 break; 19804 19805 case DKIOCGMEDIAINFO: 19806 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19807 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19808 break; 19809 19810 case DKIOCGGEOM: 19811 case DKIOCGVTOC: 19812 case DKIOCGAPART: 19813 case DKIOCPARTINFO: 19814 case DKIOCSGEOM: 19815 case DKIOCSAPART: 19816 case DKIOCGETEFI: 19817 case DKIOCPARTITION: 19818 case DKIOCSVTOC: 19819 case DKIOCSETEFI: 19820 case DKIOCGMBOOT: 19821 case DKIOCSMBOOT: 19822 case DKIOCG_PHYGEOM: 19823 case DKIOCG_VIRTGEOM: 19824 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19825 19826 /* TUR should spin up */ 19827 19828 if (un->un_f_has_removable_media) 19829 err = sd_send_scsi_TEST_UNIT_READY(un, 19830 SD_CHECK_FOR_MEDIA); 19831 else 19832 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19833 19834 if (err != 0) 19835 break; 19836 19837 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19838 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19839 19840 if ((err == 0) && 19841 ((cmd == DKIOCSETEFI) || 19842 (un->un_f_pkstats_enabled) && 19843 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19844 19845 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19846 (void *)SD_PATH_DIRECT); 19847 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19848 sd_set_pstats(un); 19849 SD_TRACE(SD_LOG_IO_PARTITION, un, 19850 "sd_ioctl: un:0x%p pstats created and " 19851 "set\n", un); 19852 } 19853 } 19854 19855 if ((cmd == DKIOCSVTOC) || 19856 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19857 19858 mutex_enter(SD_MUTEX(un)); 19859 if (un->un_f_devid_supported && 19860 (un->un_f_opt_fab_devid == TRUE)) { 19861 if (un->un_devid == NULL) { 19862 sd_register_devid(un, SD_DEVINFO(un), 19863 SD_TARGET_IS_UNRESERVED); 19864 } else { 19865 /* 19866 * The device id for this disk 19867 * has been fabricated. The 19868 * device id must be preserved 19869 * by writing it back out to 19870 * disk. 19871 */ 19872 if (sd_write_deviceid(un) != 0) { 19873 ddi_devid_free(un->un_devid); 19874 un->un_devid = NULL; 19875 } 19876 } 19877 } 19878 mutex_exit(SD_MUTEX(un)); 19879 } 19880 19881 break; 19882 19883 case DKIOCLOCK: 19884 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19885 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19886 SD_PATH_STANDARD); 19887 break; 19888 19889 case DKIOCUNLOCK: 19890 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19891 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19892 SD_PATH_STANDARD); 19893 break; 19894 19895 case DKIOCSTATE: { 19896 enum dkio_state state; 19897 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19898 19899 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19900 err = EFAULT; 19901 } else { 19902 err = sd_check_media(dev, state); 19903 if (err == 0) { 19904 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19905 sizeof (int), flag) != 0) 19906 err = EFAULT; 19907 } 19908 } 19909 break; 19910 } 19911 19912 case DKIOCREMOVABLE: 19913 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19914 i = un->un_f_has_removable_media ? 1 : 0; 19915 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19916 err = EFAULT; 19917 } else { 19918 err = 0; 19919 } 19920 break; 19921 19922 case DKIOCHOTPLUGGABLE: 19923 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19924 i = un->un_f_is_hotpluggable ? 
1 : 0; 19925 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19926 err = EFAULT; 19927 } else { 19928 err = 0; 19929 } 19930 break; 19931 19932 case DKIOCGTEMPERATURE: 19933 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19934 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19935 break; 19936 19937 case MHIOCENFAILFAST: 19938 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19939 if ((err = drv_priv(cred_p)) == 0) { 19940 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19941 } 19942 break; 19943 19944 case MHIOCTKOWN: 19945 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19946 if ((err = drv_priv(cred_p)) == 0) { 19947 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19948 } 19949 break; 19950 19951 case MHIOCRELEASE: 19952 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19953 if ((err = drv_priv(cred_p)) == 0) { 19954 err = sd_mhdioc_release(dev); 19955 } 19956 break; 19957 19958 case MHIOCSTATUS: 19959 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19960 if ((err = drv_priv(cred_p)) == 0) { 19961 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19962 case 0: 19963 err = 0; 19964 break; 19965 case EACCES: 19966 *rval_p = 1; 19967 err = 0; 19968 break; 19969 default: 19970 err = EIO; 19971 break; 19972 } 19973 } 19974 break; 19975 19976 case MHIOCQRESERVE: 19977 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19978 if ((err = drv_priv(cred_p)) == 0) { 19979 err = sd_reserve_release(dev, SD_RESERVE); 19980 } 19981 break; 19982 19983 case MHIOCREREGISTERDEVID: 19984 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19985 if (drv_priv(cred_p) == EPERM) { 19986 err = EPERM; 19987 } else if (!un->un_f_devid_supported) { 19988 err = ENOTTY; 19989 } else { 19990 err = sd_mhdioc_register_devid(dev); 19991 } 19992 break; 19993 19994 case MHIOCGRP_INKEYS: 19995 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19996 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19997 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19998 err = ENOTSUP; 19999 } else { 20000 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20001 flag); 20002 } 20003 } 20004 break; 20005 20006 case MHIOCGRP_INRESV: 20007 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20008 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20009 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20010 err = ENOTSUP; 20011 } else { 20012 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20013 } 20014 } 20015 break; 20016 20017 case MHIOCGRP_REGISTER: 20018 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20019 if ((err = drv_priv(cred_p)) != EPERM) { 20020 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20021 err = ENOTSUP; 20022 } else if (arg != NULL) { 20023 mhioc_register_t reg; 20024 if (ddi_copyin((void *)arg, ®, 20025 sizeof (mhioc_register_t), flag) != 0) { 20026 err = EFAULT; 20027 } else { 20028 err = 20029 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20030 un, SD_SCSI3_REGISTER, 20031 (uchar_t *)®); 20032 } 20033 } 20034 } 20035 break; 20036 20037 case MHIOCGRP_RESERVE: 20038 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20039 if ((err = drv_priv(cred_p)) != EPERM) { 20040 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20041 err = ENOTSUP; 20042 } else if (arg != NULL) { 20043 mhioc_resv_desc_t resv_desc; 20044 if (ddi_copyin((void *)arg, &resv_desc, 20045 sizeof (mhioc_resv_desc_t), flag) != 0) { 20046 err = EFAULT; 20047 } else { 20048 err = 20049 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20050 un, SD_SCSI3_RESERVE, 20051 (uchar_t *)&resv_desc); 20052 } 20053 } 20054 } 20055 break; 20056 20057 
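	/*
	 * A hedged user-level sketch (not driver code) of driving the
	 * MHIOCGRP_* reservation ioctls handled here; fd and mykey are
	 * hypothetical, with fd open on the target's character device:
	 *
	 *	#include <sys/mhd.h>
	 *
	 *	mhioc_register_t reg;
	 *	bzero(&reg, sizeof (reg));
	 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
	 *	if (ioctl(fd, MHIOCGRP_REGISTER, &reg) < 0)
	 *		perror("MHIOCGRP_REGISTER");
	 */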
case MHIOCGRP_PREEMPTANDABORT: 20058 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20059 if ((err = drv_priv(cred_p)) != EPERM) { 20060 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20061 err = ENOTSUP; 20062 } else if (arg != NULL) { 20063 mhioc_preemptandabort_t preempt_abort; 20064 if (ddi_copyin((void *)arg, &preempt_abort, 20065 sizeof (mhioc_preemptandabort_t), 20066 flag) != 0) { 20067 err = EFAULT; 20068 } else { 20069 err = 20070 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20071 un, SD_SCSI3_PREEMPTANDABORT, 20072 (uchar_t *)&preempt_abort); 20073 } 20074 } 20075 } 20076 break; 20077 20078 case MHIOCGRP_REGISTERANDIGNOREKEY: 20079 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20080 if ((err = drv_priv(cred_p)) != EPERM) { 20081 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20082 err = ENOTSUP; 20083 } else if (arg != NULL) { 20084 mhioc_registerandignorekey_t r_and_i; 20085 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20086 sizeof (mhioc_registerandignorekey_t), 20087 flag) != 0) { 20088 err = EFAULT; 20089 } else { 20090 err = 20091 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20092 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20093 (uchar_t *)&r_and_i); 20094 } 20095 } 20096 } 20097 break; 20098 20099 case USCSICMD: 20100 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20101 cr = ddi_get_cred(); 20102 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20103 err = EPERM; 20104 } else { 20105 enum uio_seg uioseg; 20106 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20107 UIO_USERSPACE; 20108 if (un->un_f_format_in_progress == TRUE) { 20109 err = EAGAIN; 20110 break; 20111 } 20112 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20113 flag, uioseg, SD_PATH_STANDARD); 20114 } 20115 break; 20116 20117 case CDROMPAUSE: 20118 case CDROMRESUME: 20119 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20120 if (!ISCD(un)) { 20121 err = ENOTTY; 20122 } else { 20123 err = sr_pause_resume(dev, cmd); 20124 } 20125 break; 20126 20127 case CDROMPLAYMSF: 20128 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20129 if (!ISCD(un)) { 20130 err = ENOTTY; 20131 } else { 20132 err = sr_play_msf(dev, (caddr_t)arg, flag); 20133 } 20134 break; 20135 20136 case CDROMPLAYTRKIND: 20137 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20138 #if defined(__i386) || defined(__amd64) 20139 /* 20140 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20141 */ 20142 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20143 #else 20144 if (!ISCD(un)) { 20145 #endif 20146 err = ENOTTY; 20147 } else { 20148 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20149 } 20150 break; 20151 20152 case CDROMREADTOCHDR: 20153 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20154 if (!ISCD(un)) { 20155 err = ENOTTY; 20156 } else { 20157 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20158 } 20159 break; 20160 20161 case CDROMREADTOCENTRY: 20162 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20163 if (!ISCD(un)) { 20164 err = ENOTTY; 20165 } else { 20166 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20167 } 20168 break; 20169 20170 case CDROMSTOP: 20171 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20172 if (!ISCD(un)) { 20173 err = ENOTTY; 20174 } else { 20175 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20176 SD_PATH_STANDARD); 20177 } 20178 break; 20179 20180 case CDROMSTART: 20181 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20182 if (!ISCD(un)) { 20183 err = ENOTTY; 20184 } else { 20185 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20186 SD_PATH_STANDARD); 20187 } 20188 break; 
20189 20190 case CDROMCLOSETRAY: 20191 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20192 if (!ISCD(un)) { 20193 err = ENOTTY; 20194 } else { 20195 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20196 SD_PATH_STANDARD); 20197 } 20198 break; 20199 20200 case FDEJECT: /* for eject command */ 20201 case DKIOCEJECT: 20202 case CDROMEJECT: 20203 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20204 if (!un->un_f_eject_media_supported) { 20205 err = ENOTTY; 20206 } else { 20207 err = sr_eject(dev); 20208 } 20209 break; 20210 20211 case CDROMVOLCTRL: 20212 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20213 if (!ISCD(un)) { 20214 err = ENOTTY; 20215 } else { 20216 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20217 } 20218 break; 20219 20220 case CDROMSUBCHNL: 20221 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20222 if (!ISCD(un)) { 20223 err = ENOTTY; 20224 } else { 20225 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20226 } 20227 break; 20228 20229 case CDROMREADMODE2: 20230 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20231 if (!ISCD(un)) { 20232 err = ENOTTY; 20233 } else if (un->un_f_cfg_is_atapi == TRUE) { 20234 /* 20235 * If the drive supports READ CD, use that instead of 20236 * switching the LBA size via a MODE SELECT 20237 * Block Descriptor 20238 */ 20239 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20240 } else { 20241 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20242 } 20243 break; 20244 20245 case CDROMREADMODE1: 20246 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20247 if (!ISCD(un)) { 20248 err = ENOTTY; 20249 } else { 20250 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20251 } 20252 break; 20253 20254 case CDROMREADOFFSET: 20255 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20256 if (!ISCD(un)) { 20257 err = ENOTTY; 20258 } else { 20259 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20260 flag); 20261 } 20262 break; 20263 20264 case CDROMSBLKMODE: 20265 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20266 /* 20267 * There is no means of changing the block size on ATAPI 20268 * drives, so return ENOTTY if the drive type is ATAPI 20269 */ 20270 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20271 err = ENOTTY; 20272 } else if (un->un_f_mmc_cap == TRUE) { 20273 20274 /* 20275 * MMC Devices do not support changing the 20276 * logical block size 20277 * 20278 * Note: EINVAL is being returned instead of ENOTTY to 20279 * maintain consistency with the original mmc 20280 * driver update. 20281 */ 20282 err = EINVAL; 20283 } else { 20284 mutex_enter(SD_MUTEX(un)); 20285 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20286 (un->un_ncmds_in_transport > 0)) { 20287 mutex_exit(SD_MUTEX(un)); 20288 err = EINVAL; 20289 } else { 20290 mutex_exit(SD_MUTEX(un)); 20291 err = sr_change_blkmode(dev, cmd, arg, flag); 20292 } 20293 } 20294 break; 20295 20296 case CDROMGBLKMODE: 20297 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20298 if (!ISCD(un)) { 20299 err = ENOTTY; 20300 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20301 (un->un_f_blockcount_is_valid != FALSE)) { 20302 /* 20303 * Drive is an ATAPI drive so return target block 20304 * size for ATAPI drives since we cannot change the 20305 * blocksize on ATAPI drives. Used primarily to detect 20306 * if an ATAPI cdrom is present. 20307 */ 20308 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20309 sizeof (int), flag) != 0) { 20310 err = EFAULT; 20311 } else { 20312 err = 0; 20313 } 20314 20315 } else { 20316 /* 20317 * Drive supports changing block sizes via a Mode 20318 * Select. 
20319 */ 20320 err = sr_change_blkmode(dev, cmd, arg, flag); 20321 } 20322 break; 20323 20324 case CDROMGDRVSPEED: 20325 case CDROMSDRVSPEED: 20326 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20327 if (!ISCD(un)) { 20328 err = ENOTTY; 20329 } else if (un->un_f_mmc_cap == TRUE) { 20330 /* 20331 * Note: In the future the driver implementation 20332 * for getting and 20333 * setting cd speed should entail: 20334 * 1) If non-mmc try the Toshiba mode page 20335 * (sr_change_speed) 20336 * 2) If mmc but no support for Real Time Streaming try 20337 * the SET CD SPEED (0xBB) command 20338 * (sr_atapi_change_speed) 20339 * 3) If mmc and support for Real Time Streaming 20340 * try the GET PERFORMANCE and SET STREAMING 20341 * commands (not yet implemented, 4380808) 20342 */ 20343 /* 20344 * As per recent MMC spec, CD-ROM speed is variable 20345 * and changes with LBA. Since there is no such 20346 * thing as drive speed now, fail this ioctl. 20347 * 20348 * Note: EINVAL is returned for consistency with the 20349 * original implementation, which included support for getting 20350 * the drive speed of mmc devices but not setting 20351 * the drive speed. Thus EINVAL would be returned 20352 * if a set request was made for an mmc device. 20353 * We no longer support get or set speed for 20354 * mmc but need to remain consistent with regard 20355 * to the error code returned. 20356 */ 20357 err = EINVAL; 20358 } else if (un->un_f_cfg_is_atapi == TRUE) { 20359 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20360 } else { 20361 err = sr_change_speed(dev, cmd, arg, flag); 20362 } 20363 break; 20364 20365 case CDROMCDDA: 20366 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20367 if (!ISCD(un)) { 20368 err = ENOTTY; 20369 } else { 20370 err = sr_read_cdda(dev, (void *)arg, flag); 20371 } 20372 break; 20373 20374 case CDROMCDXA: 20375 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20376 if (!ISCD(un)) { 20377 err = ENOTTY; 20378 } else { 20379 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20380 } 20381 break; 20382 20383 case CDROMSUBCODE: 20384 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20385 if (!ISCD(un)) { 20386 err = ENOTTY; 20387 } else { 20388 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20389 } 20390 break; 20391 20392 20393 #ifdef SDDEBUG 20394 /* RESET/ABORTS testing ioctls */ 20395 case DKIOCRESET: { 20396 int reset_level; 20397 20398 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20399 err = EFAULT; 20400 } else { 20401 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20402 "reset_level = 0x%x\n", reset_level); 20403 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20404 err = 0; 20405 } else { 20406 err = EIO; 20407 } 20408 } 20409 break; 20410 } 20411 20412 case DKIOCABORT: 20413 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20414 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20415 err = 0; 20416 } else { 20417 err = EIO; 20418 } 20419 break; 20420 #endif 20421 20422 #ifdef SD_FAULT_INJECTION 20423 /* SDIOC FaultInjection testing ioctls */ 20424 case SDIOCSTART: 20425 case SDIOCSTOP: 20426 case SDIOCINSERTPKT: 20427 case SDIOCINSERTXB: 20428 case SDIOCINSERTUN: 20429 case SDIOCINSERTARQ: 20430 case SDIOCPUSH: 20431 case SDIOCRETRIEVE: 20432 case SDIOCRUN: 20433 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 20434 "SDIOC detected cmd:0x%X:\n", cmd); 20435 /* call error generator */ 20436 sd_faultinjection_ioctl(cmd, arg, un); 20437 err = 0; 20438 break; 20439 20440 #endif /* SD_FAULT_INJECTION */ 20441 20442 case DKIOCFLUSHWRITECACHE: 20443 { 20444 struct dk_callback *dkc = (struct
dk_callback *)arg; 20445 20446 mutex_enter(SD_MUTEX(un)); 20447 if (!un->un_f_sync_cache_supported || 20448 !un->un_f_write_cache_enabled) { 20449 err = un->un_f_sync_cache_supported ? 20450 0 : ENOTSUP; 20451 mutex_exit(SD_MUTEX(un)); 20452 if ((flag & FKIOCTL) && dkc != NULL && 20453 dkc->dkc_callback != NULL) { 20454 (*dkc->dkc_callback)(dkc->dkc_cookie, 20455 err); 20456 /* 20457 * Did callback and reported error. 20458 * Since we did a callback, ioctl 20459 * should return 0. 20460 */ 20461 err = 0; 20462 } 20463 break; 20464 } 20465 mutex_exit(SD_MUTEX(un)); 20466 20467 if ((flag & FKIOCTL) && dkc != NULL && 20468 dkc->dkc_callback != NULL) { 20469 /* async SYNC CACHE request */ 20470 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20471 } else { 20472 /* synchronous SYNC CACHE request */ 20473 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20474 } 20475 } 20476 break; 20477 20478 case DKIOCGETWCE: { 20479 20480 int wce; 20481 20482 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20483 break; 20484 } 20485 20486 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20487 err = EFAULT; 20488 } 20489 break; 20490 } 20491 20492 case DKIOCSETWCE: { 20493 20494 int wce, sync_supported; 20495 20496 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20497 err = EFAULT; 20498 break; 20499 } 20500 20501 /* 20502 * Synchronize multiple threads trying to enable 20503 * or disable the cache via the un_f_wcc_cv 20504 * condition variable. 20505 */ 20506 mutex_enter(SD_MUTEX(un)); 20507 20508 /* 20509 * Don't allow the cache to be enabled if the 20510 * config file has it disabled. 20511 */ 20512 if (un->un_f_opt_disable_cache && wce) { 20513 mutex_exit(SD_MUTEX(un)); 20514 err = EINVAL; 20515 break; 20516 } 20517 20518 /* 20519 * Wait for write cache change in progress 20520 * bit to be clear before proceeding. 20521 */ 20522 while (un->un_f_wcc_inprog) 20523 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20524 20525 un->un_f_wcc_inprog = 1; 20526 20527 if (un->un_f_write_cache_enabled && wce == 0) { 20528 /* 20529 * Disable the write cache. Don't clear 20530 * un_f_write_cache_enabled until after 20531 * the mode select and flush are complete. 20532 */ 20533 sync_supported = un->un_f_sync_cache_supported; 20534 20535 /* 20536 * If cache flush is suppressed, we assume that the 20537 * controller firmware will take care of managing the 20538 * write cache for us: no need to explicitly 20539 * disable it. 20540 */ 20541 if (!un->un_f_suppress_cache_flush) { 20542 mutex_exit(SD_MUTEX(un)); 20543 if ((err = sd_cache_control(un, 20544 SD_CACHE_NOCHANGE, 20545 SD_CACHE_DISABLE)) == 0 && 20546 sync_supported) { 20547 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20548 NULL); 20549 } 20550 } else { 20551 mutex_exit(SD_MUTEX(un)); 20552 } 20553 20554 mutex_enter(SD_MUTEX(un)); 20555 if (err == 0) { 20556 un->un_f_write_cache_enabled = 0; 20557 } 20558 20559 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20560 /* 20561 * Set un_f_write_cache_enabled first, so there is 20562 * no window where the cache is enabled, but the 20563 * bit says it isn't. 20564 */ 20565 un->un_f_write_cache_enabled = 1; 20566 20567 /* 20568 * If cache flush is suppressed, we assume that the 20569 * controller firmware will take care of managing the 20570 * write cache for us: no need to explicitly 20571 * enable it. 
20572 */ 20573 if (!un->un_f_suppress_cache_flush) { 20574 mutex_exit(SD_MUTEX(un)); 20575 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20576 SD_CACHE_ENABLE); 20577 } else { 20578 mutex_exit(SD_MUTEX(un)); 20579 } 20580 20581 mutex_enter(SD_MUTEX(un)); 20582 20583 if (err) { 20584 un->un_f_write_cache_enabled = 0; 20585 } 20586 } 20587 20588 un->un_f_wcc_inprog = 0; 20589 cv_broadcast(&un->un_wcc_cv); 20590 mutex_exit(SD_MUTEX(un)); 20591 break; 20592 } 20593 20594 default: 20595 err = ENOTTY; 20596 break; 20597 } 20598 mutex_enter(SD_MUTEX(un)); 20599 un->un_ncmds_in_driver--; 20600 ASSERT(un->un_ncmds_in_driver >= 0); 20601 mutex_exit(SD_MUTEX(un)); 20602 20603 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20604 return (err); 20605 } 20606 20607 20608 /* 20609 * Function: sd_dkio_ctrl_info 20610 * 20611 * Description: This routine is the driver entry point for handling controller 20612 * information ioctl requests (DKIOCINFO). 20613 * 20614 * Arguments: dev - the device number 20615 * arg - pointer to user provided dk_cinfo structure 20616 * specifying the controller type and attributes. 20617 * flag - this argument is a pass through to ddi_copyxxx() 20618 * directly from the mode argument of ioctl(). 20619 * 20620 * Return Code: 0 20621 * EFAULT 20622 * ENXIO 20623 */ 20624 20625 static int 20626 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20627 { 20628 struct sd_lun *un = NULL; 20629 struct dk_cinfo *info; 20630 dev_info_t *pdip; 20631 int lun, tgt; 20632 20633 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20634 return (ENXIO); 20635 } 20636 20637 info = (struct dk_cinfo *) 20638 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20639 20640 switch (un->un_ctype) { 20641 case CTYPE_CDROM: 20642 info->dki_ctype = DKC_CDROM; 20643 break; 20644 default: 20645 info->dki_ctype = DKC_SCSI_CCS; 20646 break; 20647 } 20648 pdip = ddi_get_parent(SD_DEVINFO(un)); 20649 info->dki_cnum = ddi_get_instance(pdip); 20650 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20651 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20652 } else { 20653 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20654 DK_DEVLEN - 1); 20655 } 20656 20657 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20658 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20659 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20660 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20661 20662 /* Unit Information */ 20663 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20664 info->dki_slave = ((tgt << 3) | lun); 20665 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20666 DK_DEVLEN - 1); 20667 info->dki_flags = DKI_FMTVOL; 20668 info->dki_partition = SDPART(dev); 20669 20670 /* Max Transfer size of this device in blocks */ 20671 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20672 info->dki_addr = 0; 20673 info->dki_space = 0; 20674 info->dki_prio = 0; 20675 info->dki_vec = 0; 20676 20677 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20678 kmem_free(info, sizeof (struct dk_cinfo)); 20679 return (EFAULT); 20680 } else { 20681 kmem_free(info, sizeof (struct dk_cinfo)); 20682 return (0); 20683 } 20684 } 20685 20686 20687 /* 20688 * Function: sd_get_media_info 20689 * 20690 * Description: This routine is the driver entry point for handling ioctl 20691 * requests for the media type or command set profile used by the 20692 * drive to operate on the media (DKIOCGMEDIAINFO). 
20693 * 20694 * Arguments: dev - the device number 20695 * arg - pointer to user provided dk_minfo structure 20696 * specifying the media type, logical block size and 20697 * drive capacity. 20698 * flag - this argument is a pass through to ddi_copyxxx() 20699 * directly from the mode argument of ioctl(). 20700 * 20701 * Return Code: 0 20702 * EACCES 20703 * EFAULT 20704 * ENXIO 20705 * EIO 20706 */ 20707 20708 static int 20709 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20710 { 20711 struct sd_lun *un = NULL; 20712 struct uscsi_cmd com; 20713 struct scsi_inquiry *sinq; 20714 struct dk_minfo media_info; 20715 u_longlong_t media_capacity; 20716 uint64_t capacity; 20717 uint_t lbasize; 20718 uchar_t *out_data; 20719 uchar_t *rqbuf; 20720 int rval = 0; 20721 int rtn; 20722 20723 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20724 (un->un_state == SD_STATE_OFFLINE)) { 20725 return (ENXIO); 20726 } 20727 20728 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20729 20730 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20731 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20732 20733 /* Issue a TUR to determine if the drive is ready with media present */ 20734 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20735 if (rval == ENXIO) { 20736 goto done; 20737 } 20738 20739 /* Now get configuration data */ 20740 if (ISCD(un)) { 20741 media_info.dki_media_type = DK_CDROM; 20742 20743 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20744 if (un->un_f_mmc_cap == TRUE) { 20745 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20746 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20747 SD_PATH_STANDARD); 20748 20749 if (rtn) { 20750 /* 20751 * Failed for other than an illegal request 20752 * or command not supported 20753 */ 20754 if ((com.uscsi_status == STATUS_CHECK) && 20755 (com.uscsi_rqstatus == STATUS_GOOD)) { 20756 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20757 (rqbuf[12] != 0x20)) { 20758 rval = EIO; 20759 goto done; 20760 } 20761 } 20762 } else { 20763 /* 20764 * The GET CONFIGURATION command succeeded 20765 * so set the media type according to the 20766 * returned data 20767 */ 20768 media_info.dki_media_type = out_data[6]; 20769 media_info.dki_media_type <<= 8; 20770 media_info.dki_media_type |= out_data[7]; 20771 } 20772 } 20773 } else { 20774 /* 20775 * The profile list is not available, so we attempt to identify 20776 * the media type based on the inquiry data 20777 */ 20778 sinq = un->un_sd->sd_inq; 20779 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20780 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20781 /* This is a direct access device or optical disk */ 20782 media_info.dki_media_type = DK_FIXED_DISK; 20783 20784 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20785 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20786 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20787 media_info.dki_media_type = DK_ZIP; 20788 } else if ( 20789 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20790 media_info.dki_media_type = DK_JAZ; 20791 } 20792 } 20793 } else { 20794 /* 20795 * Not a CD, direct access or optical disk so return 20796 * unknown media 20797 */ 20798 media_info.dki_media_type = DK_UNKNOWN; 20799 } 20800 } 20801 20802 /* Now read the capacity so we can provide the lbasize and capacity */ 20803 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20804 SD_PATH_DIRECT)) { 20805 case 0: 20806 break; 20807 case EACCES: 20808 rval = EACCES; 20809 goto done; 20810 default: 20811 rval = EIO; 20812 goto done; 20813 } 20814
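/* * Worked example, illustration only, of the unit conversion done below: with un_sys_blocksize of 512, a READ CAPACITY result of capacity = 4096 (512-byte blocks) on media with lbasize = 2048 yields dki_capacity = (4096 * 512) / 2048 = 1024 native blocks. */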
20815 media_info.dki_lbsize = lbasize; 20816 media_capacity = capacity; 20817 20818 /* 20819 * sd_send_scsi_READ_CAPACITY() reports capacity in 20820 * un->un_sys_blocksize units, so we need to convert it into 20821 * lbasize units. 20822 */ 20823 media_capacity *= un->un_sys_blocksize; 20824 media_capacity /= lbasize; 20825 media_info.dki_capacity = media_capacity; 20826 20827 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20828 rval = EFAULT; 20829 /* Keep the goto; code may be added below this point in the future. */ 20830 goto done; 20831 } 20832 done: 20833 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20834 kmem_free(rqbuf, SENSE_LENGTH); 20835 return (rval); 20836 } 20837 20838 20839 /* 20840 * Function: sd_check_media 20841 * 20842 * Description: This utility routine implements the functionality for the 20843 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20844 * driver state changes from that specified by the user 20845 * (inserted or ejected). For example, if the user specifies 20846 * DKIO_EJECTED and the current media state is inserted, this 20847 * routine will immediately return DKIO_INSERTED. However, if the 20848 * current media state is not inserted, the user thread will be 20849 * blocked until the drive state changes. If DKIO_NONE is specified 20850 * the user thread will block until a drive state change occurs. 20851 * 20852 * Arguments: dev - the device number 20853 * state - user pointer to a dkio_state, updated with the current 20854 * drive state at return. 20855 * 20856 * Return Code: ENXIO 20857 * EIO 20858 * EAGAIN 20859 * EINTR 20860 */ 20861 20862 static int 20863 sd_check_media(dev_t dev, enum dkio_state state) 20864 { 20865 struct sd_lun *un = NULL; 20866 enum dkio_state prev_state; 20867 opaque_t token = NULL; 20868 int rval = 0; 20869 20870 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20871 return (ENXIO); 20872 } 20873 20874 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20875 20876 mutex_enter(SD_MUTEX(un)); 20877 20878 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20879 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20880 20881 prev_state = un->un_mediastate; 20882 20883 /* is there anything to do? */ 20884 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20885 /* 20886 * submit the request to the scsi_watch service; 20887 * scsi_media_watch_cb() does the real work 20888 */ 20889 mutex_exit(SD_MUTEX(un)); 20890 20891 /* 20892 * This change handles the case where a scsi watch request is 20893 * added to a device that is powered down. To accomplish this 20894 * we power up the device before adding the scsi watch request, 20895 * since the scsi watch sends a TUR directly to the device 20896 * which the device cannot handle if it is powered down. 20897 */ 20898 if (sd_pm_entry(un) != DDI_SUCCESS) { 20899 mutex_enter(SD_MUTEX(un)); 20900 goto done; 20901 } 20902 20903 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20904 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20905 (caddr_t)dev); 20906 20907 sd_pm_exit(un); 20908 20909 mutex_enter(SD_MUTEX(un)); 20910 if (token == NULL) { 20911 rval = EAGAIN; 20912 goto done; 20913 } 20914 20915 /* 20916 * This is a special case IOCTL that doesn't return 20917 * until the media state changes. Routine sdpower 20918 * knows about and handles this, so don't count it 20919 * as an active cmd in the driver, which would 20920 * keep the device busy as seen by the pm framework. 
20921 * If the count isn't decremented the device can't 20922 * be powered down. 20923 */ 20924 un->un_ncmds_in_driver--; 20925 ASSERT(un->un_ncmds_in_driver >= 0); 20926 20927 /* 20928 * if a prior request had been made, this will be the same 20929 * token, as scsi_watch was designed that way. 20930 */ 20931 un->un_swr_token = token; 20932 un->un_specified_mediastate = state; 20933 20934 /* 20935 * Now wait for the media change. 20936 * We will not be signalled unless mediastate == state, but it is 20937 * still better to test for this condition, since there is a 20938 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20939 */ 20940 SD_TRACE(SD_LOG_COMMON, un, 20941 "sd_check_media: waiting for media state change\n"); 20942 while (un->un_mediastate == state) { 20943 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20944 SD_TRACE(SD_LOG_COMMON, un, 20945 "sd_check_media: waiting for media state " 20946 "was interrupted\n"); 20947 un->un_ncmds_in_driver++; 20948 rval = EINTR; 20949 goto done; 20950 } 20951 SD_TRACE(SD_LOG_COMMON, un, 20952 "sd_check_media: received signal, state=%x\n", 20953 un->un_mediastate); 20954 } 20955 /* 20956 * Inc the counter to indicate the device once again 20957 * has an active outstanding cmd. 20958 */ 20959 un->un_ncmds_in_driver++; 20960 } 20961 20962 /* invalidate geometry */ 20963 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20964 sr_ejected(un); 20965 } 20966 20967 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20968 uint64_t capacity; 20969 uint_t lbasize; 20970 20971 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20972 mutex_exit(SD_MUTEX(un)); 20973 /* 20974 * Since the following routines use SD_PATH_DIRECT, we must 20975 * call PM directly before the upcoming disk accesses. This 20976 * may cause the disk to be powered up and spun up. 20977 */ 20978 20979 if (sd_pm_entry(un) == DDI_SUCCESS) { 20980 rval = sd_send_scsi_READ_CAPACITY(un, 20981 &capacity, 20982 &lbasize, SD_PATH_DIRECT); 20983 if (rval != 0) { 20984 sd_pm_exit(un); 20985 mutex_enter(SD_MUTEX(un)); 20986 goto done; 20987 } 20988 } else { 20989 rval = EIO; 20990 mutex_enter(SD_MUTEX(un)); 20991 goto done; 20992 } 20993 mutex_enter(SD_MUTEX(un)); 20994 20995 sd_update_block_info(un, lbasize, capacity); 20996 20997 /* 20998 * Check if the media in the device is writable or not 20999 */ 21000 if (ISCD(un)) 21001 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21002 21003 mutex_exit(SD_MUTEX(un)); 21004 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21005 if ((cmlb_validate(un->un_cmlbhandle, 0, 21006 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21007 sd_set_pstats(un); 21008 SD_TRACE(SD_LOG_IO_PARTITION, un, 21009 "sd_check_media: un:0x%p pstats created and " 21010 "set\n", un); 21011 } 21012 21013 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21014 SD_PATH_DIRECT); 21015 sd_pm_exit(un); 21016 21017 mutex_enter(SD_MUTEX(un)); 21018 } 21019 done: 21020 un->un_f_watcht_stopped = FALSE; 21021 if (un->un_swr_token) { 21022 /* 21023 * Use of this local token and the mutex ensures that we avoid 21024 * some race conditions associated with terminating the 21025 * scsi watch. 
21026 */ 21027 token = un->un_swr_token; 21028 un->un_swr_token = (opaque_t)NULL; 21029 mutex_exit(SD_MUTEX(un)); 21030 (void) scsi_watch_request_terminate(token, 21031 SCSI_WATCH_TERMINATE_WAIT); 21032 mutex_enter(SD_MUTEX(un)); 21033 } 21034 21035 /* 21036 * Update the capacity kstat value, if no media previously 21037 * (capacity kstat is 0) and a media has been inserted 21038 * (un_f_blockcount_is_valid == TRUE) 21039 */ 21040 if (un->un_errstats) { 21041 struct sd_errstats *stp = NULL; 21042 21043 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21044 if ((stp->sd_capacity.value.ui64 == 0) && 21045 (un->un_f_blockcount_is_valid == TRUE)) { 21046 stp->sd_capacity.value.ui64 = 21047 (uint64_t)((uint64_t)un->un_blockcount * 21048 un->un_sys_blocksize); 21049 } 21050 } 21051 mutex_exit(SD_MUTEX(un)); 21052 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21053 return (rval); 21054 } 21055 21056 21057 /* 21058 * Function: sd_delayed_cv_broadcast 21059 * 21060 * Description: Delayed cv_broadcast to allow for target to recover from media 21061 * insertion. 21062 * 21063 * Arguments: arg - driver soft state (unit) structure 21064 */ 21065 21066 static void 21067 sd_delayed_cv_broadcast(void *arg) 21068 { 21069 struct sd_lun *un = arg; 21070 21071 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21072 21073 mutex_enter(SD_MUTEX(un)); 21074 un->un_dcvb_timeid = NULL; 21075 cv_broadcast(&un->un_state_cv); 21076 mutex_exit(SD_MUTEX(un)); 21077 } 21078 21079 21080 /* 21081 * Function: sd_media_watch_cb 21082 * 21083 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21084 * routine processes the TUR sense data and updates the driver 21085 * state if a transition has occurred. The user thread 21086 * (sd_check_media) is then signalled. 
* 21088 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21089 * among multiple watches that share this callback function 21090 * resultp - scsi watch facility result packet containing scsi 21091 * packet, status byte and sense data 21092 * 21093 * Return Code: 0 for success, -1 for failure 21094 */ 21095 21096 static int 21097 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21098 { 21099 struct sd_lun *un; 21100 struct scsi_status *statusp = resultp->statusp; 21101 uint8_t *sensep = (uint8_t *)resultp->sensep; 21102 enum dkio_state state = DKIO_NONE; 21103 dev_t dev = (dev_t)arg; 21104 uchar_t actual_sense_length; 21105 uint8_t skey, asc, ascq; 21106 21107 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21108 return (-1); 21109 } 21110 actual_sense_length = resultp->actual_sense_length; 21111 21112 mutex_enter(SD_MUTEX(un)); 21113 SD_TRACE(SD_LOG_COMMON, un, 21114 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21115 *((char *)statusp), (void *)sensep, actual_sense_length); 21116 21117 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21118 un->un_mediastate = DKIO_DEV_GONE; 21119 cv_broadcast(&un->un_state_cv); 21120 mutex_exit(SD_MUTEX(un)); 21121 21122 return (0); 21123 } 21124 21125 /* 21126 * If there was a check condition then sensep points to valid sense data. 21127 * If the status was not a check condition but a reservation or busy status 21128 * then the new state is DKIO_NONE. 21129 */ 21130 if (sensep != NULL) { 21131 skey = scsi_sense_key(sensep); 21132 asc = scsi_sense_asc(sensep); 21133 ascq = scsi_sense_ascq(sensep); 21134 21135 SD_INFO(SD_LOG_COMMON, un, 21136 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21137 skey, asc, ascq); 21138 /* This routine only uses up to 13 bytes of sense data. */ 21139 if (actual_sense_length >= 13) { 21140 if (skey == KEY_UNIT_ATTENTION) { 21141 if (asc == 0x28) { 21142 state = DKIO_INSERTED; 21143 } 21144 } else if (skey == KEY_NOT_READY) { 21145 /* 21146 * Sense data 02/04/02 means that the host 21147 * should send a start command. Explicitly 21148 * leave the media state as is 21149 * (inserted), as the media is present 21150 * and the host has stopped the device for PM 21151 * reasons. The next true read/write 21152 * to this media will bring the 21153 * device to the right state for 21154 * media access. 21155 */ 21156 if (asc == 0x3a) { 21157 state = DKIO_EJECTED; 21158 } else { 21159 /* 21160 * If the drive is busy with an 21161 * operation or long write, keep the 21162 * media in an inserted state. 21163 */ 21164 21165 if ((asc == 0x04) && 21166 ((ascq == 0x02) || 21167 (ascq == 0x07) || 21168 (ascq == 0x08))) { 21169 state = DKIO_INSERTED; 21170 } 21171 } 21172 } else if (skey == KEY_NO_SENSE) { 21173 if ((asc == 0x00) && (ascq == 0x00)) { 21174 /* 21175 * Sense Data 00/00/00 does not provide 21176 * any information about the state of 21177 * the media. Ignore it. 
21178 */ 21179 mutex_exit(SD_MUTEX(un)); 21180 return (0); 21181 } 21182 } 21183 } 21184 } else if ((*((char *)statusp) == STATUS_GOOD) && 21185 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21186 state = DKIO_INSERTED; 21187 } 21188 21189 SD_TRACE(SD_LOG_COMMON, un, 21190 "sd_media_watch_cb: state=%x, specified=%x\n", 21191 state, un->un_specified_mediastate); 21192 21193 /* 21194 * now signal the waiting thread if this is *not* the specified state; 21195 * delay the signal if the state is DKIO_INSERTED to allow the target 21196 * to recover 21197 */ 21198 if (state != un->un_specified_mediastate) { 21199 un->un_mediastate = state; 21200 if (state == DKIO_INSERTED) { 21201 /* 21202 * delay the signal to give the drive a chance 21203 * to do what it apparently needs to do 21204 */ 21205 SD_TRACE(SD_LOG_COMMON, un, 21206 "sd_media_watch_cb: delayed cv_broadcast\n"); 21207 if (un->un_dcvb_timeid == NULL) { 21208 un->un_dcvb_timeid = 21209 timeout(sd_delayed_cv_broadcast, un, 21210 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21211 } 21212 } else { 21213 SD_TRACE(SD_LOG_COMMON, un, 21214 "sd_media_watch_cb: immediate cv_broadcast\n"); 21215 cv_broadcast(&un->un_state_cv); 21216 } 21217 } 21218 mutex_exit(SD_MUTEX(un)); 21219 return (0); 21220 } 21221 21222 21223 /* 21224 * Function: sd_dkio_get_temp 21225 * 21226 * Description: This routine is the driver entry point for handling ioctl 21227 * requests to get the disk temperature. 21228 * 21229 * Arguments: dev - the device number 21230 * arg - pointer to user provided dk_temperature structure. 21231 * flag - this argument is a pass through to ddi_copyxxx() 21232 * directly from the mode argument of ioctl(). 21233 * 21234 * Return Code: 0 21235 * EFAULT 21236 * ENXIO 21237 * EAGAIN 21238 */ 21239 21240 static int 21241 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21242 { 21243 struct sd_lun *un = NULL; 21244 struct dk_temperature *dktemp = NULL; 21245 uchar_t *temperature_page; 21246 int rval = 0; 21247 int path_flag = SD_PATH_STANDARD; 21248 21249 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21250 return (ENXIO); 21251 } 21252 21253 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21254 21255 /* copyin the disk temp argument to get the user flags */ 21256 if (ddi_copyin((void *)arg, dktemp, 21257 sizeof (struct dk_temperature), flag) != 0) { 21258 rval = EFAULT; 21259 goto done; 21260 } 21261 21262 /* Initialize the temperature to invalid. */ 21263 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21264 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21265 21266 /* 21267 * Note: Investigate removing the "bypass pm" semantic. 21268 * Can we just bypass PM always? 21269 */ 21270 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21271 path_flag = SD_PATH_DIRECT; 21272 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21273 mutex_enter(&un->un_pm_mutex); 21274 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21275 /* 21276 * If DKT_BYPASS_PM is set and the drive happens to be 21277 * in low power mode, we cannot wake it up; we need to 21278 * return EAGAIN. 21279 */ 21280 mutex_exit(&un->un_pm_mutex); 21281 rval = EAGAIN; 21282 goto done; 21283 } else { 21284 /* 21285 * Indicate to PM the device is busy. This is required 21286 * to avoid a race - i.e. the ioctl is issuing a 21287 * command and the pm framework brings down the device 21288 * to low power mode (possible power cut-off on some 21289 * platforms). 
21290 */ 21291 mutex_exit(&un->un_pm_mutex); 21292 if (sd_pm_entry(un) != DDI_SUCCESS) { 21293 rval = EAGAIN; 21294 goto done; 21295 } 21296 } 21297 } 21298 21299 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21300 21301 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21302 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21303 goto done2; 21304 } 21305 21306 /* 21307 * For the current temperature verify that the parameter length is 0x02 21308 * and the parameter code is 0x00 21309 */ 21310 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21311 (temperature_page[5] == 0x00)) { 21312 if (temperature_page[9] == 0xFF) { 21313 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21314 } else { 21315 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21316 } 21317 } 21318 21319 /* 21320 * For the reference temperature verify that the parameter 21321 * length is 0x02 and the parameter code is 0x01 21322 */ 21323 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21324 (temperature_page[11] == 0x01)) { 21325 if (temperature_page[15] == 0xFF) { 21326 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21327 } else { 21328 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21329 } 21330 } 21331 21332 /* Do the copyout regardless of the temperature command's status. */ 21333 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21334 flag) != 0) { 21335 rval = EFAULT; 21336 } 21337 21338 done2: 21339 if (path_flag == SD_PATH_DIRECT) { 21340 sd_pm_exit(un); 21341 } 21342 21343 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21344 done: 21345 if (dktemp != NULL) { 21346 kmem_free(dktemp, sizeof (struct dk_temperature)); 21347 } 21348 21349 return (rval); 21350 } 21351 21352 21353 /* 21354 * Function: sd_log_page_supported 21355 * 21356 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21357 * supported log pages. 21358 * 21359 * Arguments: un - driver soft state (unit) structure 21360 * log_page - the log page code being searched for 21361 * 21362 * Return Code: -1 - on error (log sense is optional and may not be supported). 21363 * 0 - log page not found. 21364 * 1 - log page found. 21365 */ 21366 21367 static int 21368 sd_log_page_supported(struct sd_lun *un, int log_page) 21369 { 21370 uchar_t *log_page_data; 21371 int i; 21372 int match = 0; 21373 int log_size; 21374 21375 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21376 21377 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21378 SD_PATH_DIRECT) != 0) { 21379 SD_ERROR(SD_LOG_COMMON, un, 21380 "sd_log_page_supported: failed log page retrieval\n"); 21381 kmem_free(log_page_data, 0xFF); 21382 return (-1); 21383 } 21384 log_size = log_page_data[3]; 21385 21386 /* 21387 * The list of supported log pages starts at the fourth byte. Check 21388 * until we run out of log pages or a match is found. 21389 */ 21390 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21391 if (log_page_data[i] == log_page) { 21392 match++; 21393 } 21394 } 21395 kmem_free(log_page_data, 0xFF); 21396 return (match); 21397 } 21398 21399 21400 /* 21401 * Function: sd_mhdioc_failfast 21402 * 21403 * Description: This routine is the driver entry point for handling ioctl 21404 * requests to enable/disable the multihost failfast option. 21405 * (MHIOCENFAILFAST) 21406 * 21407 * Arguments: dev - the device number 21408 * arg - user specified probing interval. 21409 * flag - this argument is a pass through to ddi_copyxxx() 21410 * directly from the mode argument of ioctl(). 
21411 * 21412 * Return Code: 0 21413 * EFAULT 21414 * ENXIO 21415 */ 21416 21417 static int 21418 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21419 { 21420 struct sd_lun *un = NULL; 21421 int mh_time; 21422 int rval = 0; 21423 21424 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21425 return (ENXIO); 21426 } 21427 21428 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21429 return (EFAULT); 21430 21431 if (mh_time) { 21432 mutex_enter(SD_MUTEX(un)); 21433 un->un_resvd_status |= SD_FAILFAST; 21434 mutex_exit(SD_MUTEX(un)); 21435 /* 21436 * If mh_time is INT_MAX, then this ioctl is being used for 21437 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21438 */ 21439 if (mh_time != INT_MAX) { 21440 rval = sd_check_mhd(dev, mh_time); 21441 } 21442 } else { 21443 (void) sd_check_mhd(dev, 0); 21444 mutex_enter(SD_MUTEX(un)); 21445 un->un_resvd_status &= ~SD_FAILFAST; 21446 mutex_exit(SD_MUTEX(un)); 21447 } 21448 return (rval); 21449 } 21450 21451 21452 /* 21453 * Function: sd_mhdioc_takeown 21454 * 21455 * Description: This routine is the driver entry point for handling ioctl 21456 * requests to forcefully acquire exclusive access rights to the 21457 * multihost disk (MHIOCTKOWN). 21458 * 21459 * Arguments: dev - the device number 21460 * arg - user provided structure specifying the delay 21461 * parameters in milliseconds 21462 * flag - this argument is a pass through to ddi_copyxxx() 21463 * directly from the mode argument of ioctl(). 21464 * 21465 * Return Code: 0 21466 * EFAULT 21467 * ENXIO 21468 */ 21469 21470 static int 21471 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21472 { 21473 struct sd_lun *un = NULL; 21474 struct mhioctkown *tkown = NULL; 21475 int rval = 0; 21476 21477 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21478 return (ENXIO); 21479 } 21480 21481 if (arg != NULL) { 21482 tkown = (struct mhioctkown *) 21483 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21484 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21485 if (rval != 0) { 21486 rval = EFAULT; 21487 goto error; 21488 } 21489 } 21490 21491 rval = sd_take_ownership(dev, tkown); 21492 mutex_enter(SD_MUTEX(un)); 21493 if (rval == 0) { 21494 un->un_resvd_status |= SD_RESERVE; 21495 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21496 sd_reinstate_resv_delay = 21497 tkown->reinstate_resv_delay * 1000; 21498 } else { 21499 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21500 } 21501 /* 21502 * Give the scsi_watch routine interval set by 21503 * the MHIOCENFAILFAST ioctl precedence here. 21504 */ 21505 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21506 mutex_exit(SD_MUTEX(un)); 21507 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21508 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21509 "sd_mhdioc_takeown : %d\n", 21510 sd_reinstate_resv_delay); 21511 } else { 21512 mutex_exit(SD_MUTEX(un)); 21513 } 21514 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21515 sd_mhd_reset_notify_cb, (caddr_t)un); 21516 } else { 21517 un->un_resvd_status &= ~SD_RESERVE; 21518 mutex_exit(SD_MUTEX(un)); 21519 } 21520 21521 error: 21522 if (tkown != NULL) { 21523 kmem_free(tkown, sizeof (struct mhioctkown)); 21524 } 21525 return (rval); 21526 } 21527 21528 21529 /* 21530 * Function: sd_mhdioc_release 21531 * 21532 * Description: This routine is the driver entry point for handling ioctl 21533 * requests to release exclusive access rights to the multihost 21534 * disk (MHIOCRELEASE). 
21535 * 21536 * Arguments: dev - the device number 21537 * 21538 * Return Code: 0 21539 * ENXIO 21540 */ 21541 21542 static int 21543 sd_mhdioc_release(dev_t dev) 21544 { 21545 struct sd_lun *un = NULL; 21546 timeout_id_t resvd_timeid_save; 21547 int resvd_status_save; 21548 int rval = 0; 21549 21550 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21551 return (ENXIO); 21552 } 21553 21554 mutex_enter(SD_MUTEX(un)); 21555 resvd_status_save = un->un_resvd_status; 21556 un->un_resvd_status &= 21557 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21558 if (un->un_resvd_timeid) { 21559 resvd_timeid_save = un->un_resvd_timeid; 21560 un->un_resvd_timeid = NULL; 21561 mutex_exit(SD_MUTEX(un)); 21562 (void) untimeout(resvd_timeid_save); 21563 } else { 21564 mutex_exit(SD_MUTEX(un)); 21565 } 21566 21567 /* 21568 * destroy any pending timeout thread that may be attempting to 21569 * reinstate reservation on this device. 21570 */ 21571 sd_rmv_resv_reclaim_req(dev); 21572 21573 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21574 mutex_enter(SD_MUTEX(un)); 21575 if ((un->un_mhd_token) && 21576 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21577 mutex_exit(SD_MUTEX(un)); 21578 (void) sd_check_mhd(dev, 0); 21579 } else { 21580 mutex_exit(SD_MUTEX(un)); 21581 } 21582 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21583 sd_mhd_reset_notify_cb, (caddr_t)un); 21584 } else { 21585 /* 21586 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21587 */ 21588 mutex_enter(SD_MUTEX(un)); 21589 un->un_resvd_status = resvd_status_save; 21590 mutex_exit(SD_MUTEX(un)); 21591 } 21592 return (rval); 21593 } 21594 21595 21596 /* 21597 * Function: sd_mhdioc_register_devid 21598 * 21599 * Description: This routine is the driver entry point for handling ioctl 21600 * requests to register the device id (MHIOCREREGISTERDEVID). 21601 * 21602 * Note: The implementation for this ioctl has been updated to 21603 * be consistent with the original PSARC case (1999/357) 21604 * (4375899, 4241671, 4220005) 21605 * 21606 * Arguments: dev - the device number 21607 * 21608 * Return Code: 0 21609 * ENXIO 21610 */ 21611 21612 static int 21613 sd_mhdioc_register_devid(dev_t dev) 21614 { 21615 struct sd_lun *un = NULL; 21616 int rval = 0; 21617 21618 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21619 return (ENXIO); 21620 } 21621 21622 ASSERT(!mutex_owned(SD_MUTEX(un))); 21623 21624 mutex_enter(SD_MUTEX(un)); 21625 21626 /* If a devid already exists, de-register it */ 21627 if (un->un_devid != NULL) { 21628 ddi_devid_unregister(SD_DEVINFO(un)); 21629 /* 21630 * After unregister devid, needs to free devid memory 21631 */ 21632 ddi_devid_free(un->un_devid); 21633 un->un_devid = NULL; 21634 } 21635 21636 /* Check for reservation conflict */ 21637 mutex_exit(SD_MUTEX(un)); 21638 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21639 mutex_enter(SD_MUTEX(un)); 21640 21641 switch (rval) { 21642 case 0: 21643 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21644 break; 21645 case EACCES: 21646 break; 21647 default: 21648 rval = EIO; 21649 } 21650 21651 mutex_exit(SD_MUTEX(un)); 21652 return (rval); 21653 } 21654 21655 21656 /* 21657 * Function: sd_mhdioc_inkeys 21658 * 21659 * Description: This routine is the driver entry point for handling ioctl 21660 * requests to issue the SCSI-3 Persistent In Read Keys command 21661 * to the device (MHIOCGRP_INKEYS). 
* 21663 * Arguments: dev - the device number 21664 * arg - user provided in_keys structure 21665 * flag - this argument is a pass through to ddi_copyxxx() 21666 * directly from the mode argument of ioctl(). 21667 * 21668 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21669 * ENXIO 21670 * EFAULT 21671 */ 21672 21673 static int 21674 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21675 { 21676 struct sd_lun *un; 21677 mhioc_inkeys_t inkeys; 21678 int rval = 0; 21679 21680 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21681 return (ENXIO); 21682 } 21683 21684 #ifdef _MULTI_DATAMODEL 21685 switch (ddi_model_convert_from(flag & FMODELS)) { 21686 case DDI_MODEL_ILP32: { 21687 struct mhioc_inkeys32 inkeys32; 21688 21689 if (ddi_copyin(arg, &inkeys32, 21690 sizeof (struct mhioc_inkeys32), flag) != 0) { 21691 return (EFAULT); 21692 } 21693 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21694 if ((rval = sd_persistent_reservation_in_read_keys(un, 21695 &inkeys, flag)) != 0) { 21696 return (rval); 21697 } 21698 inkeys32.generation = inkeys.generation; 21699 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21700 flag) != 0) { 21701 return (EFAULT); 21702 } 21703 break; 21704 } 21705 case DDI_MODEL_NONE: 21706 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21707 flag) != 0) { 21708 return (EFAULT); 21709 } 21710 if ((rval = sd_persistent_reservation_in_read_keys(un, 21711 &inkeys, flag)) != 0) { 21712 return (rval); 21713 } 21714 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21715 flag) != 0) { 21716 return (EFAULT); 21717 } 21718 break; 21719 } 21720 21721 #else /* ! _MULTI_DATAMODEL */ 21722 21723 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21724 return (EFAULT); 21725 } 21726 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21727 if (rval != 0) { 21728 return (rval); 21729 } 21730 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21731 return (EFAULT); 21732 } 21733 21734 #endif /* _MULTI_DATAMODEL */ 21735 21736 return (rval); 21737 } 21738 21739 21740 /* 21741 * Function: sd_mhdioc_inresv 21742 * 21743 * Description: This routine is the driver entry point for handling ioctl 21744 * requests to issue the SCSI-3 Persistent In Read Reservations 21745 * command to the device (MHIOCGRP_INRESV). 21746 * 21747 * Arguments: dev - the device number 21748 * arg - user provided in_resv structure 21749 * flag - this argument is a pass through to ddi_copyxxx() 21750 * directly from the mode argument of ioctl(). 
21751 * 21752 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21753 * ENXIO 21754 * EFAULT 21755 */ 21756 21757 static int 21758 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21759 { 21760 struct sd_lun *un; 21761 mhioc_inresvs_t inresvs; 21762 int rval = 0; 21763 21764 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21765 return (ENXIO); 21766 } 21767 21768 #ifdef _MULTI_DATAMODEL 21769 21770 switch (ddi_model_convert_from(flag & FMODELS)) { 21771 case DDI_MODEL_ILP32: { 21772 struct mhioc_inresvs32 inresvs32; 21773 21774 if (ddi_copyin(arg, &inresvs32, 21775 sizeof (struct mhioc_inresvs32), flag) != 0) { 21776 return (EFAULT); 21777 } 21778 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21779 if ((rval = sd_persistent_reservation_in_read_resv(un, 21780 &inresvs, flag)) != 0) { 21781 return (rval); 21782 } 21783 inresvs32.generation = inresvs.generation; 21784 if (ddi_copyout(&inresvs32, arg, 21785 sizeof (struct mhioc_inresvs32), flag) != 0) { 21786 return (EFAULT); 21787 } 21788 break; 21789 } 21790 case DDI_MODEL_NONE: 21791 if (ddi_copyin(arg, &inresvs, 21792 sizeof (mhioc_inresvs_t), flag) != 0) { 21793 return (EFAULT); 21794 } 21795 if ((rval = sd_persistent_reservation_in_read_resv(un, 21796 &inresvs, flag)) != 0) { 21797 return (rval); 21798 } 21799 if (ddi_copyout(&inresvs, arg, 21800 sizeof (mhioc_inresvs_t), flag) != 0) { 21801 return (EFAULT); 21802 } 21803 break; 21804 } 21805 21806 #else /* ! _MULTI_DATAMODEL */ 21807 21808 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21809 return (EFAULT); 21810 } 21811 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21812 if (rval != 0) { 21813 return (rval); 21814 } 21815 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21816 return (EFAULT); 21817 } 21818 21819 #endif /* ! _MULTI_DATAMODEL */ 21820 21821 return (rval); 21822 } 21823 21824 21825 /* 21826 * The following routines support the clustering functionality described below 21827 * and implement lost reservation reclaim functionality. 21828 * 21829 * Clustering 21830 * ---------- 21831 * The clustering code uses two different, independent forms of SCSI 21832 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21833 * Persistent Group Reservations. For any particular disk, it will use either 21834 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21835 * 21836 * SCSI-2 21837 * The cluster software takes ownership of a multi-hosted disk by issuing the 21838 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21839 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21840 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21841 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21842 * driver. The meaning of failfast is that if the driver (on this host) ever 21843 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21844 * it should immediately panic the host. The motivation for this ioctl is that 21845 * if this host does encounter reservation conflict, the underlying cause is 21846 * that some other host of the cluster has decided that this host is no longer 21847 * in the cluster and has seized control of the disks for itself. Since this 21848 * host is no longer in the cluster, it ought to panic itself. 
The 21849 * MHIOCENFAILFAST ioctl does two things: 21850 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21851 * error to panic the host 21852 * (b) it sets up a periodic timer to test whether this host still has 21853 * "access" (in that no other host has reserved the device): if the 21854 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21855 * purpose of that periodic timer is to handle scenarios where the host is 21856 * otherwise temporarily quiescent, temporarily doing no real i/o. 21857 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host 21858 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21859 * the device itself. 21860 * 21861 * SCSI-3 PGR 21862 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21863 * facility is supported through the shared multihost disk ioctls 21864 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21865 * MHIOCGRP_PREEMPTANDABORT). 21866 * 21867 * Reservation Reclaim: 21868 * -------------------- 21869 * To support the lost reservation reclaim operations this driver creates a 21870 * single thread to handle reinstating reservations on all devices that have 21871 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21872 * have lost reservations when the scsi watch facility calls back sd_mhd_watch_cb, 21873 * and the reservation reclaim thread loops through the requests to regain the 21874 * lost reservations. 21875 */ 21876 21877 /* 21878 * Function: sd_check_mhd() 21879 * 21880 * Description: This function sets up and submits a scsi watch request or 21881 * terminates an existing watch request. This routine is used in 21882 * support of reservation reclaim. 21883 * 21884 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21885 * among multiple watches that share the callback function 21886 * interval - the number of milliseconds specifying the watch 21887 * interval for issuing TEST UNIT READY commands. If 21888 * set to 0 the watch should be terminated. If the 21889 * interval is set to 0 and if the device is required 21890 * to hold reservation while disabling failfast, the 21891 * watch is restarted with an interval of 21892 * reinstate_resv_delay. 21893 * 21894 * Return Code: 0 - Successful submit/terminate of scsi watch request 21895 * ENXIO - Indicates an invalid device was specified 21896 * EAGAIN - Unable to submit the scsi watch request 21897 */ 21898 21899 static int 21900 sd_check_mhd(dev_t dev, int interval) 21901 { 21902 struct sd_lun *un; 21903 opaque_t token; 21904 21905 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21906 return (ENXIO); 21907 } 21908 21909 /* is this a watch termination request? */ 21910 if (interval == 0) { 21911 mutex_enter(SD_MUTEX(un)); 21912 /* if there is an existing watch task then terminate it */ 21913 if (un->un_mhd_token) { 21914 token = un->un_mhd_token; 21915 un->un_mhd_token = NULL; 21916 mutex_exit(SD_MUTEX(un)); 21917 (void) scsi_watch_request_terminate(token, 21918 SCSI_WATCH_TERMINATE_WAIT); 21919 mutex_enter(SD_MUTEX(un)); 21920 } else { 21921 mutex_exit(SD_MUTEX(un)); 21922 /* 21923 * Note: If we return here we don't check for the 21924 * failfast case. This is the original legacy 21925 * implementation, but perhaps we should be checking 21926 * the failfast case. 
21927 */ 21928 return (0); 21929 } 21930 /* 21931 * If the device is required to hold reservation while 21932 * disabling failfast, we need to restart the scsi_watch 21933 * routine with an interval of reinstate_resv_delay. 21934 */ 21935 if (un->un_resvd_status & SD_RESERVE) { 21936 interval = sd_reinstate_resv_delay/1000; 21937 } else { 21938 /* no failfast so bail */ 21939 mutex_exit(SD_MUTEX(un)); 21940 return (0); 21941 } 21942 mutex_exit(SD_MUTEX(un)); 21943 } 21944 21945 /* 21946 * adjust minimum time interval to 1 second, 21947 * and convert from msecs to usecs 21948 */ 21949 if (interval > 0 && interval < 1000) { 21950 interval = 1000; 21951 } 21952 interval *= 1000; 21953 21954 /* 21955 * submit the request to the scsi_watch service 21956 */ 21957 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21958 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21959 if (token == NULL) { 21960 return (EAGAIN); 21961 } 21962 21963 /* 21964 * save token for termination later on 21965 */ 21966 mutex_enter(SD_MUTEX(un)); 21967 un->un_mhd_token = token; 21968 mutex_exit(SD_MUTEX(un)); 21969 return (0); 21970 } 21971 21972 21973 /* 21974 * Function: sd_mhd_watch_cb() 21975 * 21976 * Description: This function is the call back function used by the scsi watch 21977 * facility. The scsi watch facility sends the "Test Unit Ready" 21978 * and processes the status. If applicable (i.e. a "Unit Attention" 21979 * status and automatic "Request Sense" not used) the scsi watch 21980 * facility will send a "Request Sense" and retrieve the sense data 21981 * to be passed to this callback function. In either case the 21982 * automatic "Request Sense" or the facility submitting one, this 21983 * callback is passed the status and sense data. 21984 * 21985 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21986 * among multiple watches that share this callback function 21987 * resultp - scsi watch facility result packet containing scsi 21988 * packet, status byte and sense data 21989 * 21990 * Return Code: 0 - continue the watch task 21991 * non-zero - terminate the watch task 21992 */ 21993 21994 static int 21995 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21996 { 21997 struct sd_lun *un; 21998 struct scsi_status *statusp; 21999 uint8_t *sensep; 22000 struct scsi_pkt *pkt; 22001 uchar_t actual_sense_length; 22002 dev_t dev = (dev_t)arg; 22003 22004 ASSERT(resultp != NULL); 22005 statusp = resultp->statusp; 22006 sensep = (uint8_t *)resultp->sensep; 22007 pkt = resultp->pkt; 22008 actual_sense_length = resultp->actual_sense_length; 22009 22010 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22011 return (ENXIO); 22012 } 22013 22014 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22015 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22016 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22017 22018 /* Begin processing of the status and/or sense data */ 22019 if (pkt->pkt_reason != CMD_CMPLT) { 22020 /* Handle the incomplete packet */ 22021 sd_mhd_watch_incomplete(un, pkt); 22022 return (0); 22023 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22024 if (*((unsigned char *)statusp) 22025 == STATUS_RESERVATION_CONFLICT) { 22026 /* 22027 * Handle a reservation conflict by panicking if 22028 * configured for failfast or by logging the conflict 22029 * and updating the reservation status 22030 */ 22031 mutex_enter(SD_MUTEX(un)); 22032 if ((un->un_resvd_status & SD_FAILFAST) && 22033 (sd_failfast_enable)) { 22034 
sd_panic_for_res_conflict(un); 22035 /*NOTREACHED*/ 22036 } 22037 SD_INFO(SD_LOG_IOCTL_MHD, un, 22038 "sd_mhd_watch_cb: Reservation Conflict\n"); 22039 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22040 mutex_exit(SD_MUTEX(un)); 22041 } 22042 } 22043 22044 if (sensep != NULL) { 22045 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22046 mutex_enter(SD_MUTEX(un)); 22047 if ((scsi_sense_asc(sensep) == 22048 SD_SCSI_RESET_SENSE_CODE) && 22049 (un->un_resvd_status & SD_RESERVE)) { 22050 /* 22051 * The additional sense code indicates a power 22052 * on or bus device reset has occurred; update 22053 * the reservation status. 22054 */ 22055 un->un_resvd_status |= 22056 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22057 SD_INFO(SD_LOG_IOCTL_MHD, un, 22058 "sd_mhd_watch_cb: Lost Reservation\n"); 22059 } 22060 } else { 22061 return (0); 22062 } 22063 } else { 22064 mutex_enter(SD_MUTEX(un)); 22065 } 22066 22067 if ((un->un_resvd_status & SD_RESERVE) && 22068 (un->un_resvd_status & SD_LOST_RESERVE)) { 22069 if (un->un_resvd_status & SD_WANT_RESERVE) { 22070 /* 22071 * A reset occurred in between the last probe and this 22072 * one so if a timeout is pending cancel it. 22073 */ 22074 if (un->un_resvd_timeid) { 22075 timeout_id_t temp_id = un->un_resvd_timeid; 22076 un->un_resvd_timeid = NULL; 22077 mutex_exit(SD_MUTEX(un)); 22078 (void) untimeout(temp_id); 22079 mutex_enter(SD_MUTEX(un)); 22080 } 22081 un->un_resvd_status &= ~SD_WANT_RESERVE; 22082 } 22083 if (un->un_resvd_timeid == 0) { 22084 /* Schedule a timeout to handle the lost reservation */ 22085 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22086 (void *)dev, 22087 drv_usectohz(sd_reinstate_resv_delay)); 22088 } 22089 } 22090 mutex_exit(SD_MUTEX(un)); 22091 return (0); 22092 } 22093 22094 22095 /* 22096 * Function: sd_mhd_watch_incomplete() 22097 * 22098 * Description: This function is used to find out why a scsi pkt sent by the 22099 * scsi watch facility was not completed. Under some scenarios this 22100 * routine will return. Otherwise it will send a bus reset to see 22101 * if the drive is still online. 22102 * 22103 * Arguments: un - driver soft state (unit) structure 22104 * pkt - incomplete scsi pkt 22105 */ 22106 22107 static void 22108 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22109 { 22110 int be_chatty; 22111 int perr; 22112 22113 ASSERT(pkt != NULL); 22114 ASSERT(un != NULL); 22115 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22116 perr = (pkt->pkt_statistics & STAT_PERR); 22117 22118 mutex_enter(SD_MUTEX(un)); 22119 if (un->un_state == SD_STATE_DUMPING) { 22120 mutex_exit(SD_MUTEX(un)); 22121 return; 22122 } 22123 22124 switch (pkt->pkt_reason) { 22125 case CMD_UNX_BUS_FREE: 22126 /* 22127 * If we had a parity error that caused the target to drop BSY*, 22128 * don't be chatty about it. 22129 */ 22130 if (perr && be_chatty) { 22131 be_chatty = 0; 22132 } 22133 break; 22134 case CMD_TAG_REJECT: 22135 /* 22136 * The SCSI-2 spec states that a tag reject will be sent by the 22137 * target if tagged queuing is not supported. A tag reject may 22138 * also be sent during certain initialization periods or to 22139 * control internal resources. For the latter case the target 22140 * may also return Queue Full. 22141 * 22142 * If this driver receives a tag reject from a target that is 22143 * going through an init period or controlling internal 22144 * resources tagged queuing will be disabled. 
This is a less 22145 * than optimal behavior but the driver is unable to determine 22146 * the target state and assumes tagged queueing is not supported. 22147 */ 22148 pkt->pkt_flags = 0; 22149 un->un_tagflags = 0; 22150 22151 if (un->un_f_opt_queueing == TRUE) { 22152 un->un_throttle = min(un->un_throttle, 3); 22153 } else { 22154 un->un_throttle = 1; 22155 } 22156 mutex_exit(SD_MUTEX(un)); 22157 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22158 mutex_enter(SD_MUTEX(un)); 22159 break; 22160 case CMD_INCOMPLETE: 22161 /* 22162 * The transport stopped with an abnormal state; fall through and 22163 * reset the target and/or bus unless selection did not complete 22164 * (indicated by STATE_GOT_BUS), in which case we don't want to 22165 * go through a target/bus reset 22166 */ 22167 if (pkt->pkt_state == STATE_GOT_BUS) { 22168 break; 22169 } 22170 /*FALLTHROUGH*/ 22171 22172 case CMD_TIMEOUT: 22173 default: 22174 /* 22175 * The lun may still be running the command, so a lun reset 22176 * should be attempted. If the lun reset fails or cannot be 22177 * issued, then try a target reset. Lastly try a bus reset. 22178 */ 22179 if ((pkt->pkt_statistics & 22180 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22181 int reset_retval = 0; 22182 mutex_exit(SD_MUTEX(un)); 22183 if (un->un_f_allow_bus_device_reset == TRUE) { 22184 if (un->un_f_lun_reset_enabled == TRUE) { 22185 reset_retval = 22186 scsi_reset(SD_ADDRESS(un), 22187 RESET_LUN); 22188 } 22189 if (reset_retval == 0) { 22190 reset_retval = 22191 scsi_reset(SD_ADDRESS(un), 22192 RESET_TARGET); 22193 } 22194 } 22195 if (reset_retval == 0) { 22196 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22197 } 22198 mutex_enter(SD_MUTEX(un)); 22199 } 22200 break; 22201 } 22202 22203 /* A device/bus reset has occurred; update the reservation status. */ 22204 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22205 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22206 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22207 un->un_resvd_status |= 22208 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22209 SD_INFO(SD_LOG_IOCTL_MHD, un, 22210 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22211 } 22212 } 22213 22214 /* 22215 * The disk has been turned off; update the device state. 22216 * 22217 * Note: Should we be offlining the disk here? 22218 */ 22219 if (pkt->pkt_state == STATE_GOT_BUS) { 22220 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22221 "Disk not responding to selection\n"); 22222 if (un->un_state != SD_STATE_OFFLINE) { 22223 New_state(un, SD_STATE_OFFLINE); 22224 } 22225 } else if (be_chatty) { 22226 /* 22227 * suppress messages if they are all the same pkt reason; 22228 * with TQ, many (up to 256) are returned with the same 22229 * pkt_reason 22230 */ 22231 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22232 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22233 "sd_mhd_watch_incomplete: " 22234 "SCSI transport failed: reason '%s'\n", 22235 scsi_rname(pkt->pkt_reason)); 22236 } 22237 } 22238 un->un_last_pkt_reason = pkt->pkt_reason; 22239 mutex_exit(SD_MUTEX(un)); 22240 } 22241 22242 22243 /* 22244 * Function: sd_sname() 22245 * 22246 * Description: This is a simple little routine to return a string containing 22247 * a printable description of a command status byte for use in 22248 * logging. 22249 * 22250 * Arguments: status - the status byte 22251 * 22252 * Return Code: char * - string containing status description.
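* * Example (illustrative): a status byte of 0x18 masks to * STATUS_RESERVATION_CONFLICT under STATUS_MASK, so sd_sname(0x18) * returns "reservation_conflict".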
22253 */ 22254 22255 static char * 22256 sd_sname(uchar_t status) 22257 { 22258 switch (status & STATUS_MASK) { 22259 case STATUS_GOOD: 22260 return ("good status"); 22261 case STATUS_CHECK: 22262 return ("check condition"); 22263 case STATUS_MET: 22264 return ("condition met"); 22265 case STATUS_BUSY: 22266 return ("busy"); 22267 case STATUS_INTERMEDIATE: 22268 return ("intermediate"); 22269 case STATUS_INTERMEDIATE_MET: 22270 return ("intermediate - condition met"); 22271 case STATUS_RESERVATION_CONFLICT: 22272 return ("reservation_conflict"); 22273 case STATUS_TERMINATED: 22274 return ("command terminated"); 22275 case STATUS_QFULL: 22276 return ("queue full"); 22277 default: 22278 return ("<unknown status>"); 22279 } 22280 } 22281 22282 22283 /* 22284 * Function: sd_mhd_resvd_recover() 22285 * 22286 * Description: This function adds a reservation entry to the 22287 * sd_resv_reclaim_request list and signals the reservation 22288 * reclaim thread that there is work pending. If the reservation 22289 * reclaim thread has not been previously created this function 22290 * will kick it off. 22291 * 22292 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22293 * among multiple watches that share this callback function 22294 * 22295 * Context: This routine is called by timeout() and is run in interrupt 22296 * context. It must not sleep or call other functions which may 22297 * sleep. 22298 */ 22299 22300 static void 22301 sd_mhd_resvd_recover(void *arg) 22302 { 22303 dev_t dev = (dev_t)arg; 22304 struct sd_lun *un; 22305 struct sd_thr_request *sd_treq = NULL; 22306 struct sd_thr_request *sd_cur = NULL; 22307 struct sd_thr_request *sd_prev = NULL; 22308 int already_there = 0; 22309 22310 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22311 return; 22312 } 22313 22314 mutex_enter(SD_MUTEX(un)); 22315 un->un_resvd_timeid = NULL; 22316 if (un->un_resvd_status & SD_WANT_RESERVE) { 22317 /* 22318 * There was a reset so don't issue the reserve, allow the 22319 * sd_mhd_watch_cb callback function to notice this and 22320 * reschedule the timeout for reservation. 22321 */ 22322 mutex_exit(SD_MUTEX(un)); 22323 return; 22324 } 22325 mutex_exit(SD_MUTEX(un)); 22326 22327 /* 22328 * Add this device to the sd_resv_reclaim_request list and the 22329 * sd_resv_reclaim_thread should take care of the rest. 22330 * 22331 * Note: We can't sleep in this context so if the memory allocation 22332 * fails allow the sd_mhd_watch_cb callback function to notice this and 22333 * reschedule the timeout for reservation. 
(4378460) 22334 */ 22335 sd_treq = (struct sd_thr_request *) 22336 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22337 if (sd_treq == NULL) { 22338 return; 22339 } 22340 22341 sd_treq->sd_thr_req_next = NULL; 22342 sd_treq->dev = dev; 22343 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22344 if (sd_tr.srq_thr_req_head == NULL) { 22345 sd_tr.srq_thr_req_head = sd_treq; 22346 } else { 22347 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22348 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22349 if (sd_cur->dev == dev) { 22350 /* 22351 * already in Queue so don't log 22352 * another request for the device 22353 */ 22354 already_there = 1; 22355 break; 22356 } 22357 sd_prev = sd_cur; 22358 } 22359 if (!already_there) { 22360 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22361 "logging request for %lx\n", dev); 22362 sd_prev->sd_thr_req_next = sd_treq; 22363 } else { 22364 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22365 } 22366 } 22367 22368 /* 22369 * Create a kernel thread to do the reservation reclaim and free up 22370 * this (interrupt) context; we cannot block here while the reclaim 22371 * is performed 22372 */ 22373 if (sd_tr.srq_resv_reclaim_thread == NULL) 22374 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22375 sd_resv_reclaim_thread, NULL, 22376 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22377 22378 /* Tell the reservation reclaim thread that it has work to do */ 22379 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22380 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22381 } 22382 22383 /* 22384 * Function: sd_resv_reclaim_thread() 22385 * 22386 * Description: This function implements the reservation reclaim operations. 22387 * 22388 * Arguments: none; this thread drains the sd_tr.srq_thr_req_head request 22389 * queue that sd_mhd_resvd_recover() populates 22390 */ 22391 22392 static void 22393 sd_resv_reclaim_thread() 22394 { 22395 struct sd_lun *un; 22396 struct sd_thr_request *sd_mhreq; 22397 22398 /* Wait for work */ 22399 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22400 if (sd_tr.srq_thr_req_head == NULL) { 22401 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22402 &sd_tr.srq_resv_reclaim_mutex); 22403 } 22404 22405 /* Loop while we have work */ 22406 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22407 un = ddi_get_soft_state(sd_state, 22408 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22409 if (un == NULL) { 22410 /* 22411 * softstate structure is NULL so just 22412 * dequeue the request and continue 22413 */ 22414 sd_tr.srq_thr_req_head = 22415 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22416 kmem_free(sd_tr.srq_thr_cur_req, 22417 sizeof (struct sd_thr_request)); 22418 continue; 22419 } 22420 22421 /* dequeue the request */ 22422 sd_mhreq = sd_tr.srq_thr_cur_req; 22423 sd_tr.srq_thr_req_head = 22424 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22425 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22426 22427 /* 22428 * Reclaim reservation only if SD_RESERVE is still set. There 22429 * may have been a call to MHIOCRELEASE before we got here. 22430 */ 22431 mutex_enter(SD_MUTEX(un)); 22432 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22433 /* 22434 * Note: The SD_LOST_RESERVE flag is cleared before 22435 * reclaiming the reservation.
If this is done after the 22436 * call to sd_reserve_release a reservation loss in the 22437 * window between pkt completion of reserve cmd and 22438 * mutex_enter below may not be recognized 22439 */ 22440 un->un_resvd_status &= ~SD_LOST_RESERVE; 22441 mutex_exit(SD_MUTEX(un)); 22442 22443 if (sd_reserve_release(sd_mhreq->dev, 22444 SD_RESERVE) == 0) { 22445 mutex_enter(SD_MUTEX(un)); 22446 un->un_resvd_status |= SD_RESERVE; 22447 mutex_exit(SD_MUTEX(un)); 22448 SD_INFO(SD_LOG_IOCTL_MHD, un, 22449 "sd_resv_reclaim_thread: " 22450 "Reservation Recovered\n"); 22451 } else { 22452 mutex_enter(SD_MUTEX(un)); 22453 un->un_resvd_status |= SD_LOST_RESERVE; 22454 mutex_exit(SD_MUTEX(un)); 22455 SD_INFO(SD_LOG_IOCTL_MHD, un, 22456 "sd_resv_reclaim_thread: Failed " 22457 "Reservation Recovery\n"); 22458 } 22459 } else { 22460 mutex_exit(SD_MUTEX(un)); 22461 } 22462 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22463 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22464 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22465 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22466 /* 22467 * wakeup the destroy thread if anyone is waiting on 22468 * us to complete. 22469 */ 22470 cv_signal(&sd_tr.srq_inprocess_cv); 22471 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22472 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22473 } 22474 22475 /* 22476 * cleanup the sd_tr structure now that this thread will not exist 22477 */ 22478 ASSERT(sd_tr.srq_thr_req_head == NULL); 22479 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22480 sd_tr.srq_resv_reclaim_thread = NULL; 22481 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22482 thread_exit(); 22483 } 22484 22485 22486 /* 22487 * Function: sd_rmv_resv_reclaim_req() 22488 * 22489 * Description: This function removes any pending reservation reclaim requests 22490 * for the specified device. 22491 * 22492 * Arguments: dev - the device 'dev_t' 22493 */ 22494 22495 static void 22496 sd_rmv_resv_reclaim_req(dev_t dev) 22497 { 22498 struct sd_thr_request *sd_mhreq; 22499 struct sd_thr_request *sd_prev; 22500 22501 /* Remove a reservation reclaim request from the list */ 22502 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22503 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22504 /* 22505 * We are attempting to reinstate reservation for 22506 * this device. We wait for sd_reserve_release() 22507 * to return before we return. 22508 */ 22509 cv_wait(&sd_tr.srq_inprocess_cv, 22510 &sd_tr.srq_resv_reclaim_mutex); 22511 } else { 22512 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22513 if (sd_mhreq && sd_mhreq->dev == dev) { 22514 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22515 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22516 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22517 return; 22518 } 22519 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22520 if (sd_mhreq && sd_mhreq->dev == dev) { 22521 break; 22522 } 22523 sd_prev = sd_mhreq; 22524 } 22525 if (sd_mhreq != NULL) { 22526 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22527 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22528 } 22529 } 22530 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22531 } 22532 22533 22534 /* 22535 * Function: sd_mhd_reset_notify_cb() 22536 * 22537 * Description: This is a call back function for scsi_reset_notify. This 22538 * function updates the softstate reserved status and logs the 22539 * reset. The driver scsi watch facility callback function 22540 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22541 * will reclaim the reservation. 
22542 * 22543 * Arguments: arg - driver soft state (unit) structure 22544 */ 22545 22546 static void 22547 sd_mhd_reset_notify_cb(caddr_t arg) 22548 { 22549 struct sd_lun *un = (struct sd_lun *)arg; 22550 22551 mutex_enter(SD_MUTEX(un)); 22552 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22553 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22554 SD_INFO(SD_LOG_IOCTL_MHD, un, 22555 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22556 } 22557 mutex_exit(SD_MUTEX(un)); 22558 } 22559 22560 22561 /* 22562 * Function: sd_take_ownership() 22563 * 22564 * Description: This routine implements an algorithm to achieve a stable 22565 * reservation on disks which don't implement priority reserve, 22566 * and makes sure that other hosts lose re-reservation attempts. 22567 * This algorithm consists of a loop that keeps issuing the RESERVE 22568 * for some period of time (min_ownership_delay, default 6 seconds). 22569 * During that loop, it looks to see if there has been a bus device 22570 * reset or bus reset (both of which cause an existing reservation 22571 * to be lost). If the reservation is lost, the RESERVE is reissued 22572 * until a period of min_ownership_delay with no resets has gone by, 22573 * or until max_ownership_delay has expired. This loop ensures that 22574 * the host really did manage to reserve the device, in spite of 22575 * resets. The looping for min_ownership_delay (default six 22576 * seconds) is important to early generation clustering products, 22577 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22578 * MHIOCENFAILFAST periodic timer of two seconds. By having 22579 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22580 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22581 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22582 * have already noticed, via the MHIOCENFAILFAST polling, that it 22583 * no longer "owns" the disk and will have panicked itself. Thus, 22584 * the host issuing the MHIOCTKOWN is assured (with timing 22585 * dependencies) that by the time it actually starts to use the 22586 * disk for real work, the old owner is no longer accessing it. 22587 * 22588 * min_ownership_delay is the minimum amount of time for which the 22589 * disk must be reserved continuously devoid of resets before the 22590 * MHIOCTKOWN ioctl will return success. 22591 * 22592 * max_ownership_delay indicates the amount of time by which the 22593 * take-ownership must succeed or time out with an error. 22594 * 22595 * Arguments: dev - the device 'dev_t' 22596 * *p - struct containing timing info. 22597 * 22598 * Return Code: 0 for success or error code 22599 */ 22600 22601 static int 22602 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22603 { 22604 struct sd_lun *un; 22605 int rval; 22606 int err; 22607 int reservation_count = 0; 22608 int min_ownership_delay = 6000000; /* in usec */ 22609 int max_ownership_delay = 30000000; /* in usec */ 22610 clock_t start_time; /* starting time of this algorithm */ 22611 clock_t end_time; /* time limit for giving up */ 22612 clock_t ownership_time; /* time limit for stable ownership */ 22613 clock_t current_time; 22614 clock_t previous_current_time; 22615 22616 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22617 return (ENXIO); 22618 } 22619 22620 /* 22621 * Attempt a device reservation. A priority reservation is requested.
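* * The loop below then re-issues plain RESERVEs. Worked example with * the defaults above: min_ownership_delay is 6 s and each pass delays * 500 ms, so roughly twelve consecutive conflict-free passes (and at * least four successful re-reserves) must elapse before stable * ownership is declared; any intervening reset or lost reservation * pushes ownership_time another 6 s out, up to the 30 s * max_ownership_delay limit.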
22622 */ 22623 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22624 != SD_SUCCESS) { 22625 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22626 "sd_take_ownership: return(1)=%d\n", rval); 22627 return (rval); 22628 } 22629 22630 /* Update the softstate reserved status to indicate the reservation */ 22631 mutex_enter(SD_MUTEX(un)); 22632 un->un_resvd_status |= SD_RESERVE; 22633 un->un_resvd_status &= 22634 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22635 mutex_exit(SD_MUTEX(un)); 22636 22637 if (p != NULL) { 22638 if (p->min_ownership_delay != 0) { 22639 min_ownership_delay = p->min_ownership_delay * 1000; 22640 } 22641 if (p->max_ownership_delay != 0) { 22642 max_ownership_delay = p->max_ownership_delay * 1000; 22643 } 22644 } 22645 SD_INFO(SD_LOG_IOCTL_MHD, un, 22646 "sd_take_ownership: min, max delays: %d, %d\n", 22647 min_ownership_delay, max_ownership_delay); 22648 22649 start_time = ddi_get_lbolt(); 22650 current_time = start_time; 22651 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22652 end_time = start_time + drv_usectohz(max_ownership_delay); 22653 22654 while (current_time - end_time < 0) { 22655 delay(drv_usectohz(500000)); 22656 22657 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22658 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22659 mutex_enter(SD_MUTEX(un)); 22660 rval = (un->un_resvd_status & 22661 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22662 mutex_exit(SD_MUTEX(un)); 22663 break; 22664 } 22665 } 22666 previous_current_time = current_time; 22667 current_time = ddi_get_lbolt(); 22668 mutex_enter(SD_MUTEX(un)); 22669 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22670 ownership_time = ddi_get_lbolt() + 22671 drv_usectohz(min_ownership_delay); 22672 reservation_count = 0; 22673 } else { 22674 reservation_count++; 22675 } 22676 un->un_resvd_status |= SD_RESERVE; 22677 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22678 mutex_exit(SD_MUTEX(un)); 22679 22680 SD_INFO(SD_LOG_IOCTL_MHD, un, 22681 "sd_take_ownership: ticks for loop iteration=%ld, " 22682 "reservation=%s\n", (current_time - previous_current_time), 22683 reservation_count ? 
"ok" : "reclaimed"); 22684 22685 if (current_time - ownership_time >= 0 && 22686 reservation_count >= 4) { 22687 rval = 0; /* Achieved a stable ownership */ 22688 break; 22689 } 22690 if (current_time - end_time >= 0) { 22691 rval = EACCES; /* No ownership in max possible time */ 22692 break; 22693 } 22694 } 22695 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22696 "sd_take_ownership: return(2)=%d\n", rval); 22697 return (rval); 22698 } 22699 22700 22701 /* 22702 * Function: sd_reserve_release() 22703 * 22704 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22705 * PRIORITY RESERVE commands based on a user specified command type 22706 * 22707 * Arguments: dev - the device 'dev_t' 22708 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22709 * SD_RESERVE, SD_RELEASE 22710 * 22711 * Return Code: 0 or Error Code 22712 */ 22713 22714 static int 22715 sd_reserve_release(dev_t dev, int cmd) 22716 { 22717 struct uscsi_cmd *com = NULL; 22718 struct sd_lun *un = NULL; 22719 char cdb[CDB_GROUP0]; 22720 int rval; 22721 22722 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22723 (cmd == SD_PRIORITY_RESERVE)); 22724 22725 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22726 return (ENXIO); 22727 } 22728 22729 /* instantiate and initialize the command and cdb */ 22730 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22731 bzero(cdb, CDB_GROUP0); 22732 com->uscsi_flags = USCSI_SILENT; 22733 com->uscsi_timeout = un->un_reserve_release_time; 22734 com->uscsi_cdblen = CDB_GROUP0; 22735 com->uscsi_cdb = cdb; 22736 if (cmd == SD_RELEASE) { 22737 cdb[0] = SCMD_RELEASE; 22738 } else { 22739 cdb[0] = SCMD_RESERVE; 22740 } 22741 22742 /* Send the command. */ 22743 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22744 SD_PATH_STANDARD); 22745 22746 /* 22747 * "break" a reservation that is held by another host, by issuing a 22748 * reset if priority reserve is desired, and we could not get the 22749 * device. 22750 */ 22751 if ((cmd == SD_PRIORITY_RESERVE) && 22752 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22753 /* 22754 * First try to reset the LUN. If we cannot, then try a target 22755 * reset, followed by a bus reset if the target reset fails. 22756 */ 22757 int reset_retval = 0; 22758 if (un->un_f_lun_reset_enabled == TRUE) { 22759 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22760 } 22761 if (reset_retval == 0) { 22762 /* The LUN reset either failed or was not issued */ 22763 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22764 } 22765 if ((reset_retval == 0) && 22766 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22767 rval = EIO; 22768 kmem_free(com, sizeof (*com)); 22769 return (rval); 22770 } 22771 22772 bzero(com, sizeof (struct uscsi_cmd)); 22773 com->uscsi_flags = USCSI_SILENT; 22774 com->uscsi_cdb = cdb; 22775 com->uscsi_cdblen = CDB_GROUP0; 22776 com->uscsi_timeout = 5; 22777 22778 /* 22779 * Reissue the last reserve command, this time without request 22780 * sense. Assume that it is just a regular reserve command. 22781 */ 22782 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22783 SD_PATH_STANDARD); 22784 } 22785 22786 /* Return an error if still getting a reservation conflict. 
*/ 22787 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22788 rval = EACCES; 22789 } 22790 22791 kmem_free(com, sizeof (*com)); 22792 return (rval); 22793 } 22794 22795 22796 #define SD_NDUMP_RETRIES 12 22797 /* 22798 * System Crash Dump routine 22799 */ 22800 22801 static int 22802 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22803 { 22804 int instance; 22805 int partition; 22806 int i; 22807 int err; 22808 struct sd_lun *un; 22809 struct scsi_pkt *wr_pktp; 22810 struct buf *wr_bp; 22811 struct buf wr_buf; 22812 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22813 daddr_t tgt_blkno; /* rmw - blkno for target */ 22814 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22815 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22816 size_t io_start_offset; 22817 int doing_rmw = FALSE; 22818 int rval; 22819 ssize_t dma_resid; 22820 daddr_t oblkno; 22821 diskaddr_t nblks = 0; 22822 diskaddr_t start_block; 22823 22824 instance = SDUNIT(dev); 22825 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22826 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22827 return (ENXIO); 22828 } 22829 22830 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22831 22832 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22833 22834 partition = SDPART(dev); 22835 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22836 22837 /* Validate the blocks to dump against the partition size. */ 22838 22839 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22840 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22841 22842 if ((blkno + nblk) > nblks) { 22843 SD_TRACE(SD_LOG_DUMP, un, 22844 "sddump: dump range larger than partition: " 22845 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22846 blkno, nblk, nblks); 22847 return (EINVAL); 22848 } 22849 22850 mutex_enter(&un->un_pm_mutex); 22851 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22852 struct scsi_pkt *start_pktp; 22853 22854 mutex_exit(&un->un_pm_mutex); 22855 22856 /* 22857 * use pm framework to power on HBA 1st 22858 */ 22859 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22860 22861 /* 22862 * Dump no longer uses sdpower to power on a device; it's 22863 * in-line here so it can be done in polled mode. 22864 */ 22865 22866 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22867 22868 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22869 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22870 22871 if (start_pktp == NULL) { 22872 /* We were not given a SCSI packet, fail. */ 22873 return (EIO); 22874 } 22875 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22876 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22877 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22878 start_pktp->pkt_flags = FLAG_NOINTR; 22879 22880 mutex_enter(SD_MUTEX(un)); 22881 SD_FILL_SCSI1_LUN(un, start_pktp); 22882 mutex_exit(SD_MUTEX(un)); 22883 /* 22884 * Scsi_poll returns 0 (success) if the command completes and 22885 * the status block is STATUS_GOOD. 22886 */ 22887 if (sd_scsi_poll(un, start_pktp) != 0) { 22888 scsi_destroy_pkt(start_pktp); 22889 return (EIO); 22890 } 22891 scsi_destroy_pkt(start_pktp); 22892 (void) sd_ddi_pm_resume(un); 22893 } else { 22894 mutex_exit(&un->un_pm_mutex); 22895 } 22896 22897 mutex_enter(SD_MUTEX(un)); 22898 un->un_throttle = 0; 22899 22900 /* 22901 * The first time through, reset the specific target device. 22902 * However, when cpr calls sddump we know that sd is in 22903 * a good state so no bus reset is required. 22904 * Clear sense data via Request Sense cmd.
22905 * In sddump we don't care about allow_bus_device_reset anymore 22906 */ 22907 22908 if ((un->un_state != SD_STATE_SUSPENDED) && 22909 (un->un_state != SD_STATE_DUMPING)) { 22910 22911 New_state(un, SD_STATE_DUMPING); 22912 22913 if (un->un_f_is_fibre == FALSE) { 22914 mutex_exit(SD_MUTEX(un)); 22915 /* 22916 * Attempt a bus reset for parallel scsi. 22917 * 22918 * Note: A bus reset is required because on some host 22919 * systems (e.g. the E420R) a bus device reset is 22920 * insufficient to reset the state of the target. 22921 * 22922 * Note: Don't issue the reset for fibre-channel, 22923 * because this tends to hang the bus (loop) for 22924 * too long while everyone is logging out and in 22925 * and the deadman timer for dumping will fire 22926 * before the dump is complete. 22927 */ 22928 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22929 mutex_enter(SD_MUTEX(un)); 22930 Restore_state(un); 22931 mutex_exit(SD_MUTEX(un)); 22932 return (EIO); 22933 } 22934 22935 /* Delay to give the device some recovery time. */ 22936 drv_usecwait(10000); 22937 22938 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22939 SD_INFO(SD_LOG_DUMP, un, 22940 "sddump: sd_send_polled_RQS failed\n"); 22941 } 22942 mutex_enter(SD_MUTEX(un)); 22943 } 22944 } 22945 22946 /* 22947 * Convert the partition-relative block number to a 22948 * disk physical block number. 22949 */ 22950 blkno += start_block; 22951 22952 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22953 22954 22955 /* 22956 * Check if the device has a non-512 block size. 22957 */ 22958 wr_bp = NULL; 22959 if (NOT_DEVBSIZE(un)) { 22960 tgt_byte_offset = blkno * un->un_sys_blocksize; 22961 tgt_byte_count = nblk * un->un_sys_blocksize; 22962 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22963 (tgt_byte_count % un->un_tgt_blocksize)) { 22964 doing_rmw = TRUE; 22965 /* 22966 * Calculate the block number and number of blocks 22967 * in terms of the media block size. 22968 */ 22969 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22970 tgt_nblk = 22971 ((tgt_byte_offset + tgt_byte_count + 22972 (un->un_tgt_blocksize - 1)) / 22973 un->un_tgt_blocksize) - tgt_blkno; 22974 22975 /* 22976 * Invoke the routine which is going to do the read 22977 * part of the read-modify-write. 22978 * Note that this routine returns a pointer to 22979 * a valid bp in wr_bp. 22980 */ 22981 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22982 &wr_bp); 22983 if (err) { 22984 mutex_exit(SD_MUTEX(un)); 22985 return (err); 22986 } 22987 /* 22988 * Offset is calculated as: 22989 * (original block # * system block size) - 22990 * (new block # * target block size) 22991 */ 22992 io_start_offset = 22993 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22994 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22995 22996 ASSERT((io_start_offset >= 0) && 22997 (io_start_offset < un->un_tgt_blocksize)); 22998 /* 22999 * Do the modify portion of read modify write.
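* * Worked example (assuming 2048-byte media blocks and 512-byte system * blocks): dumping blkno 3, nblk 2 gives tgt_byte_offset 1536 and * tgt_byte_count 1024, neither aligned to 2048, so tgt_blkno = 0 and * tgt_nblk = 2 are read, and the new data lands at io_start_offset = * 3*512 - 0*2048 = 1536 in the staged buffer.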
23000 */ 23001 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23002 (size_t)nblk * un->un_sys_blocksize); 23003 } else { 23004 doing_rmw = FALSE; 23005 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23006 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23007 } 23008 23009 /* Convert blkno and nblk to target blocks */ 23010 blkno = tgt_blkno; 23011 nblk = tgt_nblk; 23012 } else { 23013 wr_bp = &wr_buf; 23014 bzero(wr_bp, sizeof (struct buf)); 23015 wr_bp->b_flags = B_BUSY; 23016 wr_bp->b_un.b_addr = addr; 23017 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23018 wr_bp->b_resid = 0; 23019 } 23020 23021 mutex_exit(SD_MUTEX(un)); 23022 23023 /* 23024 * Obtain a SCSI packet for the write command. 23025 * It should be safe to call the allocator here without 23026 * worrying about being locked for DVMA mapping because 23027 * the address we're passed is already a DVMA mapping 23028 * 23029 * We are also not going to worry about semaphore ownership 23030 * in the dump buffer. Dumping is single threaded at present. 23031 */ 23032 23033 wr_pktp = NULL; 23034 23035 dma_resid = wr_bp->b_bcount; 23036 oblkno = blkno; 23037 23038 while (dma_resid != 0) { 23039 23040 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23041 wr_bp->b_flags &= ~B_ERROR; 23042 23043 if (un->un_partial_dma_supported == 1) { 23044 blkno = oblkno + 23045 ((wr_bp->b_bcount - dma_resid) / 23046 un->un_tgt_blocksize); 23047 nblk = dma_resid / un->un_tgt_blocksize; 23048 23049 if (wr_pktp) { 23050 /* 23051 * Partial DMA transfers after initial transfer 23052 */ 23053 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23054 blkno, nblk); 23055 } else { 23056 /* Initial transfer */ 23057 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23058 un->un_pkt_flags, NULL_FUNC, NULL, 23059 blkno, nblk); 23060 } 23061 } else { 23062 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23063 0, NULL_FUNC, NULL, blkno, nblk); 23064 } 23065 23066 if (rval == 0) { 23067 /* We were given a SCSI packet, continue. 
*/ 23068 break; 23069 } 23070 23071 if (i == 0) { 23072 if (wr_bp->b_flags & B_ERROR) { 23073 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23074 "no resources for dumping; " 23075 "error code: 0x%x, retrying", 23076 geterror(wr_bp)); 23077 } else { 23078 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23079 "no resources for dumping; retrying"); 23080 } 23081 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23082 if (wr_bp->b_flags & B_ERROR) { 23083 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23084 "no resources for dumping; error code: " 23085 "0x%x, retrying\n", geterror(wr_bp)); 23086 } 23087 } else { 23088 if (wr_bp->b_flags & B_ERROR) { 23089 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23090 "no resources for dumping; " 23091 "error code: 0x%x, retries failed, " 23092 "giving up.\n", geterror(wr_bp)); 23093 } else { 23094 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23095 "no resources for dumping; " 23096 "retries failed, giving up.\n"); 23097 } 23098 mutex_enter(SD_MUTEX(un)); 23099 Restore_state(un); 23100 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23101 mutex_exit(SD_MUTEX(un)); 23102 scsi_free_consistent_buf(wr_bp); 23103 } else { 23104 mutex_exit(SD_MUTEX(un)); 23105 } 23106 return (EIO); 23107 } 23108 drv_usecwait(10000); 23109 } 23110 23111 if (un->un_partial_dma_supported == 1) { 23112 /* 23113 * save the resid from PARTIAL_DMA 23114 */ 23115 dma_resid = wr_pktp->pkt_resid; 23116 if (dma_resid != 0) 23117 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23118 wr_pktp->pkt_resid = 0; 23119 } else { 23120 dma_resid = 0; 23121 } 23122 23123 /* SunBug 1222170 */ 23124 wr_pktp->pkt_flags = FLAG_NOINTR; 23125 23126 err = EIO; 23127 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23128 23129 /* 23130 * Scsi_poll returns 0 (success) if the command completes and 23131 * the status block is STATUS_GOOD. We should only check 23132 * errors if this condition is not true. Even then we should 23133 * send our own request sense packet only if we have a check 23134 * condition and auto request sense has not been performed by 23135 * the hba. 23136 */ 23137 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23138 23139 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23140 (wr_pktp->pkt_resid == 0)) { 23141 err = SD_SUCCESS; 23142 break; 23143 } 23144 23145 /* 23146 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23147 */ 23148 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23149 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23150 "Error while dumping state...Device is gone\n"); 23151 break; 23152 } 23153 23154 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23155 SD_INFO(SD_LOG_DUMP, un, 23156 "sddump: write failed with CHECK, try # %d\n", i); 23157 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23158 (void) sd_send_polled_RQS(un); 23159 } 23160 23161 continue; 23162 } 23163 23164 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23165 int reset_retval = 0; 23166 23167 SD_INFO(SD_LOG_DUMP, un, 23168 "sddump: write failed with BUSY, try # %d\n", i); 23169 23170 if (un->un_f_lun_reset_enabled == TRUE) { 23171 reset_retval = scsi_reset(SD_ADDRESS(un), 23172 RESET_LUN); 23173 } 23174 if (reset_retval == 0) { 23175 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23176 } 23177 (void) sd_send_polled_RQS(un); 23178 23179 } else { 23180 SD_INFO(SD_LOG_DUMP, un, 23181 "sddump: write failed with 0x%x, try # %d\n", 23182 SD_GET_PKT_STATUS(wr_pktp), i); 23183 mutex_enter(SD_MUTEX(un)); 23184 sd_reset_target(un, wr_pktp); 23185 mutex_exit(SD_MUTEX(un)); 23186 } 23187 23188 /* 23189 * If we are not getting anywhere with lun/target resets, 23190 * let's reset the bus. 23191 */ 23192 if (i == SD_NDUMP_RETRIES/2) { 23193 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23194 (void) sd_send_polled_RQS(un); 23195 } 23196 } 23197 } 23198 23199 scsi_destroy_pkt(wr_pktp); 23200 mutex_enter(SD_MUTEX(un)); 23201 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23202 mutex_exit(SD_MUTEX(un)); 23203 scsi_free_consistent_buf(wr_bp); 23204 } else { 23205 mutex_exit(SD_MUTEX(un)); 23206 } 23207 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23208 return (err); 23209 } 23210 23211 /* 23212 * Function: sd_scsi_poll() 23213 * 23214 * Description: This is a wrapper for the scsi_poll call. 23215 * 23216 * Arguments: sd_lun - The unit structure 23217 * scsi_pkt - The scsi packet being sent to the device. 23218 * 23219 * Return Code: 0 - Command completed successfully with good status 23220 * -1 - Command failed. This could indicate a check condition 23221 * or other status value requiring recovery action. 23222 * 23223 */ 23224 23225 static int 23226 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23227 { 23228 int status; 23229 23230 ASSERT(un != NULL); 23231 ASSERT(!mutex_owned(SD_MUTEX(un))); 23232 ASSERT(pktp != NULL); 23233 23234 status = SD_SUCCESS; 23235 23236 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23237 pktp->pkt_flags |= un->un_tagflags; 23238 pktp->pkt_flags &= ~FLAG_NODISCON; 23239 } 23240 23241 status = sd_ddi_scsi_poll(pktp); 23242 /* 23243 * Scsi_poll returns 0 (success) if the command completes and the 23244 * status block is STATUS_GOOD. We should only check errors if this 23245 * condition is not true. Even then we should send our own request 23246 * sense packet only if we have a check condition and auto 23247 * request sense has not been performed by the hba. 23248 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23249 */ 23250 if ((status != SD_SUCCESS) && 23251 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23252 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23253 (pktp->pkt_reason != CMD_DEV_GONE)) 23254 (void) sd_send_polled_RQS(un); 23255 23256 return (status); 23257 } 23258 23259 /* 23260 * Function: sd_send_polled_RQS() 23261 * 23262 * Description: This sends the request sense command to a device. 
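* It reuses the pre-allocated un_rqs_pktp/un_rqs_bp pair in * polled (FLAG_NOINTR) mode, guarded by un_sense_isbusy, so it * is safe to call from single-threaded contexts such as sddump.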
23263 * 23264 * Arguments: sd_lun - The unit structure 23265 * 23266 * Return Code: 0 - Command completed successfully with good status 23267 * -1 - Command failed. 23268 * 23269 */ 23270 23271 static int 23272 sd_send_polled_RQS(struct sd_lun *un) 23273 { 23274 int ret_val; 23275 struct scsi_pkt *rqs_pktp; 23276 struct buf *rqs_bp; 23277 23278 ASSERT(un != NULL); 23279 ASSERT(!mutex_owned(SD_MUTEX(un))); 23280 23281 ret_val = SD_SUCCESS; 23282 23283 rqs_pktp = un->un_rqs_pktp; 23284 rqs_bp = un->un_rqs_bp; 23285 23286 mutex_enter(SD_MUTEX(un)); 23287 23288 if (un->un_sense_isbusy) { 23289 ret_val = SD_FAILURE; 23290 mutex_exit(SD_MUTEX(un)); 23291 return (ret_val); 23292 } 23293 23294 /* 23295 * If the request sense buffer (and packet) is not in use, 23296 * set un_sense_isbusy and send our packet 23297 */ 23298 un->un_sense_isbusy = 1; 23299 rqs_pktp->pkt_resid = 0; 23300 rqs_pktp->pkt_reason = 0; 23301 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23302 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23303 23304 mutex_exit(SD_MUTEX(un)); 23305 23306 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23307 " 0x%p\n", rqs_bp->b_un.b_addr); 23308 23309 /* 23310 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23311 * axle - it has a call into us! 23312 */ 23313 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23314 SD_INFO(SD_LOG_COMMON, un, 23315 "sd_send_polled_RQS: RQS failed\n"); 23316 } 23317 23318 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23319 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23320 23321 mutex_enter(SD_MUTEX(un)); 23322 un->un_sense_isbusy = 0; 23323 mutex_exit(SD_MUTEX(un)); 23324 23325 return (ret_val); 23326 } 23327 23328 /* 23329 * Defines needed for localized version of the scsi_poll routine. 23330 */ 23331 #define SD_CSEC 10000 /* usecs */ 23332 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 23333 23334 23335 /* 23336 * Function: sd_ddi_scsi_poll() 23337 * 23338 * Description: Localized version of the scsi_poll routine. The purpose is to 23339 * send a scsi_pkt to a device as a polled command. This version 23340 * ensures more robust handling of transport errors. 23341 * Specifically, this routine handles the not-ready to ready 23342 * transition after power-up or reset of Sonoma devices; this can 23343 * take up to 45 seconds for power-on and 20 seconds for reset 23344 * of a Sonoma LUN. 23345 * 23346 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23347 * 23348 * Return Code: 0 - Command completed successfully with good status 23349 * -1 - Command failed. 23350 * 23351 */ 23352 23353 static int 23354 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23355 { 23356 int busy_count; 23357 int timeout; 23358 int rval = SD_FAILURE; 23359 int savef; 23360 uint8_t *sensep; 23361 long savet; 23362 void (*savec)(); 23363 /* 23364 * The following is defined in machdep.c and is used in determining if 23365 * the scsi transport system will do polled I/O instead of interrupt 23366 * I/O when called from xx_dump().
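* * As a note on the timing units used below: with SD_CSEC at 10000 usec, * busy_count is kept in 10 msec ticks. A 60 second pkt_time yields a * budget of 6000 ticks, and each one-second Busy/Not-Ready delay charges * SD_SEC_TO_CSEC (100) ticks so the overall timeout accounting stays * consistent.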
23367 */ 23368 extern int do_polled_io; 23369 23370 /* 23371 * save old flags in pkt, to restore at end 23372 */ 23373 savef = pkt->pkt_flags; 23374 savec = pkt->pkt_comp; 23375 savet = pkt->pkt_time; 23376 23377 pkt->pkt_flags |= FLAG_NOINTR; 23378 23379 /* 23380 * XXX there is nothing in the SCSA spec that states that we should not 23381 * do a callback for polled cmds; however, removing this will break sd 23382 * and probably other target drivers 23383 */ 23384 pkt->pkt_comp = NULL; 23385 23386 /* 23387 * we don't like a polled command without timeout. 23388 * 60 seconds seems long enough. 23389 */ 23390 if (pkt->pkt_time == 0) { 23391 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23392 } 23393 23394 /* 23395 * Send polled cmd. 23396 * 23397 * We do some error recovery for various errors. Tran_busy, 23398 * queue full, and non-dispatched commands are retried every 10 msec. 23399 * as they are typically transient failures. Busy status and Not 23400 * Ready are retried every second as this status takes a while to 23401 * change. Unit attention is retried for pkt_time (60) times 23402 * with no delay. 23403 */ 23404 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 23405 23406 for (busy_count = 0; busy_count < timeout; busy_count++) { 23407 int rc; 23408 int poll_delay; 23409 23410 /* 23411 * Initialize pkt status variables. 23412 */ 23413 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23414 23415 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23416 if (rc != TRAN_BUSY) { 23417 /* Transport failed - give up. */ 23418 break; 23419 } else { 23420 /* Transport busy - try again. */ 23421 poll_delay = 1 * SD_CSEC; /* 10 msec */ 23422 } 23423 } else { 23424 /* 23425 * Transport accepted - check pkt status. 23426 */ 23427 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23428 if (pkt->pkt_reason == CMD_CMPLT && 23429 rc == STATUS_CHECK && 23430 pkt->pkt_state & STATE_ARQ_DONE) { 23431 struct scsi_arq_status *arqstat = 23432 (struct scsi_arq_status *)(pkt->pkt_scbp); 23433 23434 sensep = (uint8_t *)&arqstat->sts_sensedata; 23435 } else { 23436 sensep = NULL; 23437 } 23438 23439 if ((pkt->pkt_reason == CMD_CMPLT) && 23440 (rc == STATUS_GOOD)) { 23441 /* No error - we're done */ 23442 rval = SD_SUCCESS; 23443 break; 23444 23445 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23446 /* Lost connection - give up */ 23447 break; 23448 23449 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23450 (pkt->pkt_state == 0)) { 23451 /* Pkt not dispatched - try again. */ 23452 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23453 23454 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23455 (rc == STATUS_QFULL)) { 23456 /* Queue full - try again. */ 23457 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23458 23459 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23460 (rc == STATUS_BUSY)) { 23461 /* Busy - try again. */ 23462 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23463 busy_count += (SD_SEC_TO_CSEC - 1); 23464 23465 } else if ((sensep != NULL) && 23466 (scsi_sense_key(sensep) == 23467 KEY_UNIT_ATTENTION)) { 23468 /* Unit Attention - try again */ 23469 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23470 continue; 23471 23472 } else if ((sensep != NULL) && 23473 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23474 (scsi_sense_asc(sensep) == 0x04) && 23475 (scsi_sense_ascq(sensep) == 0x01)) { 23476 /* Not ready -> ready - try again. */ 23477 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23478 busy_count += (SD_SEC_TO_CSEC - 1); 23479 23480 } else { 23481 /* BAD status - give up. 
*/ 23482 break; 23483 } 23484 } 23485 23486 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23487 !do_polled_io) { 23488 delay(drv_usectohz(poll_delay)); 23489 } else { 23490 /* we busy wait during cpr_dump or interrupt threads */ 23491 drv_usecwait(poll_delay); 23492 } 23493 } 23494 23495 pkt->pkt_flags = savef; 23496 pkt->pkt_comp = savec; 23497 pkt->pkt_time = savet; 23498 return (rval); 23499 } 23500 23501 23502 /* 23503 * Function: sd_persistent_reservation_in_read_keys 23504 * 23505 * Description: This routine is the driver entry point for handling CD-ROM 23506 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23507 * by sending the SCSI-3 PRIN commands to the device. 23508 * Processes the read keys command response by copying the 23509 * reservation key information into the user provided buffer. 23510 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23511 * 23512 * Arguments: un - Pointer to soft state struct for the target. 23513 * usrp - user provided pointer to multihost Persistent In Read 23514 * Keys structure (mhioc_inkeys_t) 23515 * flag - this argument is a pass through to ddi_copyxxx() 23516 * directly from the mode argument of ioctl(). 23517 * 23518 * Return Code: 0 - Success 23519 * EACCES 23520 * ENOTSUP 23521 * errno return code from sd_send_scsi_cmd() 23522 * 23523 * Context: Can sleep. Does not return until command is completed. 23524 */ 23525 23526 static int 23527 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23528 mhioc_inkeys_t *usrp, int flag) 23529 { 23530 #ifdef _MULTI_DATAMODEL 23531 struct mhioc_key_list32 li32; 23532 #endif 23533 sd_prin_readkeys_t *in; 23534 mhioc_inkeys_t *ptr; 23535 mhioc_key_list_t li; 23536 uchar_t *data_bufp; 23537 int data_len; 23538 int rval; 23539 size_t copysz; 23540 23541 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23542 return (EINVAL); 23543 } 23544 bzero(&li, sizeof (mhioc_key_list_t)); 23545 23546 /* 23547 * Get the listsize from user 23548 */ 23549 #ifdef _MULTI_DATAMODEL 23550 23551 switch (ddi_model_convert_from(flag & FMODELS)) { 23552 case DDI_MODEL_ILP32: 23553 copysz = sizeof (struct mhioc_key_list32); 23554 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23555 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23556 "sd_persistent_reservation_in_read_keys: " 23557 "failed ddi_copyin: mhioc_key_list32_t\n"); 23558 rval = EFAULT; 23559 goto done; 23560 } 23561 li.listsize = li32.listsize; 23562 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23563 break; 23564 23565 case DDI_MODEL_NONE: 23566 copysz = sizeof (mhioc_key_list_t); 23567 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23568 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23569 "sd_persistent_reservation_in_read_keys: " 23570 "failed ddi_copyin: mhioc_key_list_t\n"); 23571 rval = EFAULT; 23572 goto done; 23573 } 23574 break; 23575 } 23576 23577 #else /* ! 
_MULTI_DATAMODEL */ 23578 copysz = sizeof (mhioc_key_list_t); 23579 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23580 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23581 "sd_persistent_reservation_in_read_keys: " 23582 "failed ddi_copyin: mhioc_key_list_t\n"); 23583 rval = EFAULT; 23584 goto done; 23585 } 23586 #endif 23587 23588 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23589 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23590 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23591 23592 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23593 data_len, data_bufp)) != 0) { 23594 goto done; 23595 } 23596 in = (sd_prin_readkeys_t *)data_bufp; 23597 ptr->generation = BE_32(in->generation); 23598 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23599 23600 /* 23601 * Return the min(listsize, listlen) keys 23602 */ 23603 #ifdef _MULTI_DATAMODEL 23604 23605 switch (ddi_model_convert_from(flag & FMODELS)) { 23606 case DDI_MODEL_ILP32: 23607 li32.listlen = li.listlen; 23608 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23609 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23610 "sd_persistent_reservation_in_read_keys: " 23611 "failed ddi_copyout: mhioc_key_list32_t\n"); 23612 rval = EFAULT; 23613 goto done; 23614 } 23615 break; 23616 23617 case DDI_MODEL_NONE: 23618 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23619 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23620 "sd_persistent_reservation_in_read_keys: " 23621 "failed ddi_copyout: mhioc_key_list_t\n"); 23622 rval = EFAULT; 23623 goto done; 23624 } 23625 break; 23626 } 23627 23628 #else /* ! _MULTI_DATAMODEL */ 23629 23630 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23631 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23632 "sd_persistent_reservation_in_read_keys: " 23633 "failed ddi_copyout: mhioc_key_list_t\n"); 23634 rval = EFAULT; 23635 goto done; 23636 } 23637 23638 #endif /* _MULTI_DATAMODEL */ 23639 23640 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23641 li.listsize * MHIOC_RESV_KEY_SIZE); 23642 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23643 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23644 "sd_persistent_reservation_in_read_keys: " 23645 "failed ddi_copyout: keylist\n"); 23646 rval = EFAULT; 23647 } 23648 done: 23649 kmem_free(data_bufp, data_len); 23650 return (rval); 23651 } 23652 23653 23654 /* 23655 * Function: sd_persistent_reservation_in_read_resv 23656 * 23657 * Description: This routine is the driver entry point for handling CD-ROM 23658 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23659 * by sending the SCSI-3 PRIN commands to the device. 23660 * Process the read persistent reservations command response by 23661 * copying the reservation information into the user provided 23662 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23663 * 23664 * Arguments: un - Pointer to soft state struct for the target. 23665 * usrp - user provided pointer to multihost Persistent In Read 23666 * Reservations structure (mhioc_inresvs_t) 23667 * flag - this argument is a pass through to ddi_copyxxx() 23668 * directly from the mode argument of ioctl(). 23669 * 23670 * Return Code: 0 - Success 23671 * EACCES 23672 * ENOTSUP 23673 * errno return code from sd_send_scsi_cmd() 23674 * 23675 * Context: Can sleep. Does not return until command is completed.
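* * Hedged userland sketch of the matching two-step MHIOCGRP_INRESV call * (illustrative only; 'fd' is assumed to be an open descriptor for the * disk). The first call, with listsize 0, just learns listlen; the * caller then allocates and repeats: * * mhioc_resv_desc_list_t rl; * mhioc_inresvs_t arg; * rl.listsize = 0; * rl.list = NULL; * arg.li = &rl; * (void) ioctl(fd, MHIOCGRP_INRESV, &arg); * rl.listsize = rl.listlen; * rl.list = calloc(rl.listlen, sizeof (mhioc_resv_desc_t)); * (void) ioctl(fd, MHIOCGRP_INRESV, &arg);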
23676 */ 23677 23678 static int 23679 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23680 mhioc_inresvs_t *usrp, int flag) 23681 { 23682 #ifdef _MULTI_DATAMODEL 23683 struct mhioc_resv_desc_list32 resvlist32; 23684 #endif 23685 sd_prin_readresv_t *in; 23686 mhioc_inresvs_t *ptr; 23687 sd_readresv_desc_t *readresv_ptr; 23688 mhioc_resv_desc_list_t resvlist; 23689 mhioc_resv_desc_t resvdesc; 23690 uchar_t *data_bufp; 23691 int data_len; 23692 int rval; 23693 int i; 23694 size_t copysz; 23695 mhioc_resv_desc_t *bufp; 23696 23697 if ((ptr = usrp) == NULL) { 23698 return (EINVAL); 23699 } 23700 23701 /* 23702 * Get the listsize from user 23703 */ 23704 #ifdef _MULTI_DATAMODEL 23705 switch (ddi_model_convert_from(flag & FMODELS)) { 23706 case DDI_MODEL_ILP32: 23707 copysz = sizeof (struct mhioc_resv_desc_list32); 23708 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23709 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23710 "sd_persistent_reservation_in_read_resv: " 23711 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23712 rval = EFAULT; 23713 goto done; 23714 } 23715 resvlist.listsize = resvlist32.listsize; 23716 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23717 break; 23718 23719 case DDI_MODEL_NONE: 23720 copysz = sizeof (mhioc_resv_desc_list_t); 23721 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23722 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23723 "sd_persistent_reservation_in_read_resv: " 23724 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23725 rval = EFAULT; 23726 goto done; 23727 } 23728 break; 23729 } 23730 #else /* ! _MULTI_DATAMODEL */ 23731 copysz = sizeof (mhioc_resv_desc_list_t); 23732 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23733 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23734 "sd_persistent_reservation_in_read_resv: " 23735 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23736 rval = EFAULT; 23737 goto done; 23738 } 23739 #endif /* ! _MULTI_DATAMODEL */ 23740 23741 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23742 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23743 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23744 23745 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23746 data_len, data_bufp)) != 0) { 23747 goto done; 23748 } 23749 in = (sd_prin_readresv_t *)data_bufp; 23750 ptr->generation = BE_32(in->generation); 23751 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23752 23753 /* 23754 * Return the min(listsize, listlen) keys 23755 */ 23756 #ifdef _MULTI_DATAMODEL 23757 23758 switch (ddi_model_convert_from(flag & FMODELS)) { 23759 case DDI_MODEL_ILP32: 23760 resvlist32.listlen = resvlist.listlen; 23761 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23762 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23763 "sd_persistent_reservation_in_read_resv: " 23764 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23765 rval = EFAULT; 23766 goto done; 23767 } 23768 break; 23769 23770 case DDI_MODEL_NONE: 23771 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23772 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23773 "sd_persistent_reservation_in_read_resv: " 23774 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23775 rval = EFAULT; 23776 goto done; 23777 } 23778 break; 23779 } 23780 23781 #else /* ! _MULTI_DATAMODEL */ 23782 23783 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23784 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23785 "sd_persistent_reservation_in_read_resv: " 23786 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23787 rval = EFAULT; 23788 goto done; 23789 } 23790 23791 #endif /* !
_MULTI_DATAMODEL */ 23792 23793 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23794 bufp = resvlist.list; 23795 copysz = sizeof (mhioc_resv_desc_t); 23796 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23797 i++, readresv_ptr++, bufp++) { 23798 23799 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23800 MHIOC_RESV_KEY_SIZE); 23801 resvdesc.type = readresv_ptr->type; 23802 resvdesc.scope = readresv_ptr->scope; 23803 resvdesc.scope_specific_addr = 23804 BE_32(readresv_ptr->scope_specific_addr); 23805 23806 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23807 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23808 "sd_persistent_reservation_in_read_resv: " 23809 "failed ddi_copyout: resvlist\n"); 23810 rval = EFAULT; 23811 goto done; 23812 } 23813 } 23814 done: 23815 kmem_free(data_bufp, data_len); 23816 return (rval); 23817 } 23818 23819 23820 /* 23821 * Function: sr_change_blkmode() 23822 * 23823 * Description: This routine is the driver entry point for handling CD-ROM 23824 * block mode ioctl requests. Support for returning and changing 23825 * the current block size in use by the device is implemented. The 23826 * LBA size is changed via a MODE SELECT Block Descriptor. 23827 * 23828 * This routine issues a mode sense with an allocation length of 23829 * 12 bytes for the mode page header and a single block descriptor. 23830 * 23831 * Arguments: dev - the device 'dev_t' 23832 * cmd - the request type; one of CDROMGBLKMODE (get) or 23833 * CDROMSBLKMODE (set) 23834 * data - current block size or requested block size 23835 * flag - this argument is a pass through to ddi_copyxxx() directly 23836 * from the mode argument of ioctl(). 23837 * 23838 * Return Code: the code returned by sd_send_scsi_cmd() 23839 * EINVAL if invalid arguments are provided 23840 * EFAULT if ddi_copyxxx() fails 23841 * ENXIO if fail ddi_get_soft_state 23842 * EIO if invalid mode sense block descriptor length 23843 * 23844 */ 23845 23846 static int 23847 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23848 { 23849 struct sd_lun *un = NULL; 23850 struct mode_header *sense_mhp, *select_mhp; 23851 struct block_descriptor *sense_desc, *select_desc; 23852 int current_bsize; 23853 int rval = EINVAL; 23854 uchar_t *sense = NULL; 23855 uchar_t *select = NULL; 23856 23857 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23858 23859 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23860 return (ENXIO); 23861 } 23862 23863 /* 23864 * The block length is changed via the Mode Select block descriptor; 23865 * the "Read/Write Error Recovery" mode page (0x1) contents are not 23866 * actually required as part of this routine. Therefore the mode sense 23867 * allocation length is specified to be the length of a mode page 23868 * header and a block descriptor.
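* * Layout sketch of that 12-byte buffer (6-byte-CDB mode sense, assuming * the usual 4-byte mode_header and 8-byte block_descriptor): bytes 0-3 * hold struct mode_header, bytes 4-11 hold struct block_descriptor, and * the descriptor's last three bytes (blksize_hi/mid/lo) carry the * logical block size that CDROMSBLKMODE rewrites.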
23869 */ 23870 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23871 23872 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23873 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23875 "sr_change_blkmode: Mode Sense Failed\n"); 23876 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23877 return (rval); 23878 } 23879 23880 /* Check the block descriptor len to handle only 1 block descriptor */ 23881 sense_mhp = (struct mode_header *)sense; 23882 if ((sense_mhp->bdesc_length == 0) || 23883 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23885 "sr_change_blkmode: Mode Sense returned invalid block" 23886 " descriptor length\n"); 23887 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23888 return (EIO); 23889 } 23890 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23891 current_bsize = ((sense_desc->blksize_hi << 16) | 23892 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23893 23894 /* Process command */ 23895 switch (cmd) { 23896 case CDROMGBLKMODE: 23897 /* Return the block size obtained during the mode sense */ 23898 if (ddi_copyout(&current_bsize, (void *)data, 23899 sizeof (int), flag) != 0) 23900 rval = EFAULT; 23901 break; 23902 case CDROMSBLKMODE: 23903 /* Validate the requested block size */ 23904 switch (data) { 23905 case CDROM_BLK_512: 23906 case CDROM_BLK_1024: 23907 case CDROM_BLK_2048: 23908 case CDROM_BLK_2056: 23909 case CDROM_BLK_2336: 23910 case CDROM_BLK_2340: 23911 case CDROM_BLK_2352: 23912 case CDROM_BLK_2368: 23913 case CDROM_BLK_2448: 23914 case CDROM_BLK_2646: 23915 case CDROM_BLK_2647: 23916 break; 23917 default: 23918 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23919 "sr_change_blkmode: " 23920 "Block Size '%ld' Not Supported\n", data); 23921 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23922 return (EINVAL); 23923 } 23924 23925 /* 23926 * The current block size matches the requested block size so 23927 * there is no need to send the mode select to change the size 23928 */ 23929 if (current_bsize == data) { 23930 break; 23931 } 23932 23933 /* Build the select data for the requested block size */ 23934 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23935 select_mhp = (struct mode_header *)select; 23936 select_desc = 23937 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23938 /* 23939 * The LBA size is changed via the block descriptor, so the 23940 * descriptor is built according to the user data 23941 */ 23942 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23943 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23944 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23945 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23946 23947 /* Send the mode select for the requested block size */ 23948 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23949 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23950 SD_PATH_STANDARD)) != 0) { 23951 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23952 "sr_change_blkmode: Mode Select Failed\n"); 23953 /* 23954 * The mode select failed for the requested block size, 23955 * so reset the data for the original block size and 23956 * send it to the target. The error is indicated by the 23957 * return value for the failed mode select.
23958 */ 23959 select_desc->blksize_hi = sense_desc->blksize_hi; 23960 select_desc->blksize_mid = sense_desc->blksize_mid; 23961 select_desc->blksize_lo = sense_desc->blksize_lo; 23962 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23963 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23964 SD_PATH_STANDARD); 23965 } else { 23966 ASSERT(!mutex_owned(SD_MUTEX(un))); 23967 mutex_enter(SD_MUTEX(un)); 23968 sd_update_block_info(un, (uint32_t)data, 0); 23969 mutex_exit(SD_MUTEX(un)); 23970 } 23971 break; 23972 default: 23973 /* should not reach here, but check anyway */ 23974 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23975 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23976 rval = EINVAL; 23977 break; 23978 } 23979 23980 if (select) { 23981 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23982 } 23983 if (sense) { 23984 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23985 } 23986 return (rval); 23987 } 23988 23989 23990 /* 23991 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23992 * implement driver support for getting and setting the CD speed. The command 23993 * set used will be based on the device type. If the device has not been 23994 * identified as MMC the Toshiba vendor specific mode page will be used. If 23995 * the device is MMC but does not support the Real Time Streaming feature 23996 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23997 * be used to read the speed. 23998 */ 23999 24000 /* 24001 * Function: sr_change_speed() 24002 * 24003 * Description: This routine is the driver entry point for handling CD-ROM 24004 * drive speed ioctl requests for devices supporting the Toshiba 24005 * vendor specific drive speed mode page. Support for returning 24006 * and changing the current drive speed in use by the device is 24007 * implemented. 24008 * 24009 * Arguments: dev - the device 'dev_t' 24010 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24011 * CDROMSDRVSPEED (set) 24012 * data - current drive speed or requested drive speed 24013 * flag - this argument is a pass through to ddi_copyxxx() directly 24014 * from the mode argument of ioctl(). 24015 * 24016 * Return Code: the code returned by sd_send_scsi_cmd() 24017 * EINVAL if invalid arguments are provided 24018 * EFAULT if ddi_copyxxx() fails 24019 * ENXIO if fail ddi_get_soft_state 24020 * EIO if invalid mode sense block descriptor length 24021 */ 24022 24023 static int 24024 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24025 { 24026 struct sd_lun *un = NULL; 24027 struct mode_header *sense_mhp, *select_mhp; 24028 struct mode_speed *sense_page, *select_page; 24029 int current_speed; 24030 int rval = EINVAL; 24031 int bd_len; 24032 uchar_t *sense = NULL; 24033 uchar_t *select = NULL; 24034 24035 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24036 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24037 return (ENXIO); 24038 } 24039 24040 /* 24041 * Note: The drive speed is being modified here according to a Toshiba 24042 * vendor specific mode page (0x31). 
	 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the
		 * speed.
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
			/*
			 * The mode select failed for the requested drive
			 * speed, so reset the data for the original drive
			 * speed and send it to the target. The error is
			 * indicated by the return value for the failed mode
			 * select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		    CDROMSDRVSPEED (set)
 *	      data - current drive speed or requested drive speed
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
		 * the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0,
		    SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		    for setting the resume bit of the cdb.
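 *
 *	      (Usage sketch, assuming a hypothetical user-level caller with
 *	      an open CD-ROM file descriptor fd; neither is taken from the
 *	      original source:
 *
 *		ioctl(fd, CDROMPAUSE);		pauses audio playback
 *		ioctl(fd, CDROMRESUME);		resumes audio playback
 *
 *	      Both map onto the single PAUSE/RESUME CDB built below; only
 *	      the resume bit in byte 8 differs.)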
24317 * 24318 * Return Code: the code returned by sd_send_scsi_cmd() 24319 * EINVAL if invalid mode specified 24320 * 24321 */ 24322 24323 static int 24324 sr_pause_resume(dev_t dev, int cmd) 24325 { 24326 struct sd_lun *un; 24327 struct uscsi_cmd *com; 24328 char cdb[CDB_GROUP1]; 24329 int rval; 24330 24331 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24332 return (ENXIO); 24333 } 24334 24335 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24336 bzero(cdb, CDB_GROUP1); 24337 cdb[0] = SCMD_PAUSE_RESUME; 24338 switch (cmd) { 24339 case CDROMRESUME: 24340 cdb[8] = 1; 24341 break; 24342 case CDROMPAUSE: 24343 cdb[8] = 0; 24344 break; 24345 default: 24346 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24347 " Command '%x' Not Supported\n", cmd); 24348 rval = EINVAL; 24349 goto done; 24350 } 24351 24352 com->uscsi_cdb = cdb; 24353 com->uscsi_cdblen = CDB_GROUP1; 24354 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24355 24356 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24357 SD_PATH_STANDARD); 24358 24359 done: 24360 kmem_free(com, sizeof (*com)); 24361 return (rval); 24362 } 24363 24364 24365 /* 24366 * Function: sr_play_msf() 24367 * 24368 * Description: This routine is the driver entry point for handling CD-ROM 24369 * ioctl requests to output the audio signals at the specified 24370 * starting address and continue the audio play until the specified 24371 * ending address (CDROMPLAYMSF) The address is in Minute Second 24372 * Frame (MSF) format. 24373 * 24374 * Arguments: dev - the device 'dev_t' 24375 * data - pointer to user provided audio msf structure, 24376 * specifying start/end addresses. 24377 * flag - this argument is a pass through to ddi_copyxxx() 24378 * directly from the mode argument of ioctl(). 
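 *
 *	      (Illustrative values, not from the original source: to play
 *	      from 2:30.00 through 3:00.00 a caller would set
 *	      cdmsf_min0 = 2, cdmsf_sec0 = 30, cdmsf_frame0 = 0 and
 *	      cdmsf_min1 = 3, cdmsf_sec1 = 0, cdmsf_frame1 = 0 in the
 *	      cdrom_msf structure before issuing CDROMPLAYMSF.)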
24379 * 24380 * Return Code: the code returned by sd_send_scsi_cmd() 24381 * EFAULT if ddi_copyxxx() fails 24382 * ENXIO if fail ddi_get_soft_state 24383 * EINVAL if data pointer is NULL 24384 */ 24385 24386 static int 24387 sr_play_msf(dev_t dev, caddr_t data, int flag) 24388 { 24389 struct sd_lun *un; 24390 struct uscsi_cmd *com; 24391 struct cdrom_msf msf_struct; 24392 struct cdrom_msf *msf = &msf_struct; 24393 char cdb[CDB_GROUP1]; 24394 int rval; 24395 24396 if (data == NULL) { 24397 return (EINVAL); 24398 } 24399 24400 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24401 return (ENXIO); 24402 } 24403 24404 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24405 return (EFAULT); 24406 } 24407 24408 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24409 bzero(cdb, CDB_GROUP1); 24410 cdb[0] = SCMD_PLAYAUDIO_MSF; 24411 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24412 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24413 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24414 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24415 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24416 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24417 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24418 } else { 24419 cdb[3] = msf->cdmsf_min0; 24420 cdb[4] = msf->cdmsf_sec0; 24421 cdb[5] = msf->cdmsf_frame0; 24422 cdb[6] = msf->cdmsf_min1; 24423 cdb[7] = msf->cdmsf_sec1; 24424 cdb[8] = msf->cdmsf_frame1; 24425 } 24426 com->uscsi_cdb = cdb; 24427 com->uscsi_cdblen = CDB_GROUP1; 24428 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24429 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24430 SD_PATH_STANDARD); 24431 kmem_free(com, sizeof (*com)); 24432 return (rval); 24433 } 24434 24435 24436 /* 24437 * Function: sr_play_trkind() 24438 * 24439 * Description: This routine is the driver entry point for handling CD-ROM 24440 * ioctl requests to output the audio signals at the specified 24441 * starting address and continue the audio play until the specified 24442 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24443 * format. 24444 * 24445 * Arguments: dev - the device 'dev_t' 24446 * data - pointer to user provided audio track/index structure, 24447 * specifying start/end addresses. 24448 * flag - this argument is a pass through to ddi_copyxxx() 24449 * directly from the mode argument of ioctl(). 
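 *
 *	      (Illustrative values, not from the original source: to play
 *	      from track 2 index 1 through track 4 index 1 a caller would
 *	      set cdti_trk0 = 2, cdti_ind0 = 1, cdti_trk1 = 4 and
 *	      cdti_ind1 = 1 in the cdrom_ti structure; these map directly
 *	      onto CDB bytes 4, 5, 7 and 8 below.)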
24450 * 24451 * Return Code: the code returned by sd_send_scsi_cmd() 24452 * EFAULT if ddi_copyxxx() fails 24453 * ENXIO if fail ddi_get_soft_state 24454 * EINVAL if data pointer is NULL 24455 */ 24456 24457 static int 24458 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24459 { 24460 struct cdrom_ti ti_struct; 24461 struct cdrom_ti *ti = &ti_struct; 24462 struct uscsi_cmd *com = NULL; 24463 char cdb[CDB_GROUP1]; 24464 int rval; 24465 24466 if (data == NULL) { 24467 return (EINVAL); 24468 } 24469 24470 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24471 return (EFAULT); 24472 } 24473 24474 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24475 bzero(cdb, CDB_GROUP1); 24476 cdb[0] = SCMD_PLAYAUDIO_TI; 24477 cdb[4] = ti->cdti_trk0; 24478 cdb[5] = ti->cdti_ind0; 24479 cdb[7] = ti->cdti_trk1; 24480 cdb[8] = ti->cdti_ind1; 24481 com->uscsi_cdb = cdb; 24482 com->uscsi_cdblen = CDB_GROUP1; 24483 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24484 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24485 SD_PATH_STANDARD); 24486 kmem_free(com, sizeof (*com)); 24487 return (rval); 24488 } 24489 24490 24491 /* 24492 * Function: sr_read_all_subcodes() 24493 * 24494 * Description: This routine is the driver entry point for handling CD-ROM 24495 * ioctl requests to return raw subcode data while the target is 24496 * playing audio (CDROMSUBCODE). 24497 * 24498 * Arguments: dev - the device 'dev_t' 24499 * data - pointer to user provided cdrom subcode structure, 24500 * specifying the transfer length and address. 24501 * flag - this argument is a pass through to ddi_copyxxx() 24502 * directly from the mode argument of ioctl(). 24503 * 24504 * Return Code: the code returned by sd_send_scsi_cmd() 24505 * EFAULT if ddi_copyxxx() fails 24506 * ENXIO if fail ddi_get_soft_state 24507 * EINVAL if data pointer is NULL 24508 */ 24509 24510 static int 24511 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24512 { 24513 struct sd_lun *un = NULL; 24514 struct uscsi_cmd *com = NULL; 24515 struct cdrom_subcode *subcode = NULL; 24516 int rval; 24517 size_t buflen; 24518 char cdb[CDB_GROUP5]; 24519 24520 #ifdef _MULTI_DATAMODEL 24521 /* To support ILP32 applications in an LP64 world */ 24522 struct cdrom_subcode32 cdrom_subcode32; 24523 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24524 #endif 24525 if (data == NULL) { 24526 return (EINVAL); 24527 } 24528 24529 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24530 return (ENXIO); 24531 } 24532 24533 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24534 24535 #ifdef _MULTI_DATAMODEL 24536 switch (ddi_model_convert_from(flag & FMODELS)) { 24537 case DDI_MODEL_ILP32: 24538 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24539 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24540 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24541 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24542 return (EFAULT); 24543 } 24544 /* Convert the ILP32 uscsi data from the application to LP64 */ 24545 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24546 break; 24547 case DDI_MODEL_NONE: 24548 if (ddi_copyin(data, subcode, 24549 sizeof (struct cdrom_subcode), flag)) { 24550 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24551 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24552 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24553 return (EFAULT); 24554 } 24555 break; 24556 } 24557 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or
 *		MSF format per the user), track relative CD-ROM address (LBA
 *		or MSF format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom sub-channel structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
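 *
 *	      (Sketch of the returned data, restating the parsing done
 *	      below: the 16 byte READ SUBCHANNEL response yields, among
 *	      other fields,
 *
 *		cdsc_audiostatus	from byte 1
 *		cdsc_trk / cdsc_ind	from bytes 6 and 7
 *		cdsc_absaddr		from bytes 8-11
 *		cdsc_reladdr		from bytes 12-15
 *
 *	      in either LBA or MSF form per cdsc_format.)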
24631 * 24632 * Return Code: the code returned by sd_send_scsi_cmd() 24633 * EFAULT if ddi_copyxxx() fails 24634 * ENXIO if fail ddi_get_soft_state 24635 * EINVAL if data pointer is NULL 24636 */ 24637 24638 static int 24639 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24640 { 24641 struct sd_lun *un; 24642 struct uscsi_cmd *com; 24643 struct cdrom_subchnl subchanel; 24644 struct cdrom_subchnl *subchnl = &subchanel; 24645 char cdb[CDB_GROUP1]; 24646 caddr_t buffer; 24647 int rval; 24648 24649 if (data == NULL) { 24650 return (EINVAL); 24651 } 24652 24653 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24654 (un->un_state == SD_STATE_OFFLINE)) { 24655 return (ENXIO); 24656 } 24657 24658 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24659 return (EFAULT); 24660 } 24661 24662 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24663 bzero(cdb, CDB_GROUP1); 24664 cdb[0] = SCMD_READ_SUBCHANNEL; 24665 /* Set the MSF bit based on the user requested address format */ 24666 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24667 /* 24668 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24669 * returned 24670 */ 24671 cdb[2] = 0x40; 24672 /* 24673 * Set byte 3 to specify the return data format. A value of 0x01 24674 * indicates that the CD-ROM current position should be returned. 24675 */ 24676 cdb[3] = 0x01; 24677 cdb[8] = 0x10; 24678 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24679 com->uscsi_cdb = cdb; 24680 com->uscsi_cdblen = CDB_GROUP1; 24681 com->uscsi_bufaddr = buffer; 24682 com->uscsi_buflen = 16; 24683 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24684 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24685 SD_PATH_STANDARD); 24686 if (rval != 0) { 24687 kmem_free(buffer, 16); 24688 kmem_free(com, sizeof (*com)); 24689 return (rval); 24690 } 24691 24692 /* Process the returned Q sub-channel data */ 24693 subchnl->cdsc_audiostatus = buffer[1]; 24694 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24695 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24696 subchnl->cdsc_trk = buffer[6]; 24697 subchnl->cdsc_ind = buffer[7]; 24698 if (subchnl->cdsc_format & CDROM_LBA) { 24699 subchnl->cdsc_absaddr.lba = 24700 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24701 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24702 subchnl->cdsc_reladdr.lba = 24703 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24704 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24705 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24706 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24707 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24708 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24709 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24710 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24711 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24712 } else { 24713 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24714 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24715 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24716 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24717 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24718 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24719 } 24720 kmem_free(buffer, 16); 24721 kmem_free(com, sizeof (*com)); 24722 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24723 != 0) { 24724 return (EFAULT); 24725 } 24726 return (rval); 24727 } 24728 24729 24730 /* 24731 * Function: sr_read_tocentry() 24732 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
 *		the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc entry structure,
 *		     specifying the track # and the address format
 *		     (LBA or MSF).
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed. Since the command
			 * is obsolete in one spec, it is better to return -1
			 * for an invalid track so that we can still receive
			 * the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc header structure,
 *		     specifying the starting and ending track numbers.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda() and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
24998 * 24999 * In addition to support for the various data formats these routines also 25000 * include support for devices that implement only the direct access READ 25001 * commands (0x08, 0x28), devices that implement the READ_CD commands 25002 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25003 * READ CDXA commands (0xD8, 0xDB) 25004 */ 25005 25006 /* 25007 * Function: sr_read_mode1() 25008 * 25009 * Description: This routine is the driver entry point for handling CD-ROM 25010 * ioctl read mode1 requests (CDROMREADMODE1). 25011 * 25012 * Arguments: dev - the device 'dev_t' 25013 * data - pointer to user provided cd read structure specifying 25014 * the lba buffer address and length. 25015 * flag - this argument is a pass through to ddi_copyxxx() 25016 * directly from the mode argument of ioctl(). 25017 * 25018 * Return Code: the code returned by sd_send_scsi_cmd() 25019 * EFAULT if ddi_copyxxx() fails 25020 * ENXIO if fail ddi_get_soft_state 25021 * EINVAL if data pointer is NULL 25022 */ 25023 25024 static int 25025 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25026 { 25027 struct sd_lun *un; 25028 struct cdrom_read mode1_struct; 25029 struct cdrom_read *mode1 = &mode1_struct; 25030 int rval; 25031 #ifdef _MULTI_DATAMODEL 25032 /* To support ILP32 applications in an LP64 world */ 25033 struct cdrom_read32 cdrom_read32; 25034 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25035 #endif /* _MULTI_DATAMODEL */ 25036 25037 if (data == NULL) { 25038 return (EINVAL); 25039 } 25040 25041 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25042 (un->un_state == SD_STATE_OFFLINE)) { 25043 return (ENXIO); 25044 } 25045 25046 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25047 "sd_read_mode1: entry: un:0x%p\n", un); 25048 25049 #ifdef _MULTI_DATAMODEL 25050 switch (ddi_model_convert_from(flag & FMODELS)) { 25051 case DDI_MODEL_ILP32: 25052 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25053 return (EFAULT); 25054 } 25055 /* Convert the ILP32 uscsi data from the application to LP64 */ 25056 cdrom_read32tocdrom_read(cdrd32, mode1); 25057 break; 25058 case DDI_MODEL_NONE: 25059 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25060 return (EFAULT); 25061 } 25062 } 25063 #else /* ! _MULTI_DATAMODEL */ 25064 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25065 return (EFAULT); 25066 } 25067 #endif /* _MULTI_DATAMODEL */ 25068 25069 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25070 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25071 25072 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25073 "sd_read_mode1: exit: un:0x%p\n", un); 25074 25075 return (rval); 25076 } 25077 25078 25079 /* 25080 * Function: sr_read_cd_mode2() 25081 * 25082 * Description: This routine is the driver entry point for handling CD-ROM 25083 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25084 * support the READ CD (0xBE) command or the 1st generation 25085 * READ CD (0xD4) command. 25086 * 25087 * Arguments: dev - the device 'dev_t' 25088 * data - pointer to user provided cd read structure specifying 25089 * the lba buffer address and length. 25090 * flag - this argument is a pass through to ddi_copyxxx() 25091 * directly from the mode argument of ioctl(). 
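 *
 *	      (Worked example, assuming a hypothetical caller and not taken
 *	      from the original source: a cdread_buflen of 23360 bytes covers
 *	      23360 / 2336 = 10 raw mode 2 sectors, which is the transfer
 *	      length placed in CDB bytes 6-8 below.)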
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cd read structure specifying
 *		     the lba buffer address and length.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
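 *
 *	      (Note, restating the implementation below: the user's LBA is
 *	      expressed in 512 byte blocks and is converted to the device's
 *	      2 KByte addressing by mode2->cdread_lba >>= 2, so for example
 *	      LBA 100 becomes device block 25.)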
25201 * 25202 * Return Code: the code returned by sd_send_scsi_cmd() 25203 * EFAULT if ddi_copyxxx() fails 25204 * ENXIO if fail ddi_get_soft_state 25205 * EINVAL if data pointer is NULL 25206 * EIO if fail to reset block size 25207 * EAGAIN if commands are in progress in the driver 25208 */ 25209 25210 static int 25211 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25212 { 25213 struct sd_lun *un; 25214 struct cdrom_read mode2_struct; 25215 struct cdrom_read *mode2 = &mode2_struct; 25216 int rval; 25217 uint32_t restore_blksize; 25218 struct uscsi_cmd *com; 25219 uchar_t cdb[CDB_GROUP0]; 25220 int nblocks; 25221 25222 #ifdef _MULTI_DATAMODEL 25223 /* To support ILP32 applications in an LP64 world */ 25224 struct cdrom_read32 cdrom_read32; 25225 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25226 #endif /* _MULTI_DATAMODEL */ 25227 25228 if (data == NULL) { 25229 return (EINVAL); 25230 } 25231 25232 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25233 (un->un_state == SD_STATE_OFFLINE)) { 25234 return (ENXIO); 25235 } 25236 25237 /* 25238 * Because this routine will update the device and driver block size 25239 * being used we want to make sure there are no commands in progress. 25240 * If commands are in progress the user will have to try again. 25241 * 25242 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25243 * in sdioctl to protect commands from sdioctl through to the top of 25244 * sd_uscsi_strategy. See sdioctl for details. 25245 */ 25246 mutex_enter(SD_MUTEX(un)); 25247 if (un->un_ncmds_in_driver != 1) { 25248 mutex_exit(SD_MUTEX(un)); 25249 return (EAGAIN); 25250 } 25251 mutex_exit(SD_MUTEX(un)); 25252 25253 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25254 "sd_read_mode2: entry: un:0x%p\n", un); 25255 25256 #ifdef _MULTI_DATAMODEL 25257 switch (ddi_model_convert_from(flag & FMODELS)) { 25258 case DDI_MODEL_ILP32: 25259 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25260 return (EFAULT); 25261 } 25262 /* Convert the ILP32 uscsi data from the application to LP64 */ 25263 cdrom_read32tocdrom_read(cdrd32, mode2); 25264 break; 25265 case DDI_MODEL_NONE: 25266 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25267 return (EFAULT); 25268 } 25269 break; 25270 } 25271 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}

	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through the main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If the READ command succeeded we still need to report
		 * an error because we failed to reset the block size.
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is
 *		a legacy implementation based upon a vendor specific mode
 *		page.
 *
 * Arguments: dev - the device 'dev_t'
 *	      blksize - flag indicating if the block size is being set to
 *			2336 or 512.
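 *
 *	      (Worked example of the select data built below, not taken from
 *	      the original source: for blksize 2336 (0x920) the two length
 *	      bytes are select[10] = 0x09 and select[11] = 0x20, and
 *	      select[14] is OR'ed with 0x01 to flag the 2336 byte mode.)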
25357 * 25358 * Return Code: the code returned by sd_send_scsi_cmd() 25359 * EFAULT if ddi_copyxxx() fails 25360 * ENXIO if fail ddi_get_soft_state 25361 * EINVAL if data pointer is NULL 25362 */ 25363 25364 static int 25365 sr_sector_mode(dev_t dev, uint32_t blksize) 25366 { 25367 struct sd_lun *un; 25368 uchar_t *sense; 25369 uchar_t *select; 25370 int rval; 25371 25372 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25373 (un->un_state == SD_STATE_OFFLINE)) { 25374 return (ENXIO); 25375 } 25376 25377 sense = kmem_zalloc(20, KM_SLEEP); 25378 25379 /* Note: This is a vendor specific mode page (0x81) */ 25380 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25381 SD_PATH_STANDARD)) != 0) { 25382 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25383 "sr_sector_mode: Mode Sense failed\n"); 25384 kmem_free(sense, 20); 25385 return (rval); 25386 } 25387 select = kmem_zalloc(20, KM_SLEEP); 25388 select[3] = 0x08; 25389 select[10] = ((blksize >> 8) & 0xff); 25390 select[11] = (blksize & 0xff); 25391 select[12] = 0x01; 25392 select[13] = 0x06; 25393 select[14] = sense[14]; 25394 select[15] = sense[15]; 25395 if (blksize == SD_MODE2_BLKSIZE) { 25396 select[14] |= 0x01; 25397 } 25398 25399 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25400 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25401 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25402 "sr_sector_mode: Mode Select failed\n"); 25403 } else { 25404 /* 25405 * Only update the softstate block size if we successfully 25406 * changed the device block mode. 25407 */ 25408 mutex_enter(SD_MUTEX(un)); 25409 sd_update_block_info(un, blksize, 0); 25410 mutex_exit(SD_MUTEX(un)); 25411 } 25412 kmem_free(sense, 20); 25413 kmem_free(select, 20); 25414 return (rval); 25415 } 25416 25417 25418 /* 25419 * Function: sr_read_cdda() 25420 * 25421 * Description: This routine is the driver entry point for handling CD-ROM 25422 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 25423 * the target supports CDDA these requests are handled via a vendor 25424 * specific command (0xD8) If the target does not support CDDA 25425 * these requests are handled via the READ CD command (0xBE). 25426 * 25427 * Arguments: dev - the device 'dev_t' 25428 * data - pointer to user provided CD-DA structure specifying 25429 * the track starting address, transfer length, and 25430 * subcode options. 25431 * flag - this argument is a pass through to ddi_copyxxx() 25432 * directly from the mode argument of ioctl(). 
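 *
 *	      (Illustrative sizing, restating the switch below and not taken
 *	      from the original source: with cdda_subcode = CDROM_DA_SUBQ
 *	      each block carries 2368 bytes, so a cdda_length of 4 blocks
 *	      needs a 4 * 2368 = 9472 byte buffer at cdda_data.)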
25433 * 25434 * Return Code: the code returned by sd_send_scsi_cmd() 25435 * EFAULT if ddi_copyxxx() fails 25436 * ENXIO if fail ddi_get_soft_state 25437 * EINVAL if invalid arguments are provided 25438 * ENOTTY 25439 */ 25440 25441 static int 25442 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25443 { 25444 struct sd_lun *un; 25445 struct uscsi_cmd *com; 25446 struct cdrom_cdda *cdda; 25447 int rval; 25448 size_t buflen; 25449 char cdb[CDB_GROUP5]; 25450 25451 #ifdef _MULTI_DATAMODEL 25452 /* To support ILP32 applications in an LP64 world */ 25453 struct cdrom_cdda32 cdrom_cdda32; 25454 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25455 #endif /* _MULTI_DATAMODEL */ 25456 25457 if (data == NULL) { 25458 return (EINVAL); 25459 } 25460 25461 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25462 return (ENXIO); 25463 } 25464 25465 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25466 25467 #ifdef _MULTI_DATAMODEL 25468 switch (ddi_model_convert_from(flag & FMODELS)) { 25469 case DDI_MODEL_ILP32: 25470 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25471 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25472 "sr_read_cdda: ddi_copyin Failed\n"); 25473 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25474 return (EFAULT); 25475 } 25476 /* Convert the ILP32 uscsi data from the application to LP64 */ 25477 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25478 break; 25479 case DDI_MODEL_NONE: 25480 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25481 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25482 "sr_read_cdda: ddi_copyin Failed\n"); 25483 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25484 return (EFAULT); 25485 } 25486 break; 25487 } 25488 #else /* ! _MULTI_DATAMODEL */ 25489 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25491 "sr_read_cdda: ddi_copyin Failed\n"); 25492 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25493 return (EFAULT); 25494 } 25495 #endif /* _MULTI_DATAMODEL */ 25496 25497 /* 25498 * Since MMC-2 expects max 3 bytes for length, check if the 25499 * length input is greater than 3 bytes 25500 */ 25501 if ((cdda->cdda_length & 0xFF000000) != 0) { 25502 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25503 "cdrom transfer length too large: %d (limit %d)\n", 25504 cdda->cdda_length, 0xFFFFFF); 25505 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25506 return (EINVAL); 25507 } 25508 25509 switch (cdda->cdda_subcode) { 25510 case CDROM_DA_NO_SUBCODE: 25511 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25512 break; 25513 case CDROM_DA_SUBQ: 25514 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25515 break; 25516 case CDROM_DA_ALL_SUBCODE: 25517 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25518 break; 25519 case CDROM_DA_SUBCODE_ONLY: 25520 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25521 break; 25522 default: 25523 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25524 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25525 cdda->cdda_subcode); 25526 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25527 return (EINVAL); 25528 } 25529 25530 /* Build and send the command */ 25531 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25532 bzero(cdb, CDB_GROUP5); 25533 25534 if (un->un_f_cfg_cdda == TRUE) { 25535 cdb[0] = (char)SCMD_READ_CD; 25536 cdb[1] = 0x04; 25537 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25538 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25539 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25540 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25541 
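		/* cdb[6..8] carry the transfer length in blocks (big-endian) */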
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25542 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25543 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25544 cdb[9] = 0x10; 25545 switch (cdda->cdda_subcode) { 25546 case CDROM_DA_NO_SUBCODE : 25547 cdb[10] = 0x0; 25548 break; 25549 case CDROM_DA_SUBQ : 25550 cdb[10] = 0x2; 25551 break; 25552 case CDROM_DA_ALL_SUBCODE : 25553 cdb[10] = 0x1; 25554 break; 25555 case CDROM_DA_SUBCODE_ONLY : 25556 /* FALLTHROUGH */ 25557 default : 25558 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25559 kmem_free(com, sizeof (*com)); 25560 return (ENOTTY); 25561 } 25562 } else { 25563 cdb[0] = (char)SCMD_READ_CDDA; 25564 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25565 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25566 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25567 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25568 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25569 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25570 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25571 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25572 cdb[10] = cdda->cdda_subcode; 25573 } 25574 25575 com->uscsi_cdb = cdb; 25576 com->uscsi_cdblen = CDB_GROUP5; 25577 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25578 com->uscsi_buflen = buflen; 25579 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25580 25581 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25582 SD_PATH_STANDARD); 25583 25584 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25585 kmem_free(com, sizeof (*com)); 25586 return (rval); 25587 } 25588 25589 25590 /* 25591 * Function: sr_read_cdxa() 25592 * 25593 * Description: This routine is the driver entry point for handling CD-ROM 25594 * ioctl requests to return CD-XA (Extended Architecture) data. 25595 * (CDROMCDXA). 25596 * 25597 * Arguments: dev - the device 'dev_t' 25598 * data - pointer to user provided CD-XA structure specifying 25599 * the data starting address, transfer length, and format 25600 * flag - this argument is a pass through to ddi_copyxxx() 25601 * directly from the mode argument of ioctl(). 25602 * 25603 * Return Code: the code returned by sd_send_scsi_cmd() 25604 * EFAULT if ddi_copyxxx() fails 25605 * ENXIO if fail ddi_get_soft_state 25606 * EINVAL if data pointer is NULL 25607 */ 25608 25609 static int 25610 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25611 { 25612 struct sd_lun *un; 25613 struct uscsi_cmd *com; 25614 struct cdrom_cdxa *cdxa; 25615 int rval; 25616 size_t buflen; 25617 char cdb[CDB_GROUP5]; 25618 uchar_t read_flags; 25619 25620 #ifdef _MULTI_DATAMODEL 25621 /* To support ILP32 applications in an LP64 world */ 25622 struct cdrom_cdxa32 cdrom_cdxa32; 25623 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25624 #endif /* _MULTI_DATAMODEL */ 25625 25626 if (data == NULL) { 25627 return (EINVAL); 25628 } 25629 25630 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25631 return (ENXIO); 25632 } 25633 25634 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25635 25636 #ifdef _MULTI_DATAMODEL 25637 switch (ddi_model_convert_from(flag & FMODELS)) { 25638 case DDI_MODEL_ILP32: 25639 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25640 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25641 return (EFAULT); 25642 } 25643 /* 25644 * Convert the ILP32 uscsi data from the 25645 * application to LP64 for internal use. 
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here
		 * to request a CD-XA read.
25712 */ 25713 cdb[0] = (char)SCMD_READ_CDXA; 25714 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25715 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25716 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25717 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25718 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25719 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25720 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25721 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25722 cdb[10] = cdxa->cdxa_format; 25723 } 25724 com->uscsi_cdb = cdb; 25725 com->uscsi_cdblen = CDB_GROUP5; 25726 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25727 com->uscsi_buflen = buflen; 25728 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25729 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25730 SD_PATH_STANDARD); 25731 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25732 kmem_free(com, sizeof (*com)); 25733 return (rval); 25734 } 25735 25736 25737 /* 25738 * Function: sr_eject() 25739 * 25740 * Description: This routine is the driver entry point for handling CD-ROM 25741 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25742 * 25743 * Arguments: dev - the device 'dev_t' 25744 * 25745 * Return Code: the code returned by sd_send_scsi_cmd() 25746 */ 25747 25748 static int 25749 sr_eject(dev_t dev) 25750 { 25751 struct sd_lun *un; 25752 int rval; 25753 25754 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25755 (un->un_state == SD_STATE_OFFLINE)) { 25756 return (ENXIO); 25757 } 25758 25759 /* 25760 * To prevent race conditions with the eject 25761 * command, keep track of an eject command as 25762 * it progresses. If we are already handling 25763 * an eject command in the driver for the given 25764 * unit and another request to eject is received 25765 * immediately return EAGAIN so we don't lose 25766 * the command if the current eject command fails. 25767 */ 25768 mutex_enter(SD_MUTEX(un)); 25769 if (un->un_f_ejecting == TRUE) { 25770 mutex_exit(SD_MUTEX(un)); 25771 return (EAGAIN); 25772 } 25773 un->un_f_ejecting = TRUE; 25774 mutex_exit(SD_MUTEX(un)); 25775 25776 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25777 SD_PATH_STANDARD)) != 0) { 25778 mutex_enter(SD_MUTEX(un)); 25779 un->un_f_ejecting = FALSE; 25780 mutex_exit(SD_MUTEX(un)); 25781 return (rval); 25782 } 25783 25784 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25785 SD_PATH_STANDARD); 25786 25787 if (rval == 0) { 25788 mutex_enter(SD_MUTEX(un)); 25789 sr_ejected(un); 25790 un->un_mediastate = DKIO_EJECTED; 25791 un->un_f_ejecting = FALSE; 25792 cv_broadcast(&un->un_state_cv); 25793 mutex_exit(SD_MUTEX(un)); 25794 } else { 25795 mutex_enter(SD_MUTEX(un)); 25796 un->un_f_ejecting = FALSE; 25797 mutex_exit(SD_MUTEX(un)); 25798 } 25799 return (rval); 25800 } 25801 25802 25803 /* 25804 * Function: sr_ejected() 25805 * 25806 * Description: This routine updates the soft state structure to invalidate the 25807 * geometry information after the media has been ejected or a 25808 * media eject has been detected. 
25809 * 25810 * Arguments: un - driver soft state (unit) structure 25811 */ 25812 25813 static void 25814 sr_ejected(struct sd_lun *un) 25815 { 25816 struct sd_errstats *stp; 25817 25818 ASSERT(un != NULL); 25819 ASSERT(mutex_owned(SD_MUTEX(un))); 25820 25821 un->un_f_blockcount_is_valid = FALSE; 25822 un->un_f_tgt_blocksize_is_valid = FALSE; 25823 mutex_exit(SD_MUTEX(un)); 25824 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25825 mutex_enter(SD_MUTEX(un)); 25826 25827 if (un->un_errstats != NULL) { 25828 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25829 stp->sd_capacity.value.ui64 = 0; 25830 } 25831 25832 /* remove "capacity-of-device" properties */ 25833 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25834 "device-nblocks"); 25835 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25836 "device-blksize"); 25837 } 25838 25839 25840 /* 25841 * Function: sr_check_wp() 25842 * 25843 * Description: This routine checks the write protection of a removable 25844 * media disk and hotpluggable devices via the write protect bit of 25845 * the Mode Page Header device specific field. Some devices choke 25846 * on unsupported mode page. In order to workaround this issue, 25847 * this routine has been implemented to use 0x3f mode page(request 25848 * for all pages) for all device types. 25849 * 25850 * Arguments: dev - the device 'dev_t' 25851 * 25852 * Return Code: int indicating if the device is write protected (1) or not (0) 25853 * 25854 * Context: Kernel thread. 25855 * 25856 */ 25857 25858 static int 25859 sr_check_wp(dev_t dev) 25860 { 25861 struct sd_lun *un; 25862 uchar_t device_specific; 25863 uchar_t *sense; 25864 int hdrlen; 25865 int rval = FALSE; 25866 25867 /* 25868 * Note: The return codes for this routine should be reworked to 25869 * properly handle the case of a NULL softstate. 25870 */ 25871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25872 return (FALSE); 25873 } 25874 25875 if (un->un_f_cfg_is_atapi == TRUE) { 25876 /* 25877 * The mode page contents are not required; set the allocation 25878 * length for the mode page header only 25879 */ 25880 hdrlen = MODE_HEADER_LENGTH_GRP2; 25881 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25882 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25883 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25884 goto err_exit; 25885 device_specific = 25886 ((struct mode_header_grp2 *)sense)->device_specific; 25887 } else { 25888 hdrlen = MODE_HEADER_LENGTH; 25889 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25890 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25891 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25892 goto err_exit; 25893 device_specific = 25894 ((struct mode_header *)sense)->device_specific; 25895 } 25896 25897 /* 25898 * Write protect mode sense failed; not all disks 25899 * understand this query. Return FALSE assuming that 25900 * these devices are not writable. 25901 */ 25902 if (device_specific & WRITE_PROTECT) { 25903 rval = TRUE; 25904 } 25905 25906 err_exit: 25907 kmem_free(sense, hdrlen); 25908 return (rval); 25909 } 25910 25911 /* 25912 * Function: sr_volume_ctrl() 25913 * 25914 * Description: This routine is the driver entry point for handling CD-ROM 25915 * audio output volume ioctl requests. (CDROMVOLCTRL) 25916 * 25917 * Arguments: dev - the device 'dev_t' 25918 * data - pointer to user audio volume control structure 25919 * flag - this argument is a pass through to ddi_copyxxx() 25920 * directly from the mode argument of ioctl(). 
25921 * 25922 * Return Code: the code returned by sd_send_scsi_cmd() 25923 * EFAULT if ddi_copyxxx() fails 25924 * ENXIO if fail ddi_get_soft_state 25925 * EINVAL if data pointer is NULL 25926 * 25927 */ 25928 25929 static int 25930 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25931 { 25932 struct sd_lun *un; 25933 struct cdrom_volctrl volume; 25934 struct cdrom_volctrl *vol = &volume; 25935 uchar_t *sense_page; 25936 uchar_t *select_page; 25937 uchar_t *sense; 25938 uchar_t *select; 25939 int sense_buflen; 25940 int select_buflen; 25941 int rval; 25942 25943 if (data == NULL) { 25944 return (EINVAL); 25945 } 25946 25947 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25948 (un->un_state == SD_STATE_OFFLINE)) { 25949 return (ENXIO); 25950 } 25951 25952 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25953 return (EFAULT); 25954 } 25955 25956 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25957 struct mode_header_grp2 *sense_mhp; 25958 struct mode_header_grp2 *select_mhp; 25959 int bd_len; 25960 25961 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25962 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25963 MODEPAGE_AUDIO_CTRL_LEN; 25964 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25965 select = kmem_zalloc(select_buflen, KM_SLEEP); 25966 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25967 sense_buflen, MODEPAGE_AUDIO_CTRL, 25968 SD_PATH_STANDARD)) != 0) { 25969 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25970 "sr_volume_ctrl: Mode Sense Failed\n"); 25971 kmem_free(sense, sense_buflen); 25972 kmem_free(select, select_buflen); 25973 return (rval); 25974 } 25975 sense_mhp = (struct mode_header_grp2 *)sense; 25976 select_mhp = (struct mode_header_grp2 *)select; 25977 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25978 sense_mhp->bdesc_length_lo; 25979 if (bd_len > MODE_BLK_DESC_LENGTH) { 25980 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25981 "sr_volume_ctrl: Mode Sense returned invalid " 25982 "block descriptor length\n"); 25983 kmem_free(sense, sense_buflen); 25984 kmem_free(select, select_buflen); 25985 return (EIO); 25986 } 25987 sense_page = (uchar_t *) 25988 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25989 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25990 select_mhp->length_msb = 0; 25991 select_mhp->length_lsb = 0; 25992 select_mhp->bdesc_length_hi = 0; 25993 select_mhp->bdesc_length_lo = 0; 25994 } else { 25995 struct mode_header *sense_mhp, *select_mhp; 25996 25997 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25998 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25999 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26000 select = kmem_zalloc(select_buflen, KM_SLEEP); 26001 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26002 sense_buflen, MODEPAGE_AUDIO_CTRL, 26003 SD_PATH_STANDARD)) != 0) { 26004 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26005 "sr_volume_ctrl: Mode Sense Failed\n"); 26006 kmem_free(sense, sense_buflen); 26007 kmem_free(select, select_buflen); 26008 return (rval); 26009 } 26010 sense_mhp = (struct mode_header *)sense; 26011 select_mhp = (struct mode_header *)select; 26012 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26013 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26014 "sr_volume_ctrl: Mode Sense returned invalid " 26015 "block descriptor length\n"); 26016 kmem_free(sense, sense_buflen); 26017 kmem_free(select, select_buflen); 26018 return (EIO); 26019 } 26020 sense_page = (uchar_t *) 26021 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26022 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26023 select_mhp->length = 0; 26024 select_mhp->bdesc_length = 0; 26025 } 26026 /* 26027 * Note: An audio control data structure could be created and overlayed 26028 * on the following in place of the array indexing method implemented. 26029 */ 26030 26031 /* Build the select data for the user volume data */ 26032 select_page[0] = MODEPAGE_AUDIO_CTRL; 26033 select_page[1] = 0xE; 26034 /* Set the immediate bit */ 26035 select_page[2] = 0x04; 26036 /* Zero out reserved fields */ 26037 select_page[3] = 0x00; 26038 select_page[4] = 0x00; 26039 /* Return sense data for fields not to be modified */ 26040 select_page[5] = sense_page[5]; 26041 select_page[6] = sense_page[6]; 26042 select_page[7] = sense_page[7]; 26043 /* Set the user specified volume levels for channel 0 and 1 */ 26044 select_page[8] = 0x01; 26045 select_page[9] = vol->channel0; 26046 select_page[10] = 0x02; 26047 select_page[11] = vol->channel1; 26048 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26049 select_page[12] = sense_page[12]; 26050 select_page[13] = sense_page[13]; 26051 select_page[14] = sense_page[14]; 26052 select_page[15] = sense_page[15]; 26053 26054 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26055 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26056 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26057 } else { 26058 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26059 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26060 } 26061 26062 kmem_free(sense, sense_buflen); 26063 kmem_free(select, select_buflen); 26064 return (rval); 26065 } 26066 26067 26068 /* 26069 * Function: sr_read_sony_session_offset() 26070 * 26071 * Description: This routine is the driver entry point for handling CD-ROM 26072 * ioctl requests for session offset information. (CDROMREADOFFSET) 26073 * The address of the first track in the last session of a 26074 * multi-session CD-ROM is returned 26075 * 26076 * Note: This routine uses a vendor specific key value in the 26077 * command control field without implementing any vendor check here 26078 * or in the ioctl routine. 26079 * 26080 * Arguments: dev - the device 'dev_t' 26081 * data - pointer to an int to hold the requested address 26082 * flag - this argument is a pass through to ddi_copyxxx() 26083 * directly from the mode argument of ioctl(). 26084 * 26085 * Return Code: the code returned by sd_send_scsi_cmd() 26086 * EFAULT if ddi_copyxxx() fails 26087 * ENXIO if fail ddi_get_soft_state 26088 * EINVAL if data pointer is NULL 26089 */ 26090 26091 static int 26092 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26093 { 26094 struct sd_lun *un; 26095 struct uscsi_cmd *com; 26096 caddr_t buffer; 26097 char cdb[CDB_GROUP1]; 26098 int session_offset = 0; 26099 int rval; 26100 26101 if (data == NULL) { 26102 return (EINVAL); 26103 } 26104 26105 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26106 (un->un_state == SD_STATE_OFFLINE)) { 26107 return (ENXIO); 26108 } 26109 26110 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26111 bzero(cdb, CDB_GROUP1); 26112 cdb[0] = SCMD_READ_TOC; 26113 /* 26114 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26115 * (4 byte TOC response header + 8 byte response data) 26116 */ 26117 cdb[8] = SONY_SESSION_OFFSET_LEN; 26118 /* Byte 9 is the control byte. 
A vendor-specific value is used. */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current lbasize
		 * blocks. Convert it to 2K blocks before returning it to
		 * the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *	      un - sd_lun structure for the device.
 *	      flag - the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *	      un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers, not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb - The starting block number
 *	      endb - The end block number
 *	      typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
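 *
 *		Usage sketch (illustrative): a read-modify-write caller
 *		might do
 *
 *			wm = sd_range_lock(un, start, end, SD_WTYPE_RMW);
 *			(issue the read, modify the data, send the write)
 *			sd_range_unlock(un, wm);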
26217 */ 26218 26219 static struct sd_w_map * 26220 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 26221 { 26222 struct sd_w_map *wmp = NULL; 26223 struct sd_w_map *sl_wmp = NULL; 26224 struct sd_w_map *tmp_wmp; 26225 wm_state state = SD_WM_CHK_LIST; 26226 26227 26228 ASSERT(un != NULL); 26229 ASSERT(!mutex_owned(SD_MUTEX(un))); 26230 26231 mutex_enter(SD_MUTEX(un)); 26232 26233 while (state != SD_WM_DONE) { 26234 26235 switch (state) { 26236 case SD_WM_CHK_LIST: 26237 /* 26238 * This is the starting state. Check the wmap list 26239 * to see if the range is currently available. 26240 */ 26241 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 26242 /* 26243 * If this is a simple write and no rmw 26244 * i/o is pending then try to lock the 26245 * range as the range should be available. 26246 */ 26247 state = SD_WM_LOCK_RANGE; 26248 } else { 26249 tmp_wmp = sd_get_range(un, startb, endb); 26250 if (tmp_wmp != NULL) { 26251 if ((wmp != NULL) && ONLIST(un, wmp)) { 26252 /* 26253 * Should not keep onlist wmps 26254 * while waiting this macro 26255 * will also do wmp = NULL; 26256 */ 26257 FREE_ONLIST_WMAP(un, wmp); 26258 } 26259 /* 26260 * sl_wmp is the wmap on which wait 26261 * is done, since the tmp_wmp points 26262 * to the inuse wmap, set sl_wmp to 26263 * tmp_wmp and change the state to sleep 26264 */ 26265 sl_wmp = tmp_wmp; 26266 state = SD_WM_WAIT_MAP; 26267 } else { 26268 state = SD_WM_LOCK_RANGE; 26269 } 26270 26271 } 26272 break; 26273 26274 case SD_WM_LOCK_RANGE: 26275 ASSERT(un->un_wm_cache); 26276 /* 26277 * The range need to be locked, try to get a wmap. 26278 * First attempt it with NO_SLEEP, want to avoid a sleep 26279 * if possible as we will have to release the sd mutex 26280 * if we have to sleep. 26281 */ 26282 if (wmp == NULL) 26283 wmp = kmem_cache_alloc(un->un_wm_cache, 26284 KM_NOSLEEP); 26285 if (wmp == NULL) { 26286 mutex_exit(SD_MUTEX(un)); 26287 _NOTE(DATA_READABLE_WITHOUT_LOCK 26288 (sd_lun::un_wm_cache)) 26289 wmp = kmem_cache_alloc(un->un_wm_cache, 26290 KM_SLEEP); 26291 mutex_enter(SD_MUTEX(un)); 26292 /* 26293 * we released the mutex so recheck and go to 26294 * check list state. 26295 */ 26296 state = SD_WM_CHK_LIST; 26297 } else { 26298 /* 26299 * We exit out of state machine since we 26300 * have the wmap. Do the housekeeping first. 26301 * place the wmap on the wmap list if it is not 26302 * on it already and then set the state to done. 26303 */ 26304 wmp->wm_start = startb; 26305 wmp->wm_end = endb; 26306 wmp->wm_flags = typ | SD_WM_BUSY; 26307 if (typ & SD_WTYPE_RMW) { 26308 un->un_rmw_count++; 26309 } 26310 /* 26311 * If not already on the list then link 26312 */ 26313 if (!ONLIST(un, wmp)) { 26314 wmp->wm_next = un->un_wm; 26315 wmp->wm_prev = NULL; 26316 if (wmp->wm_next) 26317 wmp->wm_next->wm_prev = wmp; 26318 un->un_wm = wmp; 26319 } 26320 state = SD_WM_DONE; 26321 } 26322 break; 26323 26324 case SD_WM_WAIT_MAP: 26325 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 26326 /* 26327 * Wait is done on sl_wmp, which is set in the 26328 * check_list state. 26329 */ 26330 sl_wmp->wm_wanted_count++; 26331 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 26332 sl_wmp->wm_wanted_count--; 26333 /* 26334 * We can reuse the memory from the completed sl_wmp 26335 * lock range for our new lock, but only if noone is 26336 * waiting for it. 
26337 */ 26338 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26339 if (sl_wmp->wm_wanted_count == 0) { 26340 if (wmp != NULL) 26341 CHK_N_FREEWMP(un, wmp); 26342 wmp = sl_wmp; 26343 } 26344 sl_wmp = NULL; 26345 /* 26346 * After waking up, need to recheck for availability of 26347 * range. 26348 */ 26349 state = SD_WM_CHK_LIST; 26350 break; 26351 26352 default: 26353 panic("sd_range_lock: " 26354 "Unknown state %d in sd_range_lock", state); 26355 /*NOTREACHED*/ 26356 } /* switch(state) */ 26357 26358 } /* while(state != SD_WM_DONE) */ 26359 26360 mutex_exit(SD_MUTEX(un)); 26361 26362 ASSERT(wmp != NULL); 26363 26364 return (wmp); 26365 } 26366 26367 26368 /* 26369 * Function: sd_get_range() 26370 * 26371 * Description: Find if there any overlapping I/O to this one 26372 * Returns the write-map of 1st such I/O, NULL otherwise. 26373 * 26374 * Arguments: un - sd_lun structure for the device. 26375 * startb - The starting block number 26376 * endb - The end block number 26377 * 26378 * Return Code: wm - pointer to the wmap structure. 26379 */ 26380 26381 static struct sd_w_map * 26382 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26383 { 26384 struct sd_w_map *wmp; 26385 26386 ASSERT(un != NULL); 26387 26388 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26389 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26390 continue; 26391 } 26392 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26393 break; 26394 } 26395 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26396 break; 26397 } 26398 } 26399 26400 return (wmp); 26401 } 26402 26403 26404 /* 26405 * Function: sd_free_inlist_wmap() 26406 * 26407 * Description: Unlink and free a write map struct. 26408 * 26409 * Arguments: un - sd_lun structure for the device. 26410 * wmp - sd_w_map which needs to be unlinked. 26411 */ 26412 26413 static void 26414 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26415 { 26416 ASSERT(un != NULL); 26417 26418 if (un->un_wm == wmp) { 26419 un->un_wm = wmp->wm_next; 26420 } else { 26421 wmp->wm_prev->wm_next = wmp->wm_next; 26422 } 26423 26424 if (wmp->wm_next) { 26425 wmp->wm_next->wm_prev = wmp->wm_prev; 26426 } 26427 26428 wmp->wm_next = wmp->wm_prev = NULL; 26429 26430 kmem_cache_free(un->un_wm_cache, wmp); 26431 } 26432 26433 26434 /* 26435 * Function: sd_range_unlock() 26436 * 26437 * Description: Unlock the range locked by wm. 26438 * Free write map if nobody else is waiting on it. 26439 * 26440 * Arguments: un - sd_lun structure for the device. 26441 * wmp - sd_w_map which needs to be unlinked. 26442 */ 26443 26444 static void 26445 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26446 { 26447 ASSERT(un != NULL); 26448 ASSERT(wm != NULL); 26449 ASSERT(!mutex_owned(SD_MUTEX(un))); 26450 26451 mutex_enter(SD_MUTEX(un)); 26452 26453 if (wm->wm_flags & SD_WTYPE_RMW) { 26454 un->un_rmw_count--; 26455 } 26456 26457 if (wm->wm_wanted_count) { 26458 wm->wm_flags = 0; 26459 /* 26460 * Broadcast that the wmap is available now. 26461 */ 26462 cv_broadcast(&wm->wm_avail); 26463 } else { 26464 /* 26465 * If no one is waiting on the map, it should be free'ed. 26466 */ 26467 sd_free_inlist_wmap(un, wm); 26468 } 26469 26470 mutex_exit(SD_MUTEX(un)); 26471 } 26472 26473 26474 /* 26475 * Function: sd_read_modify_write_task 26476 * 26477 * Description: Called from a taskq thread to initiate the write phase of 26478 * a read-modify-write request. This is used for targets where 26479 * un->un_sys_blocksize != un->un_tgt_blocksize. 
26480 * 26481 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26482 * 26483 * Context: Called under taskq thread context. 26484 */ 26485 26486 static void 26487 sd_read_modify_write_task(void *arg) 26488 { 26489 struct sd_mapblocksize_info *bsp; 26490 struct buf *bp; 26491 struct sd_xbuf *xp; 26492 struct sd_lun *un; 26493 26494 bp = arg; /* The bp is given in arg */ 26495 ASSERT(bp != NULL); 26496 26497 /* Get the pointer to the layer-private data struct */ 26498 xp = SD_GET_XBUF(bp); 26499 ASSERT(xp != NULL); 26500 bsp = xp->xb_private; 26501 ASSERT(bsp != NULL); 26502 26503 un = SD_GET_UN(bp); 26504 ASSERT(un != NULL); 26505 ASSERT(!mutex_owned(SD_MUTEX(un))); 26506 26507 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26508 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26509 26510 /* 26511 * This is the write phase of a read-modify-write request, called 26512 * under the context of a taskq thread in response to the completion 26513 * of the read portion of the rmw request completing under interrupt 26514 * context. The write request must be sent from here down the iostart 26515 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26516 * we use the layer index saved in the layer-private data area. 26517 */ 26518 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26519 26520 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26521 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26522 } 26523 26524 26525 /* 26526 * Function: sddump_do_read_of_rmw() 26527 * 26528 * Description: This routine will be called from sddump, If sddump is called 26529 * with an I/O which not aligned on device blocksize boundary 26530 * then the write has to be converted to read-modify-write. 26531 * Do the read part here in order to keep sddump simple. 26532 * Note - That the sd_mutex is held across the call to this 26533 * routine. 26534 * 26535 * Arguments: un - sd_lun 26536 * blkno - block number in terms of media block size. 26537 * nblk - number of blocks. 26538 * bpp - pointer to pointer to the buf structure. On return 26539 * from this function, *bpp points to the valid buffer 26540 * to which the write has to be done. 26541 * 26542 * Return Code: 0 for success or errno-type return code 26543 */ 26544 26545 static int 26546 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26547 struct buf **bpp) 26548 { 26549 int err; 26550 int i; 26551 int rval; 26552 struct buf *bp; 26553 struct scsi_pkt *pkt = NULL; 26554 uint32_t target_blocksize; 26555 26556 ASSERT(un != NULL); 26557 ASSERT(mutex_owned(SD_MUTEX(un))); 26558 26559 target_blocksize = un->un_tgt_blocksize; 26560 26561 mutex_exit(SD_MUTEX(un)); 26562 26563 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26564 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26565 if (bp == NULL) { 26566 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26567 "no resources for dumping; giving up"); 26568 err = ENOMEM; 26569 goto done; 26570 } 26571 26572 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26573 blkno, nblk); 26574 if (rval != 0) { 26575 scsi_free_consistent_buf(bp); 26576 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26577 "no resources for dumping; giving up"); 26578 err = ENOMEM; 26579 goto done; 26580 } 26581 26582 pkt->pkt_flags |= FLAG_NOINTR; 26583 26584 err = EIO; 26585 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26586 26587 /* 26588 * Scsi_poll returns 0 (success) if the command completes and 26589 * the status block is STATUS_GOOD. 
We should only check 26590 * errors if this condition is not true. Even then we should 26591 * send our own request sense packet only if we have a check 26592 * condition and auto request sense has not been performed by 26593 * the hba. 26594 */ 26595 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26596 26597 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26598 err = 0; 26599 break; 26600 } 26601 26602 /* 26603 * Check CMD_DEV_GONE 1st, give up if device is gone, 26604 * no need to read RQS data. 26605 */ 26606 if (pkt->pkt_reason == CMD_DEV_GONE) { 26607 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26608 "Error while dumping state with rmw..." 26609 "Device is gone\n"); 26610 break; 26611 } 26612 26613 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26614 SD_INFO(SD_LOG_DUMP, un, 26615 "sddump: read failed with CHECK, try # %d\n", i); 26616 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26617 (void) sd_send_polled_RQS(un); 26618 } 26619 26620 continue; 26621 } 26622 26623 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26624 int reset_retval = 0; 26625 26626 SD_INFO(SD_LOG_DUMP, un, 26627 "sddump: read failed with BUSY, try # %d\n", i); 26628 26629 if (un->un_f_lun_reset_enabled == TRUE) { 26630 reset_retval = scsi_reset(SD_ADDRESS(un), 26631 RESET_LUN); 26632 } 26633 if (reset_retval == 0) { 26634 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26635 } 26636 (void) sd_send_polled_RQS(un); 26637 26638 } else { 26639 SD_INFO(SD_LOG_DUMP, un, 26640 "sddump: read failed with 0x%x, try # %d\n", 26641 SD_GET_PKT_STATUS(pkt), i); 26642 mutex_enter(SD_MUTEX(un)); 26643 sd_reset_target(un, pkt); 26644 mutex_exit(SD_MUTEX(un)); 26645 } 26646 26647 /* 26648 * If we are not getting anywhere with lun/target resets, 26649 * let's reset the bus. 26650 */ 26651 if (i > SD_NDUMP_RETRIES/2) { 26652 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26653 (void) sd_send_polled_RQS(un); 26654 } 26655 26656 } 26657 scsi_destroy_pkt(pkt); 26658 26659 if (err != 0) { 26660 scsi_free_consistent_buf(bp); 26661 *bpp = NULL; 26662 } else { 26663 *bpp = bp; 26664 } 26665 26666 done: 26667 mutex_enter(SD_MUTEX(un)); 26668 return (err); 26669 } 26670 26671 26672 /* 26673 * Function: sd_failfast_flushq 26674 * 26675 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26676 * in b_flags and move them onto the failfast queue, then kick 26677 * off a thread to return all bp's on the failfast queue to 26678 * their owners with an error set. 26679 * 26680 * Arguments: un - pointer to the soft state struct for the instance. 26681 * 26682 * Context: may execute in interrupt context. 26683 */ 26684 26685 static void 26686 sd_failfast_flushq(struct sd_lun *un) 26687 { 26688 struct buf *bp; 26689 struct buf *next_waitq_bp; 26690 struct buf *prev_waitq_bp = NULL; 26691 26692 ASSERT(un != NULL); 26693 ASSERT(mutex_owned(SD_MUTEX(un))); 26694 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26695 ASSERT(un->un_failfast_bp == NULL); 26696 26697 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26698 "sd_failfast_flushq: entry: un:0x%p\n", un); 26699 26700 /* 26701 * Check if we should flush all bufs when entering failfast state, or 26702 * just those with B_FAILFAST set. 26703 */ 26704 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26705 /* 26706 * Move *all* bp's on the wait queue to the failfast flush 26707 * queue, including those that do NOT have B_FAILFAST set. 
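		 * The splice below is constant-time: the entire wait queue
		 * is appended to the tail of the failfast queue, after which
		 * a single pass over the moved bufs updates the wait queue
		 * kstats.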
26708 */ 26709 if (un->un_failfast_headp == NULL) { 26710 ASSERT(un->un_failfast_tailp == NULL); 26711 un->un_failfast_headp = un->un_waitq_headp; 26712 } else { 26713 ASSERT(un->un_failfast_tailp != NULL); 26714 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26715 } 26716 26717 un->un_failfast_tailp = un->un_waitq_tailp; 26718 26719 /* update kstat for each bp moved out of the waitq */ 26720 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26721 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26722 } 26723 26724 /* empty the waitq */ 26725 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26726 26727 } else { 26728 /* 26729 * Go thru the wait queue, pick off all entries with 26730 * B_FAILFAST set, and move these onto the failfast queue. 26731 */ 26732 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26733 /* 26734 * Save the pointer to the next bp on the wait queue, 26735 * so we get to it on the next iteration of this loop. 26736 */ 26737 next_waitq_bp = bp->av_forw; 26738 26739 /* 26740 * If this bp from the wait queue does NOT have 26741 * B_FAILFAST set, just move on to the next element 26742 * in the wait queue. Note, this is the only place 26743 * where it is correct to set prev_waitq_bp. 26744 */ 26745 if ((bp->b_flags & B_FAILFAST) == 0) { 26746 prev_waitq_bp = bp; 26747 continue; 26748 } 26749 26750 /* 26751 * Remove the bp from the wait queue. 26752 */ 26753 if (bp == un->un_waitq_headp) { 26754 /* The bp is the first element of the waitq. */ 26755 un->un_waitq_headp = next_waitq_bp; 26756 if (un->un_waitq_headp == NULL) { 26757 /* The wait queue is now empty */ 26758 un->un_waitq_tailp = NULL; 26759 } 26760 } else { 26761 /* 26762 * The bp is either somewhere in the middle 26763 * or at the end of the wait queue. 26764 */ 26765 ASSERT(un->un_waitq_headp != NULL); 26766 ASSERT(prev_waitq_bp != NULL); 26767 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26768 == 0); 26769 if (bp == un->un_waitq_tailp) { 26770 /* bp is the last entry on the waitq. */ 26771 ASSERT(next_waitq_bp == NULL); 26772 un->un_waitq_tailp = prev_waitq_bp; 26773 } 26774 prev_waitq_bp->av_forw = next_waitq_bp; 26775 } 26776 bp->av_forw = NULL; 26777 26778 /* 26779 * update kstat since the bp is moved out of 26780 * the waitq 26781 */ 26782 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26783 26784 /* 26785 * Now put the bp onto the failfast queue. 26786 */ 26787 if (un->un_failfast_headp == NULL) { 26788 /* failfast queue is currently empty */ 26789 ASSERT(un->un_failfast_tailp == NULL); 26790 un->un_failfast_headp = 26791 un->un_failfast_tailp = bp; 26792 } else { 26793 /* Add the bp to the end of the failfast q */ 26794 ASSERT(un->un_failfast_tailp != NULL); 26795 ASSERT(un->un_failfast_tailp->b_flags & 26796 B_FAILFAST); 26797 un->un_failfast_tailp->av_forw = bp; 26798 un->un_failfast_tailp = bp; 26799 } 26800 } 26801 } 26802 26803 /* 26804 * Now return all bp's on the failfast queue to their owners. 26805 */ 26806 while ((bp = un->un_failfast_headp) != NULL) { 26807 26808 un->un_failfast_headp = bp->av_forw; 26809 if (un->un_failfast_headp == NULL) { 26810 un->un_failfast_tailp = NULL; 26811 } 26812 26813 /* 26814 * We want to return the bp with a failure error code, but 26815 * we do not want a call to sd_start_cmds() to occur here, 26816 * so use sd_return_failed_command_no_restart() instead of 26817 * sd_return_failed_command(). 26818 */ 26819 sd_return_failed_command_no_restart(un, bp, EIO); 26820 } 26821 26822 /* Flush the xbuf queues if required. 
*/ 26823 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 26824 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 26825 } 26826 26827 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26828 "sd_failfast_flushq: exit: un:0x%p\n", un); 26829 } 26830 26831 26832 /* 26833 * Function: sd_failfast_flushq_callback 26834 * 26835 * Description: Return TRUE if the given bp meets the criteria for failfast 26836 * flushing. Used with ddi_xbuf_flushq(9F). 26837 * 26838 * Arguments: bp - ptr to buf struct to be examined. 26839 * 26840 * Context: Any 26841 */ 26842 26843 static int 26844 sd_failfast_flushq_callback(struct buf *bp) 26845 { 26846 /* 26847 * Return TRUE if (1) we want to flush ALL bufs when the failfast 26848 * state is entered; OR (2) the given bp has B_FAILFAST set. 26849 */ 26850 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 26851 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 26852 } 26853 26854 26855 26856 /* 26857 * Function: sd_setup_next_xfer 26858 * 26859 * Description: Prepare next I/O operation using DMA_PARTIAL 26860 * 26861 */ 26862 26863 static int 26864 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 26865 struct scsi_pkt *pkt, struct sd_xbuf *xp) 26866 { 26867 ssize_t num_blks_not_xfered; 26868 daddr_t strt_blk_num; 26869 ssize_t bytes_not_xfered; 26870 int rval; 26871 26872 ASSERT(pkt->pkt_resid == 0); 26873 26874 /* 26875 * Calculate next block number and amount to be transferred. 26876 * 26877 * How much data NOT transfered to the HBA yet. 26878 */ 26879 bytes_not_xfered = xp->xb_dma_resid; 26880 26881 /* 26882 * figure how many blocks NOT transfered to the HBA yet. 26883 */ 26884 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 26885 26886 /* 26887 * set starting block number to the end of what WAS transfered. 26888 */ 26889 strt_blk_num = xp->xb_blkno + 26890 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 26891 26892 /* 26893 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 26894 * will call scsi_initpkt with NULL_FUNC so we do not have to release 26895 * the disk mutex here. 26896 */ 26897 rval = sd_setup_next_rw_pkt(un, pkt, bp, 26898 strt_blk_num, num_blks_not_xfered); 26899 26900 if (rval == 0) { 26901 26902 /* 26903 * Success. 26904 * 26905 * Adjust things if there are still more blocks to be 26906 * transfered. 26907 */ 26908 xp->xb_dma_resid = pkt->pkt_resid; 26909 pkt->pkt_resid = 0; 26910 26911 return (1); 26912 } 26913 26914 /* 26915 * There's really only one possible return value from 26916 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 26917 * returns NULL. 26918 */ 26919 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 26920 26921 bp->b_resid = bp->b_bcount; 26922 bp->b_flags |= B_ERROR; 26923 26924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26925 "Error setting up next portion of DMA transfer\n"); 26926 26927 return (0); 26928 } 26929 26930 /* 26931 * Function: sd_panic_for_res_conflict 26932 * 26933 * Description: Call panic with a string formatted with "Reservation Conflict" 26934 * and a human readable identifier indicating the SD instance 26935 * that experienced the reservation conflict. 26936 * 26937 * Arguments: un - pointer to the soft state struct for the instance. 26938 * 26939 * Context: may execute in interrupt context. 
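 *
 *		The resulting panic string looks like the following
 *		(hypothetical device path):
 *
 *			Reservation Conflict
 *			Disk: /pci@0,0/pci1000,30@10/sd@1,0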
26940 */ 26941 26942 #define SD_RESV_CONFLICT_FMT_LEN 40 26943 void 26944 sd_panic_for_res_conflict(struct sd_lun *un) 26945 { 26946 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26947 char path_str[MAXPATHLEN]; 26948 26949 (void) snprintf(panic_str, sizeof (panic_str), 26950 "Reservation Conflict\nDisk: %s", 26951 ddi_pathname(SD_DEVINFO(un), path_str)); 26952 26953 panic(panic_str); 26954 } 26955 26956 /* 26957 * Note: The following sd_faultinjection_ioctl( ) routines implement 26958 * driver support for handling fault injection for error analysis 26959 * causing faults in multiple layers of the driver. 26960 * 26961 */ 26962 26963 #ifdef SD_FAULT_INJECTION 26964 static uint_t sd_fault_injection_on = 0; 26965 26966 /* 26967 * Function: sd_faultinjection_ioctl() 26968 * 26969 * Description: This routine is the driver entry point for handling 26970 * faultinjection ioctls to inject errors into the 26971 * layer model 26972 * 26973 * Arguments: cmd - the ioctl cmd received 26974 * arg - the arguments from user and returns 26975 */ 26976 26977 static void 26978 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 26979 26980 uint_t i; 26981 uint_t rval; 26982 26983 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 26984 26985 mutex_enter(SD_MUTEX(un)); 26986 26987 switch (cmd) { 26988 case SDIOCRUN: 26989 /* Allow pushed faults to be injected */ 26990 SD_INFO(SD_LOG_SDTEST, un, 26991 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 26992 26993 sd_fault_injection_on = 1; 26994 26995 SD_INFO(SD_LOG_IOERR, un, 26996 "sd_faultinjection_ioctl: run finished\n"); 26997 break; 26998 26999 case SDIOCSTART: 27000 /* Start Injection Session */ 27001 SD_INFO(SD_LOG_SDTEST, un, 27002 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27003 27004 sd_fault_injection_on = 0; 27005 un->sd_injection_mask = 0xFFFFFFFF; 27006 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27007 un->sd_fi_fifo_pkt[i] = NULL; 27008 un->sd_fi_fifo_xb[i] = NULL; 27009 un->sd_fi_fifo_un[i] = NULL; 27010 un->sd_fi_fifo_arq[i] = NULL; 27011 } 27012 un->sd_fi_fifo_start = 0; 27013 un->sd_fi_fifo_end = 0; 27014 27015 mutex_enter(&(un->un_fi_mutex)); 27016 un->sd_fi_log[0] = '\0'; 27017 un->sd_fi_buf_len = 0; 27018 mutex_exit(&(un->un_fi_mutex)); 27019 27020 SD_INFO(SD_LOG_IOERR, un, 27021 "sd_faultinjection_ioctl: start finished\n"); 27022 break; 27023 27024 case SDIOCSTOP: 27025 /* Stop Injection Session */ 27026 SD_INFO(SD_LOG_SDTEST, un, 27027 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27028 sd_fault_injection_on = 0; 27029 un->sd_injection_mask = 0x0; 27030 27031 /* Empty stray or unuseds structs from fifo */ 27032 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27033 if (un->sd_fi_fifo_pkt[i] != NULL) { 27034 kmem_free(un->sd_fi_fifo_pkt[i], 27035 sizeof (struct sd_fi_pkt)); 27036 } 27037 if (un->sd_fi_fifo_xb[i] != NULL) { 27038 kmem_free(un->sd_fi_fifo_xb[i], 27039 sizeof (struct sd_fi_xb)); 27040 } 27041 if (un->sd_fi_fifo_un[i] != NULL) { 27042 kmem_free(un->sd_fi_fifo_un[i], 27043 sizeof (struct sd_fi_un)); 27044 } 27045 if (un->sd_fi_fifo_arq[i] != NULL) { 27046 kmem_free(un->sd_fi_fifo_arq[i], 27047 sizeof (struct sd_fi_arq)); 27048 } 27049 un->sd_fi_fifo_pkt[i] = NULL; 27050 un->sd_fi_fifo_un[i] = NULL; 27051 un->sd_fi_fifo_xb[i] = NULL; 27052 un->sd_fi_fifo_arq[i] = NULL; 27053 } 27054 un->sd_fi_fifo_start = 0; 27055 un->sd_fi_fifo_end = 0; 27056 27057 SD_INFO(SD_LOG_IOERR, un, 27058 "sd_faultinjection_ioctl: stop finished\n"); 27059 break; 27060 27061 case SDIOCINSERTPKT: 27062 /* Store a 
packet struct to be pushed onto fifo */ 27063 SD_INFO(SD_LOG_SDTEST, un, 27064 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27065 27066 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27067 27068 sd_fault_injection_on = 0; 27069 27070 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 27071 if (un->sd_fi_fifo_pkt[i] != NULL) { 27072 kmem_free(un->sd_fi_fifo_pkt[i], 27073 sizeof (struct sd_fi_pkt)); 27074 } 27075 if (arg != NULL) { 27076 un->sd_fi_fifo_pkt[i] = 27077 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27078 if (un->sd_fi_fifo_pkt[i] == NULL) { 27079 /* Alloc failed don't store anything */ 27080 break; 27081 } 27082 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27083 sizeof (struct sd_fi_pkt), 0); 27084 if (rval == -1) { 27085 kmem_free(un->sd_fi_fifo_pkt[i], 27086 sizeof (struct sd_fi_pkt)); 27087 un->sd_fi_fifo_pkt[i] = NULL; 27088 } 27089 } else { 27090 SD_INFO(SD_LOG_IOERR, un, 27091 "sd_faultinjection_ioctl: pkt null\n"); 27092 } 27093 break; 27094 27095 case SDIOCINSERTXB: 27096 /* Store a xb struct to be pushed onto fifo */ 27097 SD_INFO(SD_LOG_SDTEST, un, 27098 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27099 27100 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27101 27102 sd_fault_injection_on = 0; 27103 27104 if (un->sd_fi_fifo_xb[i] != NULL) { 27105 kmem_free(un->sd_fi_fifo_xb[i], 27106 sizeof (struct sd_fi_xb)); 27107 un->sd_fi_fifo_xb[i] = NULL; 27108 } 27109 if (arg != NULL) { 27110 un->sd_fi_fifo_xb[i] = 27111 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27112 if (un->sd_fi_fifo_xb[i] == NULL) { 27113 /* Alloc failed don't store anything */ 27114 break; 27115 } 27116 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27117 sizeof (struct sd_fi_xb), 0); 27118 27119 if (rval == -1) { 27120 kmem_free(un->sd_fi_fifo_xb[i], 27121 sizeof (struct sd_fi_xb)); 27122 un->sd_fi_fifo_xb[i] = NULL; 27123 } 27124 } else { 27125 SD_INFO(SD_LOG_IOERR, un, 27126 "sd_faultinjection_ioctl: xb null\n"); 27127 } 27128 break; 27129 27130 case SDIOCINSERTUN: 27131 /* Store a un struct to be pushed onto fifo */ 27132 SD_INFO(SD_LOG_SDTEST, un, 27133 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27134 27135 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27136 27137 sd_fault_injection_on = 0; 27138 27139 if (un->sd_fi_fifo_un[i] != NULL) { 27140 kmem_free(un->sd_fi_fifo_un[i], 27141 sizeof (struct sd_fi_un)); 27142 un->sd_fi_fifo_un[i] = NULL; 27143 } 27144 if (arg != NULL) { 27145 un->sd_fi_fifo_un[i] = 27146 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27147 if (un->sd_fi_fifo_un[i] == NULL) { 27148 /* Alloc failed don't store anything */ 27149 break; 27150 } 27151 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27152 sizeof (struct sd_fi_un), 0); 27153 if (rval == -1) { 27154 kmem_free(un->sd_fi_fifo_un[i], 27155 sizeof (struct sd_fi_un)); 27156 un->sd_fi_fifo_un[i] = NULL; 27157 } 27158 27159 } else { 27160 SD_INFO(SD_LOG_IOERR, un, 27161 "sd_faultinjection_ioctl: un null\n"); 27162 } 27163 27164 break; 27165 27166 case SDIOCINSERTARQ: 27167 /* Store a arq struct to be pushed onto fifo */ 27168 SD_INFO(SD_LOG_SDTEST, un, 27169 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27170 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27171 27172 sd_fault_injection_on = 0; 27173 27174 if (un->sd_fi_fifo_arq[i] != NULL) { 27175 kmem_free(un->sd_fi_fifo_arq[i], 27176 sizeof (struct sd_fi_arq)); 27177 un->sd_fi_fifo_arq[i] = NULL; 27178 } 27179 if (arg != NULL) { 27180 un->sd_fi_fifo_arq[i] = 27181 kmem_alloc(sizeof (struct sd_fi_arq), 
			    KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything. */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log, for
 *		later retrieval via sd_faultinjection_ioctl(), for use in
 *		fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error-injection scenario.
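 *
 *		A typical session, per the ioctl cases above (illustrative
 *		ordering): SDIOCSTART resets the fifo and opens a session,
 *		SDIOCINSERT* stages fault records, SDIOCPUSH commits them,
 *		and SDIOCRUN arms injection; this routine then applies the
 *		staged faults to packets as they complete, and
 *		SDIOCRETRIEVE returns the accumulated log.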
27293 * 27294 * Arguments: pktp - packet to be changed 27295 */ 27296 27297 static void 27298 sd_faultinjection(struct scsi_pkt *pktp) 27299 { 27300 uint_t i; 27301 struct sd_fi_pkt *fi_pkt; 27302 struct sd_fi_xb *fi_xb; 27303 struct sd_fi_un *fi_un; 27304 struct sd_fi_arq *fi_arq; 27305 struct buf *bp; 27306 struct sd_xbuf *xb; 27307 struct sd_lun *un; 27308 27309 ASSERT(pktp != NULL); 27310 27311 /* pull bp xb and un from pktp */ 27312 bp = (struct buf *)pktp->pkt_private; 27313 xb = SD_GET_XBUF(bp); 27314 un = SD_GET_UN(bp); 27315 27316 ASSERT(un != NULL); 27317 27318 mutex_enter(SD_MUTEX(un)); 27319 27320 SD_TRACE(SD_LOG_SDTEST, un, 27321 "sd_faultinjection: entry Injection from sdintr\n"); 27322 27323 /* if injection is off return */ 27324 if (sd_fault_injection_on == 0 || 27325 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27326 mutex_exit(SD_MUTEX(un)); 27327 return; 27328 } 27329 27330 27331 /* take next set off fifo */ 27332 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27333 27334 fi_pkt = un->sd_fi_fifo_pkt[i]; 27335 fi_xb = un->sd_fi_fifo_xb[i]; 27336 fi_un = un->sd_fi_fifo_un[i]; 27337 fi_arq = un->sd_fi_fifo_arq[i]; 27338 27339 27340 /* set variables accordingly */ 27341 /* set pkt if it was on fifo */ 27342 if (fi_pkt != NULL) { 27343 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27344 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27345 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27346 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27347 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27348 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27349 27350 } 27351 27352 /* set xb if it was on fifo */ 27353 if (fi_xb != NULL) { 27354 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27355 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27356 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27357 SD_CONDSET(xb, xb, xb_victim_retry_count, 27358 "xb_victim_retry_count"); 27359 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27360 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27361 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27362 27363 /* copy in block data from sense */ 27364 if (fi_xb->xb_sense_data[0] != -1) { 27365 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27366 SENSE_LENGTH); 27367 } 27368 27369 /* copy in extended sense codes */ 27370 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27371 "es_code"); 27372 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27373 "es_key"); 27374 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27375 "es_add_code"); 27376 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27377 es_qual_code, "es_qual_code"); 27378 } 27379 27380 /* set un if it was on fifo */ 27381 if (fi_un != NULL) { 27382 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27383 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27384 SD_CONDSET(un, un, un_reset_retry_count, 27385 "un_reset_retry_count"); 27386 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27387 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27388 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27389 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27390 "un_f_allow_bus_device_reset"); 27391 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27392 27393 } 27394 27395 /* copy in auto request sense if it was on fifo */ 27396 if (fi_arq != NULL) { 27397 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27398 } 27399 27400 /* free structs */ 27401 if (un->sd_fi_fifo_pkt[i] != NULL) { 27402 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the freed pointers are set
	 * to NULL. Since we use these pointers to determine whether values
	 * were set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before it is called, the
 * properties in the conf file should already have been processed, including
 * the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. The differences are defined below:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device that can provide the 0x80 or 0x83
 *    VPD page (refer to the INQUIRY command of the SCSI SPC specification),
 *    a unique device ID is created to identify this device. For other
 *    non-removable media devices, a default device ID is created only if
 *    this device has at least 2 alternate cylinders. Otherwise, this device
 *    has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *         false            false     |        Yes
 *         false            true      |        Yes
 *         true               x       |        No
 *    -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses, which are needed to access storage spaces
 *    larger than 2TB. Other commands have no such capability. Without
 *    group 4 support, it is impossible to make full use of the storage
 *    space of a disk with a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *         false            false     false  |    1
 *         false            false     true   |    4
 *         false            true      false  |    1
 *         false            true      true   |    4
 *         true               x         x    |    5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
27486 * 27487 * -------------------------------------------------------------- 27488 * Direct-Access removable media hotpluggable | Check Label 27489 * ------------------------------------------------------------- 27490 * false false false | No 27491 * false false true | No 27492 * false true false | Yes 27493 * false true true | Yes 27494 * true x x | Yes 27495 * -------------------------------------------------------------- 27496 * 27497 * 27498 * 4. Building default VTOC label 27499 * 27500 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27501 * If those devices have no valid VTOC label, sd(7d) will attempt to 27502 * create default VTOC for them. Currently sd creates default VTOC label 27503 * for all devices on x86 platform (VTOC_16), but only for removable 27504 * media devices on SPARC (VTOC_8). 27505 * 27506 * ----------------------------------------------------------- 27507 * removable media hotpluggable platform | Default Label 27508 * ----------------------------------------------------------- 27509 * false false sparc | No 27510 * false true x86 | Yes 27511 * false true sparc | Yes 27512 * true x x | Yes 27513 * ---------------------------------------------------------- 27514 * 27515 * 27516 * 5. Supported blocksizes of target devices 27517 * 27518 * Sd supports non-512-byte blocksize for removable media devices only. 27519 * For other devices, only 512-byte blocksize is supported. This may be 27520 * changed in near future because some RAID devices require non-512-byte 27521 * blocksize 27522 * 27523 * ----------------------------------------------------------- 27524 * removable media hotpluggable | non-512-byte blocksize 27525 * ----------------------------------------------------------- 27526 * false false | No 27527 * false true | No 27528 * true x | Yes 27529 * ----------------------------------------------------------- 27530 * 27531 * 27532 * 6. Automatic mount & unmount 27533 * 27534 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27535 * if a device is removable media device. It return 1 for removable media 27536 * devices, and 0 for others. 27537 * 27538 * The automatic mounting subsystem should distinguish between the types 27539 * of devices and apply automounting policies to each. 27540 * 27541 * 27542 * 7. fdisk partition management 27543 * 27544 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27545 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27546 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27547 * fdisk partitions on both x86 and SPARC platform. 27548 * 27549 * ----------------------------------------------------------- 27550 * platform removable media USB/1394 | fdisk supported 27551 * ----------------------------------------------------------- 27552 * x86 X X | true 27553 * ------------------------------------------------------------ 27554 * sparc X X | false 27555 * ------------------------------------------------------------ 27556 * 27557 * 27558 * 8. MBOOT/MBR 27559 * 27560 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27561 * read/write mboot for removable media devices on sparc platform. 
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable |  Check Label
 *     --------------------------------------------------------------
 *         false            false            false    |     No
 *         false            false            true     |     No
 *         false            true             false    |     Yes
 *         false            true             true     |     Yes
 *         true               x                x      |     Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *     As section 3 says, sd checks whether certain kinds of devices have
 *     a VTOC label. If those devices have no valid VTOC label, sd(7d)
 *     will attempt to create a default VTOC label for them. Currently sd
 *     creates a default VTOC label for all devices on the x86 platform
 *     (VTOC_16), but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform |  Default Label
 *     -----------------------------------------------------------
 *         false             false       sparc   |      No
 *         false             false        x86    |      Yes
 *         false             true         x86    |      Yes
 *         false             true        sparc   |      Yes
 *         true                x           x     |      Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     sd supports non-512-byte blocksizes for removable media devices
 *     only. For other devices, only a 512-byte blocksize is supported.
 *     This may change in the near future because some RAID devices
 *     require a non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable | non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false    |      No
 *         false             true     |      No
 *         true                x      |      Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used
 *     to query whether a device is a removable media device. It returns 1
 *     for removable media devices and 0 for others. (A userland usage
 *     sketch follows this comment block.)
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *     sd does not support fdisk partitions at all. Note: pcfs(7fs) can
 *     recognize fdisk partitions on both x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394 | fdisk supported
 *     -----------------------------------------------------------
 *      x86             X               X    |      true
 *     -----------------------------------------------------------
 *      sparc           X               X    |      false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) does not support fdisk on the SPARC platform, it
 *     does support reading and writing the mboot for removable media
 *     devices on SPARC.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394 | mboot supported
 *     -----------------------------------------------------------
 *      x86             X               X    |      true
 *     -----------------------------------------------------------
 *      sparc         false           false  |      false
 *      sparc         false           true   |      true
 *      sparc         true            false  |      true
 *      sparc         true            true   |      true
 *     -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *     If opening a disk device fails, an errno is returned. For some
 *     kinds of errors, the errno returned depends on whether the device
 *     is a removable media device. This brings USB/1394 hard disks in
 *     line with expected hard disk behavior; it is not expected to break
 *     any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable |   errno
 *     ------------------------------------------------------
 *         false             false    |   EIO
 *         false             true     |   EIO
 *         true                x      |   ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These ioctls are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false    |      No
 *         false             true     |      No
 *         true                x      |      Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB
 *     and Firewire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable |   kstat
 *     ------------------------------------------------------
 *         false             false    |   Yes
 *         false             true     |   Yes
 *         true                x      |   No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media   hotpluggable | "removable-media"   "hotpluggable"
 *     ---------------------------------------------------------------------
 *         false             false    |        No                 No
 *         false             true     |        No                 Yes
 *         true              false    |        Yes                No
 *         true              true     |        Yes                Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices and devices that
 *     support LOG SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media   hotpluggable   pm-capable | power manage
 *     ------------------------------------------------------------
 *         false             false        false    |      No
 *         false             false        true     |      Yes
 *         false             true         false    |      No
 *         false             true         true     |      Yes
 *         true                x            x      |      Yes
 *     ------------------------------------------------------------
 *
 *     USB and Firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd does not permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system
 *     environment. However, sd does not enforce this for removable media
 *     devices; instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Using such devices on a 32-bit
 *     system is therefore only partially supported, which can cause
 *     unexpected results.
 *
 *     ---------------------------------------------------------------------
 *     removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
 *     ---------------------------------------------------------------------
 *         false           false  |      true      |        No
 *         false           true   |      true      |        No
 *         true            false  |      true      |        Yes
 *         true            true   |      true      |        Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable. If the
 *     device is write-protected, the open is aborted.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394 |   WP Check
 *     ------------------------------------------------------------
 *         false           false  |      No
 *         false           true   |      No
 *         true            false  |      Yes
 *         true            true   |      Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd only prints a
 *     syslog message for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394 |   print syslog
 *     ------------------------------------------------------------
 *         false           false  |      Yes
 *         false           true   |      No
 *         true            false  |      No
 *         true            true   |      No
 *     ------------------------------------------------------------
 */
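
/*
 * Illustrative sketch for section 2 above (guarded out of compilation):
 * why group 4 matters for >2TB disks. READ(10) carries a 32-bit LBA and
 * so cannot address blocks beyond 2TB with 512-byte blocks, while the
 * group 4 READ(16) CDB carries a 64-bit LBA. The helper below is
 * hypothetical and simply shows the big-endian encoding of a 64-bit LBA
 * into a 16-byte CDB; the real driver builds CDBs through the SCSA
 * framework.
 */
#if 0
static void
example_build_read16_cdb(uint8_t cdb[16], uint64_t lba, uint32_t nblks)
{
	int i;

	bzero(cdb, 16);
	cdb[0] = 0x88;				/* READ(16), group 4 opcode */
	for (i = 0; i < 8; i++)			/* bytes 2-9: 64-bit LBA, MSB first */
		cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
	for (i = 0; i < 4; i++)			/* bytes 10-13: transfer length */
		cdb[10 + i] = (uint8_t)(nblks >> (8 * (3 - i)));
}
#endif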
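
/*
 * Illustrative sketch for section 6 above (guarded out; this is userland
 * code, not driver code): how an application such as an automounter
 * might use the DKIOCREMOVABLE ioctl to classify a disk. Error handling
 * is minimal and the program is hypothetical.
 */
#if 0
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int fd, removable = 0;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <raw-device>\n", argv[0]);
		return (1);
	}
	/* O_NDELAY allows the open to succeed with no media present */
	if ((fd = open(argv[1], O_RDONLY | O_NDELAY)) < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DKIOCREMOVABLE, &removable) < 0) {
		perror("DKIOCREMOVABLE");
		(void) close(fd);
		return (1);
	}
	(void) printf("%s: %s media\n", argv[1],
	    removable ? "removable" : "non-removable");
	(void) close(fd);
	return (0);
}
#endif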

static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device the medium can be changed after the device has
		 * been opened, so we must support that operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support
		 * DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive, in which case
		 * we do not care whether the device is writable. Without
		 * the NDELAY flag, however, we must check whether the
		 * media is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * A SCSI watch thread must be started to monitor the media
		 * state: when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices do not support the START_STOP_UNIT command,
		 * so check whether a device supports it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices do not support
		 * LOG_SENSE, we cannot use that command to check whether a
		 * removable media device supports power management. We
		 * assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Create a zero-length (boolean) "removable-media"
		 * property for removable media devices. Note that the
		 * return value of the property creation is not checked:
		 * if the property cannot be created, we do not want the
		 * attach to fail altogether. This is consistent with
		 * other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * formats: fixed format and descriptor format. At present,
		 * we do not support descriptor-format sense data for
		 * removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set "enable-partition-kstats" to 0 in sd.conf to disable
		 * kstats. The default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property. If
		 * "pm-capable" exists and is non-zero, we can power manage
		 * the device without checking the start/stop cycle count
		 * log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * we should not power manage the device.
		 *
		 * If "pm-capable" does not exist, pm_capable_prop will be
		 * set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd
		 * will check the start/stop cycle count log sense page and
		 * power manage the device if the cycle count limit has not
		 * been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Treat any value other than SD_PM_CAPABLE_FALSE
			 * (0) and SD_PM_CAPABLE_UNDEFINED (-1) as "true"
			 * (SD_PM_CAPABLE_TRUE, 1) to simplify later checks.
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Hotpluggable devices must be watched as well, since that
		 * is the only way for userland applications to detect hot
		 * removal while the device is busy or mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}
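
/*
 * Illustrative sketch (guarded out, hypothetical): how a parent nexus
 * that knows its child disk can be power managed might create the
 * "pm-capable" property consumed by sd_set_unit_attributes() above. Per
 * the logic there, any value other than SD_PM_CAPABLE_FALSE (0) and
 * SD_PM_CAPABLE_UNDEFINED (-1) is treated as "true". The function name
 * is invented for this example.
 */
#if 0
static void
example_set_pm_capable(dev_info_t *child_dip)
{
	if (ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
	    "pm-capable", 1) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "could not create pm-capable property");
	}
}
#endif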

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * address to target blocks and remember the byte offset
		 * at which the caller's data begins.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of the target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned; use a bounce buffer */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
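
/*
 * Illustrative sketch (guarded out, hypothetical): the block-size
 * conversion arithmetic used by sd_tg_rdwr() above when the system block
 * size (512 bytes) differs from the target block size, e.g. a
 * 2048-byte-per-block CD-ROM. The request is widened to whole target
 * blocks; if it was not already aligned, a bounce buffer covers the
 * partial blocks at either end. Names are invented for this example.
 */
#if 0
static void
example_widen_request(uint32_t sys_bsize, uint32_t tgt_bsize,
    diskaddr_t start_block, size_t reqlength)
{
	uint64_t first_byte = (uint64_t)start_block * sys_bsize;
	diskaddr_t tgt_start = first_byte / tgt_bsize;
	diskaddr_t tgt_end = (first_byte + reqlength + tgt_bsize - 1) /
	    tgt_bsize;				/* round up */
	size_t buffer_size = (tgt_end - tgt_start) * tgt_bsize;
	int aligned = ((first_byte % tgt_bsize) == 0) &&
	    ((reqlength % tgt_bsize) == 0);

	/*
	 * E.g. sys 512, tgt 2048, start_block 3, reqlength 512:
	 * first_byte 1536, tgt_start 0, tgt_end 1, buffer_size 2048,
	 * not aligned, so a bounce buffer would be required.
	 */
	(void) buffer_size;
	(void) aligned;
}
#endif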

/*
 * sd_tg_getinfo:
 *	Provides capacity, block size, geometry, and attribute
 *	information for cmlb via sd_tgops.
 */
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
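
/*
 * Illustrative sketch (guarded out, hypothetical): how a caller in the
 * cmlb layer might use the tg_ops entry point above to fetch capacity
 * and block size. cmlb's real entry points differ; the tg_cookie is
 * treated as a path flag, matching the (int)(uintptr_t) decoding in
 * sd_tg_getinfo(). The function name is invented for this example.
 */
#if 0
static int
example_query_disk(dev_info_t *devi)
{
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		rval;

	rval = sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
	    (void *)(uintptr_t)SD_PATH_DIRECT);
	if (rval != 0)
		return (rval);
	rval = sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &lbasize,
	    (void *)(uintptr_t)SD_PATH_DIRECT);
	if (rval != 0)
		return (rval);
	cmn_err(CE_CONT, "capacity %llu blocks of %u bytes\n",
	    (unsigned long long)cap, lbasize);
	return (0);
}
#endif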