/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
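#if 0
/*
 * Illustrative sketch only (never compiled): one way the policy above
 * could be applied.  The helper name is hypothetical, and the use of
 * ddi_prop_get_int() to fetch the HBA's "interconnect-type" property
 * is an assumption for illustration, not necessarily how sd does it.
 */
static int
sd_example_get_interconnect_type(dev_info_t *devi)
{
	int it = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "interconnect-type", -1);

	switch (it) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FIBRE);	/* ssd behaviors */
	case -1:
		/* Property not reported by the HBA: compatibility mode. */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	default:
		/* Includes INTERCONNECT_1394 and INTERCONNECT_USB. */
		return (SD_INTERCONNECT_PARALLEL);
	}
}
#endif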
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif
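/*
 * Illustrative example (hypothetical declaration): with -D__fibre in
 * effect, the renames above make a declaration such as
 *	int sd_io_time = SD_IO_TIME;
 * compile as
 *	int ssd_io_time = SD_IO_TIME;
 * so the sd and ssd modules can be loaded together, each with its own
 * distinct (and debugger-resolvable) symbol names.
 */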
#ifdef SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether a lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
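#if 0
/*
 * Illustrative sketch only (never compiled): how the chain above might
 * be walked to find the per-target lun count for a given HBA node, in
 * the spirit of sd_scsi_get_target_lun_count().  The helper name is
 * hypothetical.
 */
static int
sd_example_lun_count(dev_info_t *hba_dip, int target)
{
	struct sd_scsi_hba_tgt_lun *cp;
	int nlun = -1;

	mutex_enter(&sd_scsi_target_lun_mutex);
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == hba_dip) {
			nlun = cp->nlun[target];
			break;
		}
	}
	mutex_exit(&sd_scsi_target_lun_mutex);
	return (nlun);
}
#endif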
/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
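#if 0
/*
 * Illustrative sketch only (never compiled): the conceptual cache
 * consultation performed by sd_scsi_probe_with_cache().  A cached
 * "no response" result lets the driver skip a slow scsi_probe() of a
 * known-empty target.  The helper name is hypothetical, and the real
 * routine also handles cache-node creation and locking.
 */
static int
sd_example_probe_cached(struct sd_scsi_probe_cache *cp, int tgt,
    struct scsi_device *devp, int (*fn)())
{
	if (cp->cache[tgt] == SCSIPROBE_NORESP)
		return (SCSIPROBE_NORESP);	/* known-empty target */

	cp->cache[tgt] = scsi_probe(devp, fn);	/* probe and remember */
	return (cp->cache[tgt]);
}
#endif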
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM DDYFT1835",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM DDYFT3695",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2D2",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM IC35LF2PR",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM 1724-100",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-2xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-22x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-4xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-42x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1726-3xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3526",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3542",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 3552",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1722",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1742",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1815",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM 1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",      SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",      SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",  SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",            SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
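#if 0
/*
 * Illustrative sketch only (never compiled): the blank-collapsing
 * comparison described in the sd_disk_table comment, simplified to
 * show just the run-of-blanks rule.  The helper name is hypothetical;
 * the driver's real matching is done by sd_sdconf_id_match() and
 * sd_blank_cmp(), which also honor the inquiry field boundaries.
 */
static int
sd_example_blank_cmp(const char *tbl, const char *id)
{
	for (;;) {
		/* A run of blanks on either side counts as one blank. */
		while (tbl[0] == ' ' && tbl[1] == ' ')
			tbl++;
		while (id[0] == ' ' && id[1] == ' ')
			id++;
		if (*tbl == '\0' || *id == '\0')
			break;
		if (SD_TOUPPER(*tbl) != SD_TOUPPER(*id))
			return (0);
		tbl++;
		id++;
	}
	return (*tbl == *id);	/* match iff both strings are exhausted */
}
#endif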
#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
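/*
 * Each sd_cdbtab entry below appears to describe one CDB format, per
 * the fields of struct sd_cdbinfo (see sddef.h): the CDB group size,
 * the command group bits for the opcode, the largest LBA the format
 * can address, and the largest block count it can carry.  For example,
 * the GROUP0 row reflects a 21-bit LBA (0x1FFFFF) and an 8-bit count
 * (0xFF).
 */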
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit
#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int  sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int   sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int   sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void  sd_get_nv_sup(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);
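#if 0
/*
 * Illustrative sketch only (never compiled): a call that would enable
 * the write cache while leaving the read-cache-disable setting alone,
 * assuming sd_cache_control()'s (rcd_flag, wce_flag) argument order as
 * declared above.
 */
(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
#endif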
static void  sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);
/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
caddr_t arg, int flag); 1469 static int sd_mhdioc_release(dev_t dev); 1470 static int sd_mhdioc_register_devid(dev_t dev); 1471 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1472 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1473 1474 /* 1475 * SCSI removable prototypes 1476 */ 1477 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1478 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1479 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1480 static int sr_pause_resume(dev_t dev, int mode); 1481 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1482 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1489 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1492 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1493 static int sr_eject(dev_t dev); 1494 static void sr_ejected(register struct sd_lun *un); 1495 static int sr_check_wp(dev_t dev); 1496 static int sd_check_media(dev_t dev, enum dkio_state state); 1497 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1498 static void sd_delayed_cv_broadcast(void *arg); 1499 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1501 1502 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1503 1504 /* 1505 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1506 */ 1507 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1508 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1509 static void sd_wm_cache_destructor(void *wm, void *un); 1510 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1511 daddr_t endb, ushort_t typ); 1512 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1513 daddr_t endb); 1514 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1515 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1516 static void sd_read_modify_write_task(void * arg); 1517 static int 1518 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1519 struct buf **bpp); 1520 1521 1522 /* 1523 * Function prototypes for failfast support. 
1524 */ 1525 static void sd_failfast_flushq(struct sd_lun *un); 1526 static int sd_failfast_flushq_callback(struct buf *bp); 1527 1528 /* 1529 * Function prototypes to check for lsi devices 1530 */ 1531 static void sd_is_lsi(struct sd_lun *un); 1532 1533 /* 1534 * Function prototypes for partial DMA support 1535 */ 1536 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1537 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1538 1539 1540 /* Function prototypes for cmlb */ 1541 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1542 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1543 1544 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1545 1546 /* 1547 * Constants for failfast support: 1548 * 1549 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1550 * failfast processing being performed. 1551 * 1552 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1553 * failfast processing on all bufs with B_FAILFAST set. 1554 */ 1555 1556 #define SD_FAILFAST_INACTIVE 0 1557 #define SD_FAILFAST_ACTIVE 1 1558 1559 /* 1560 * Bitmask to control behavior of buf(9S) flushes when a transition to 1561 * the failfast state occurs. Optional bits include: 1562 * 1563 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1564 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1565 * be flushed. 1566 * 1567 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1568 * driver, in addition to the regular wait queue. This includes the xbuf 1569 * queues. When clear, only the driver's wait queue will be flushed. 1570 */ 1571 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1572 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1573 1574 /* 1575 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1576 * to flush all queues within the driver. 1577 */ 1578 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1579 1580 1581 /* 1582 * SD Testing Fault Injection 1583 */ 1584 #ifdef SD_FAULT_INJECTION 1585 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1586 static void sd_faultinjection(struct scsi_pkt *pktp); 1587 static void sd_injection_log(char *buf, struct sd_lun *un); 1588 #endif 1589 1590 /* 1591 * Device driver ops vector 1592 */ 1593 static struct cb_ops sd_cb_ops = { 1594 sdopen, /* open */ 1595 sdclose, /* close */ 1596 sdstrategy, /* strategy */ 1597 nodev, /* print */ 1598 sddump, /* dump */ 1599 sdread, /* read */ 1600 sdwrite, /* write */ 1601 sdioctl, /* ioctl */ 1602 nodev, /* devmap */ 1603 nodev, /* mmap */ 1604 nodev, /* segmap */ 1605 nochpoll, /* poll */ 1606 sd_prop_op, /* cb_prop_op */ 1607 0, /* streamtab */ 1608 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1609 CB_REV, /* cb_rev */ 1610 sdaread, /* async I/O read entry point */ 1611 sdawrite /* async I/O write entry point */ 1612 }; 1613 1614 static struct dev_ops sd_ops = { 1615 DEVO_REV, /* devo_rev, */ 1616 0, /* refcnt */ 1617 sdinfo, /* info */ 1618 nulldev, /* identify */ 1619 sdprobe, /* probe */ 1620 sdattach, /* attach */ 1621 sddetach, /* detach */ 1622 nodev, /* reset */ 1623 &sd_cb_ops, /* driver operations */ 1624 NULL, /* bus operations */ 1625 sdpower /* power */ 1626 }; 1627 1628 1629 /* 1630 * This is the loadable module wrapper. 1631 */ 1632 #include <sys/modctl.h> 1633 1634 static struct modldrv modldrv = { 1635 &mod_driverops, /* Type of module. 
This one is a driver */
1636 	SD_MODULE_NAME,		/* Module name. */
1637 	&sd_ops			/* driver ops */
1638 };
1639 
1640 
1641 static struct modlinkage modlinkage = {
1642 	MODREV_1,
1643 	&modldrv,
1644 	NULL
1645 };
1646 
1647 static cmlb_tg_ops_t sd_tgops = {
1648 	TG_DK_OPS_VERSION_1,
1649 	sd_tg_rdwr,
1650 	sd_tg_getinfo
1651 };
1652 
1653 static struct scsi_asq_key_strings sd_additional_codes[] = {
1654 	0x81, 0, "Logical Unit is Reserved",
1655 	0x85, 0, "Audio Address Not Valid",
1656 	0xb6, 0, "Media Load Mechanism Failed",
1657 	0xb9, 0, "Audio Play Operation Aborted",
1658 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1659 	0x53, 2, "Medium removal prevented",
1660 	0x6f, 0, "Authentication failed during key exchange",
1661 	0x6f, 1, "Key not present",
1662 	0x6f, 2, "Key not established",
1663 	0x6f, 3, "Read without proper authentication",
1664 	0x6f, 4, "Mismatched region to this logical unit",
1665 	0x6f, 5, "Region reset count error",
1666 	0xffff, 0x0, NULL
1667 };
1668 
1669 
1670 /*
1671  * Struct for passing printing information for sense data messages
1672  */
1673 struct sd_sense_info {
1674 	int	ssi_severity;
1675 	int	ssi_pfa_flag;
1676 };
1677 
1678 /*
1679  * Table of function pointers for iostart-side routines. Separate "chains"
1680  * of layered function calls are formed by placing the function pointers
1681  * sequentially in the desired order. Functions are called according to an
1682  * incrementing table index ordering. The last function in each chain must
1683  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1684  * in the sd_iodone_chain[] array.
1685  *
1686  * Note: It may seem more natural to organize both the iostart and iodone
1687  * functions together, into an array of structures (or some similar
1688  * organization) with a common index, rather than two separate arrays which
1689  * must be maintained in synchronization. The purpose of this division is
1690  * to achieve improved performance: individual arrays allow for more
1691  * effective cache line utilization on certain platforms.
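 *
 * As an illustrative sketch (sd_xxx_iostart is a hypothetical layer, not a
 * routine in this file), each layer performs its own processing and then
 * hands the buf to the next layer in the chain via the SD_NEXT_IOSTART()
 * macro defined further below:
 *
 *	static void
 *	sd_xxx_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(layer-specific processing of bp)
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}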
1692 */ 1693 1694 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1695 1696 1697 static sd_chain_t sd_iostart_chain[] = { 1698 1699 /* Chain for buf IO for disk drive targets (PM enabled) */ 1700 sd_mapblockaddr_iostart, /* Index: 0 */ 1701 sd_pm_iostart, /* Index: 1 */ 1702 sd_core_iostart, /* Index: 2 */ 1703 1704 /* Chain for buf IO for disk drive targets (PM disabled) */ 1705 sd_mapblockaddr_iostart, /* Index: 3 */ 1706 sd_core_iostart, /* Index: 4 */ 1707 1708 /* Chain for buf IO for removable-media targets (PM enabled) */ 1709 sd_mapblockaddr_iostart, /* Index: 5 */ 1710 sd_mapblocksize_iostart, /* Index: 6 */ 1711 sd_pm_iostart, /* Index: 7 */ 1712 sd_core_iostart, /* Index: 8 */ 1713 1714 /* Chain for buf IO for removable-media targets (PM disabled) */ 1715 sd_mapblockaddr_iostart, /* Index: 9 */ 1716 sd_mapblocksize_iostart, /* Index: 10 */ 1717 sd_core_iostart, /* Index: 11 */ 1718 1719 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1720 sd_mapblockaddr_iostart, /* Index: 12 */ 1721 sd_checksum_iostart, /* Index: 13 */ 1722 sd_pm_iostart, /* Index: 14 */ 1723 sd_core_iostart, /* Index: 15 */ 1724 1725 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1726 sd_mapblockaddr_iostart, /* Index: 16 */ 1727 sd_checksum_iostart, /* Index: 17 */ 1728 sd_core_iostart, /* Index: 18 */ 1729 1730 /* Chain for USCSI commands (all targets) */ 1731 sd_pm_iostart, /* Index: 19 */ 1732 sd_core_iostart, /* Index: 20 */ 1733 1734 /* Chain for checksumming USCSI commands (all targets) */ 1735 sd_checksum_uscsi_iostart, /* Index: 21 */ 1736 sd_pm_iostart, /* Index: 22 */ 1737 sd_core_iostart, /* Index: 23 */ 1738 1739 /* Chain for "direct" USCSI commands (all targets) */ 1740 sd_core_iostart, /* Index: 24 */ 1741 1742 /* Chain for "direct priority" USCSI commands (all targets) */ 1743 sd_core_iostart, /* Index: 25 */ 1744 }; 1745 1746 /* 1747 * Macros to locate the first function of each iostart chain in the 1748 * sd_iostart_chain[] array. These are located by the index in the array. 1749 */ 1750 #define SD_CHAIN_DISK_IOSTART 0 1751 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1752 #define SD_CHAIN_RMMEDIA_IOSTART 5 1753 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1754 #define SD_CHAIN_CHKSUM_IOSTART 12 1755 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1756 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1757 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1758 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1759 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1760 1761 1762 /* 1763 * Table of function pointers for the iodone-side routines for the driver- 1764 * internal layering mechanism. The calling sequence for iodone routines 1765 * uses a decrementing table index, so the last routine called in a chain 1766 * must be at the lowest array index location for that chain. The last 1767 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1768 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1769 * of the functions in an iodone side chain must correspond to the ordering 1770 * of the iostart routines for that chain. Note that there is no iodone 1771 * side routine that corresponds to sd_core_iostart(), so there is no 1772 * entry in the table for this. 
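 *
 * For example (illustrative), a buf IO on the removable-media chain with PM
 * enabled begins completion processing at index 8 and proceeds in
 * decrementing-index order:
 *
 *	sd_pm_iodone			(Index: 8)
 *	sd_mapblocksize_iodone		(Index: 7)
 *	sd_mapblockaddr_iodone		(Index: 6)
 *	sd_buf_iodone			(Index: 5)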
1773  */
1774 
1775 static sd_chain_t sd_iodone_chain[] = {
1776 
1777 	/* Chain for buf IO for disk drive targets (PM enabled) */
1778 	sd_buf_iodone,			/* Index: 0 */
1779 	sd_mapblockaddr_iodone,		/* Index: 1 */
1780 	sd_pm_iodone,			/* Index: 2 */
1781 
1782 	/* Chain for buf IO for disk drive targets (PM disabled) */
1783 	sd_buf_iodone,			/* Index: 3 */
1784 	sd_mapblockaddr_iodone,		/* Index: 4 */
1785 
1786 	/* Chain for buf IO for removable-media targets (PM enabled) */
1787 	sd_buf_iodone,			/* Index: 5 */
1788 	sd_mapblockaddr_iodone,		/* Index: 6 */
1789 	sd_mapblocksize_iodone,		/* Index: 7 */
1790 	sd_pm_iodone,			/* Index: 8 */
1791 
1792 	/* Chain for buf IO for removable-media targets (PM disabled) */
1793 	sd_buf_iodone,			/* Index: 9 */
1794 	sd_mapblockaddr_iodone,		/* Index: 10 */
1795 	sd_mapblocksize_iodone,		/* Index: 11 */
1796 
1797 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1798 	sd_buf_iodone,			/* Index: 12 */
1799 	sd_mapblockaddr_iodone,		/* Index: 13 */
1800 	sd_checksum_iodone,		/* Index: 14 */
1801 	sd_pm_iodone,			/* Index: 15 */
1802 
1803 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1804 	sd_buf_iodone,			/* Index: 16 */
1805 	sd_mapblockaddr_iodone,		/* Index: 17 */
1806 	sd_checksum_iodone,		/* Index: 18 */
1807 
1808 	/* Chain for USCSI commands (non-checksum targets) */
1809 	sd_uscsi_iodone,		/* Index: 19 */
1810 	sd_pm_iodone,			/* Index: 20 */
1811 
1812 	/* Chain for USCSI commands (checksum targets) */
1813 	sd_uscsi_iodone,		/* Index: 21 */
1814 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1815 	sd_pm_iodone,			/* Index: 23 */
1816 
1817 	/* Chain for "direct" USCSI commands (all targets) */
1818 	sd_uscsi_iodone,		/* Index: 24 */
1819 
1820 	/* Chain for "direct priority" USCSI commands (all targets) */
1821 	sd_uscsi_iodone,		/* Index: 25 */
1822 };
1823 
1824 
1825 /*
1826  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1827  * each iodone-side chain. These are located by the array index, but as the
1828  * iodone side functions are called in a decrementing-index order, the
1829  * highest index number in each chain must be specified (as these correspond
1830  * to the first function in the iodone chain that will be called by the core
1831  * at IO completion time).
1832  */
1833 
1834 #define	SD_CHAIN_DISK_IODONE		2
1835 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1836 #define	SD_CHAIN_RMMEDIA_IODONE		8
1837 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1838 #define	SD_CHAIN_CHKSUM_IODONE		15
1839 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1840 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1841 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
1842 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1843 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1844 
1845 
1846 
1847 
1848 /*
1849  * Array to map a layering chain index to the appropriate initpkt routine.
1850  * The redundant entries are present so that the index used for accessing
1851  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1852  * with this table as well.
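 *
 * A sketch of the intended use, with hypothetical locals for illustration
 * (not verbatim driver code):
 *
 *	sd_initpkt_t initfunc = sd_initpkt_map[xp->xb_chain_iostart];
 *	rval = (*initfunc)(bp, &pktp);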
1853  */
1854 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1855 
1856 static sd_initpkt_t	sd_initpkt_map[] = {
1857 
1858 	/* Chain for buf IO for disk drive targets (PM enabled) */
1859 	sd_initpkt_for_buf,		/* Index: 0 */
1860 	sd_initpkt_for_buf,		/* Index: 1 */
1861 	sd_initpkt_for_buf,		/* Index: 2 */
1862 
1863 	/* Chain for buf IO for disk drive targets (PM disabled) */
1864 	sd_initpkt_for_buf,		/* Index: 3 */
1865 	sd_initpkt_for_buf,		/* Index: 4 */
1866 
1867 	/* Chain for buf IO for removable-media targets (PM enabled) */
1868 	sd_initpkt_for_buf,		/* Index: 5 */
1869 	sd_initpkt_for_buf,		/* Index: 6 */
1870 	sd_initpkt_for_buf,		/* Index: 7 */
1871 	sd_initpkt_for_buf,		/* Index: 8 */
1872 
1873 	/* Chain for buf IO for removable-media targets (PM disabled) */
1874 	sd_initpkt_for_buf,		/* Index: 9 */
1875 	sd_initpkt_for_buf,		/* Index: 10 */
1876 	sd_initpkt_for_buf,		/* Index: 11 */
1877 
1878 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1879 	sd_initpkt_for_buf,		/* Index: 12 */
1880 	sd_initpkt_for_buf,		/* Index: 13 */
1881 	sd_initpkt_for_buf,		/* Index: 14 */
1882 	sd_initpkt_for_buf,		/* Index: 15 */
1883 
1884 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1885 	sd_initpkt_for_buf,		/* Index: 16 */
1886 	sd_initpkt_for_buf,		/* Index: 17 */
1887 	sd_initpkt_for_buf,		/* Index: 18 */
1888 
1889 	/* Chain for USCSI commands (non-checksum targets) */
1890 	sd_initpkt_for_uscsi,		/* Index: 19 */
1891 	sd_initpkt_for_uscsi,		/* Index: 20 */
1892 
1893 	/* Chain for USCSI commands (checksum targets) */
1894 	sd_initpkt_for_uscsi,		/* Index: 21 */
1895 	sd_initpkt_for_uscsi,		/* Index: 22 */
1896 	sd_initpkt_for_uscsi,		/* Index: 23 */
1897 
1898 	/* Chain for "direct" USCSI commands (all targets) */
1899 	sd_initpkt_for_uscsi,		/* Index: 24 */
1900 
1901 	/* Chain for "direct priority" USCSI commands (all targets) */
1902 	sd_initpkt_for_uscsi,		/* Index: 25 */
1903 
1904 };
1905 
1906 
1907 /*
1908  * Array to map a layering chain index to the appropriate destroypkt routine.
1909  * The redundant entries are present so that the index used for accessing
1910  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1911  * with this table as well.
1912  */
1913 typedef void (*sd_destroypkt_t)(struct buf *);
1914 
1915 static sd_destroypkt_t	sd_destroypkt_map[] = {
1916 
1917 	/* Chain for buf IO for disk drive targets (PM enabled) */
1918 	sd_destroypkt_for_buf,		/* Index: 0 */
1919 	sd_destroypkt_for_buf,		/* Index: 1 */
1920 	sd_destroypkt_for_buf,		/* Index: 2 */
1921 
1922 	/* Chain for buf IO for disk drive targets (PM disabled) */
1923 	sd_destroypkt_for_buf,		/* Index: 3 */
1924 	sd_destroypkt_for_buf,		/* Index: 4 */
1925 
1926 	/* Chain for buf IO for removable-media targets (PM enabled) */
1927 	sd_destroypkt_for_buf,		/* Index: 5 */
1928 	sd_destroypkt_for_buf,		/* Index: 6 */
1929 	sd_destroypkt_for_buf,		/* Index: 7 */
1930 	sd_destroypkt_for_buf,		/* Index: 8 */
1931 
1932 	/* Chain for buf IO for removable-media targets (PM disabled) */
1933 	sd_destroypkt_for_buf,		/* Index: 9 */
1934 	sd_destroypkt_for_buf,		/* Index: 10 */
1935 	sd_destroypkt_for_buf,		/* Index: 11 */
1936 
1937 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1938 	sd_destroypkt_for_buf,		/* Index: 12 */
1939 	sd_destroypkt_for_buf,		/* Index: 13 */
1940 	sd_destroypkt_for_buf,		/* Index: 14 */
1941 	sd_destroypkt_for_buf,		/* Index: 15 */
1942 
1943 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1944 	sd_destroypkt_for_buf,		/* Index: 16 */
1945 	sd_destroypkt_for_buf,		/* Index: 17 */
1946 	sd_destroypkt_for_buf,		/* Index: 18 */
1947 
1948 	/* Chain for USCSI commands (non-checksum targets) */
1949 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1950 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1951 
1952 	/* Chain for USCSI commands (checksum targets) */
1953 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1954 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1955 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1956 
1957 	/* Chain for "direct" USCSI commands (all targets) */
1958 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1959 
1960 	/* Chain for "direct priority" USCSI commands (all targets) */
1961 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1962 
1963 };
1964 
1965 
1966 
1967 /*
1968  * Array to map a layering chain index to the appropriate chain "type".
1969  * The chain type indicates a specific property/usage of the chain.
1970  * The redundant entries are present so that the index used for accessing
1971  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1972  * with this table as well.
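 *
 * For example, the SD_IS_BUFIO() macro defined further below consults this
 * map to test whether an xbuf originated from a buf(9S) chain (illustrative):
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		(buf(9S)-specific handling)
 *	}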
1973  */
1974 
1975 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1976 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1977 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1978 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1979 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1980 						/* (for error recovery) */
1981 
1982 static int sd_chain_type_map[] = {
1983 
1984 	/* Chain for buf IO for disk drive targets (PM enabled) */
1985 	SD_CHAIN_BUFIO,			/* Index: 0 */
1986 	SD_CHAIN_BUFIO,			/* Index: 1 */
1987 	SD_CHAIN_BUFIO,			/* Index: 2 */
1988 
1989 	/* Chain for buf IO for disk drive targets (PM disabled) */
1990 	SD_CHAIN_BUFIO,			/* Index: 3 */
1991 	SD_CHAIN_BUFIO,			/* Index: 4 */
1992 
1993 	/* Chain for buf IO for removable-media targets (PM enabled) */
1994 	SD_CHAIN_BUFIO,			/* Index: 5 */
1995 	SD_CHAIN_BUFIO,			/* Index: 6 */
1996 	SD_CHAIN_BUFIO,			/* Index: 7 */
1997 	SD_CHAIN_BUFIO,			/* Index: 8 */
1998 
1999 	/* Chain for buf IO for removable-media targets (PM disabled) */
2000 	SD_CHAIN_BUFIO,			/* Index: 9 */
2001 	SD_CHAIN_BUFIO,			/* Index: 10 */
2002 	SD_CHAIN_BUFIO,			/* Index: 11 */
2003 
2004 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2005 	SD_CHAIN_BUFIO,			/* Index: 12 */
2006 	SD_CHAIN_BUFIO,			/* Index: 13 */
2007 	SD_CHAIN_BUFIO,			/* Index: 14 */
2008 	SD_CHAIN_BUFIO,			/* Index: 15 */
2009 
2010 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2011 	SD_CHAIN_BUFIO,			/* Index: 16 */
2012 	SD_CHAIN_BUFIO,			/* Index: 17 */
2013 	SD_CHAIN_BUFIO,			/* Index: 18 */
2014 
2015 	/* Chain for USCSI commands (non-checksum targets) */
2016 	SD_CHAIN_USCSI,			/* Index: 19 */
2017 	SD_CHAIN_USCSI,			/* Index: 20 */
2018 
2019 	/* Chain for USCSI commands (checksum targets) */
2020 	SD_CHAIN_USCSI,			/* Index: 21 */
2021 	SD_CHAIN_USCSI,			/* Index: 22 */
2022 	SD_CHAIN_USCSI,			/* Index: 23 */
2023 
2024 	/* Chain for "direct" USCSI commands (all targets) */
2025 	SD_CHAIN_DIRECT,		/* Index: 24 */
2026 
2027 	/* Chain for "direct priority" USCSI commands (all targets) */
2028 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2029 };
2030 
2031 
2032 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2033 #define	SD_IS_BUFIO(xp)			\
2034 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2035 
2036 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2037 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2038 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2039 
2040 
2041 
2042 /*
2043  * Struct, array, and macros to map a specific chain to the appropriate
2044  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2045  *
2046  * The sd_chain_index_map[] array is used at attach time to set the various
2047  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2048  * chain to be used with the instance. This allows different instances to use
2049  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2050  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2051  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2052  * dynamically and without the use of locking; and (2) a layer to update the
2053  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2054  * allowing deferred processing of an IO within the same chain from a
2055  * different execution context.
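 *
 * A sketch of the intended use (illustrative, not verbatim driver code):
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;	(at attach time)
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;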
2056 */ 2057 2058 struct sd_chain_index { 2059 int sci_iostart_index; 2060 int sci_iodone_index; 2061 }; 2062 2063 static struct sd_chain_index sd_chain_index_map[] = { 2064 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2065 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2066 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2067 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2068 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2069 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2070 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2071 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2072 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2073 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2074 }; 2075 2076 2077 /* 2078 * The following are indexes into the sd_chain_index_map[] array. 2079 */ 2080 2081 /* un->un_buf_chain_type must be set to one of these */ 2082 #define SD_CHAIN_INFO_DISK 0 2083 #define SD_CHAIN_INFO_DISK_NO_PM 1 2084 #define SD_CHAIN_INFO_RMMEDIA 2 2085 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2086 #define SD_CHAIN_INFO_CHKSUM 4 2087 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2088 2089 /* un->un_uscsi_chain_type must be set to one of these */ 2090 #define SD_CHAIN_INFO_USCSI_CMD 6 2091 /* USCSI with PM disabled is the same as DIRECT */ 2092 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2093 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2094 2095 /* un->un_direct_chain_type must be set to one of these */ 2096 #define SD_CHAIN_INFO_DIRECT_CMD 8 2097 2098 /* un->un_priority_chain_type must be set to one of these */ 2099 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2100 2101 /* size for devid inquiries */ 2102 #define MAX_INQUIRY_SIZE 0xF0 2103 2104 /* 2105 * Macros used by functions to pass a given buf(9S) struct along to the 2106 * next function in the layering chain for further processing. 2107 * 2108 * In the following macros, passing more than three arguments to the called 2109 * routines causes the optimizer for the SPARC compiler to stop doing tail 2110 * call elimination which results in significant performance degradation. 2111 */ 2112 #define SD_BEGIN_IOSTART(index, un, bp) \ 2113 ((*(sd_iostart_chain[index]))(index, un, bp)) 2114 2115 #define SD_BEGIN_IODONE(index, un, bp) \ 2116 ((*(sd_iodone_chain[index]))(index, un, bp)) 2117 2118 #define SD_NEXT_IOSTART(index, un, bp) \ 2119 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2120 2121 #define SD_NEXT_IODONE(index, un, bp) \ 2122 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2123 2124 /* 2125 * Function: _init 2126 * 2127 * Description: This is the driver _init(9E) entry point. 2128 * 2129 * Return Code: Returns the value from mod_install(9F) or 2130 * ddi_soft_state_init(9F) as appropriate. 2131 * 2132 * Context: Called when driver module loaded. 
2133  */
2134 
2135 int
2136 _init(void)
2137 {
2138 	int err;
2139 
2140 	/* establish driver name from module name */
2141 	sd_label = mod_modname(&modlinkage);
2142 
2143 	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2144 	    SD_MAXUNIT);
2145 
2146 	if (err != 0) {
2147 		return (err);
2148 	}
2149 
2150 	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2151 	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2152 	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2153 
2154 	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2155 	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2156 	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2157 
2158 	/*
2159 	 * it's OK to init here even for fibre devices
2160 	 */
2161 	sd_scsi_probe_cache_init();
2162 
2163 	sd_scsi_target_lun_init();
2164 
2165 	/*
2166 	 * Creating taskq before mod_install ensures that all callers (threads)
2167 	 * that enter the module after a successful mod_install encounter
2168 	 * a valid taskq.
2169 	 */
2170 	sd_taskq_create();
2171 
2172 	err = mod_install(&modlinkage);
2173 	if (err != 0) {
2174 		/* delete taskq if install fails */
2175 		sd_taskq_delete();
2176 
2177 		mutex_destroy(&sd_detach_mutex);
2178 		mutex_destroy(&sd_log_mutex);
2179 		mutex_destroy(&sd_label_mutex);
2180 
2181 		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2182 		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2183 		cv_destroy(&sd_tr.srq_inprocess_cv);
2184 
2185 		sd_scsi_probe_cache_fini();
2186 
2187 		sd_scsi_target_lun_fini();
2188 
2189 		ddi_soft_state_fini(&sd_state);
2190 		return (err);
2191 	}
2192 
2193 	return (err);
2194 }
2195 
2196 
2197 /*
2198  * Function: _fini
2199  *
2200  * Description: This is the driver _fini(9E) entry point.
2201  *
2202  * Return Code: Returns the value from mod_remove(9F)
2203  *
2204  * Context: Called when driver module is unloaded.
2205  */
2206 
2207 int
2208 _fini(void)
2209 {
2210 	int err;
2211 
2212 	if ((err = mod_remove(&modlinkage)) != 0) {
2213 		return (err);
2214 	}
2215 
2216 	sd_taskq_delete();
2217 
2218 	mutex_destroy(&sd_detach_mutex);
2219 	mutex_destroy(&sd_log_mutex);
2220 	mutex_destroy(&sd_label_mutex);
2221 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2222 
2223 	sd_scsi_probe_cache_fini();
2224 
2225 	sd_scsi_target_lun_fini();
2226 
2227 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2228 	cv_destroy(&sd_tr.srq_inprocess_cv);
2229 
2230 	ddi_soft_state_fini(&sd_state);
2231 
2232 	return (err);
2233 }
2234 
2235 
2236 /*
2237  * Function: _info
2238  *
2239  * Description: This is the driver _info(9E) entry point.
2240  *
2241  * Arguments: modinfop - pointer to the driver modinfo structure
2242  *
2243  * Return Code: Returns the value from mod_info(9F).
2244  *
2245  * Context: Kernel thread context
2246  */
2247 
2248 int
2249 _info(struct modinfo *modinfop)
2250 {
2251 	return (mod_info(&modlinkage, modinfop));
2252 }
2253 
2254 
2255 /*
2256  * The following routines implement the driver message logging facility.
2257  * They provide component- and level-based debug output filtering.
2258  * Output may also be restricted to messages for a single instance by
2259  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2260  * to NULL, then messages for all instances are printed.
2261  *
2262  * These routines have been cloned from each other due to the language
2263  * constraints of macros and variable argument list processing.
2264  */
2265 
2266 
2267 /*
2268  * Function: sd_log_err
2269  *
2270  * Description: This routine is called by the SD_ERROR macro for debug
2271  *		logging of error conditions.
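 *
 *		A typical call site looks like the following (illustrative;
 *		sd_foo is a hypothetical caller):
 *
 *		    SD_ERROR(SD_LOG_COMMON, un,
 *			"sd_foo: request failed, status 0x%x\n", status);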
2272  *
2273  * Arguments: comp - driver component being logged
2274  *		un - pointer to driver soft state (unit) structure
2275  *		fmt - error string and format to be logged
2276  */
2277 
2278 static void
2279 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2280 {
2281 	va_list		ap;
2282 	dev_info_t	*dev;
2283 
2284 	ASSERT(un != NULL);
2285 	dev = SD_DEVINFO(un);
2286 	ASSERT(dev != NULL);
2287 
2288 	/*
2289 	 * Filter messages based on the global component and level masks.
2290 	 * Also print if un matches the value of sd_debug_un, or if
2291 	 * sd_debug_un is set to NULL.
2292 	 */
2293 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2294 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2295 		mutex_enter(&sd_log_mutex);
2296 		va_start(ap, fmt);
2297 		(void) vsprintf(sd_log_buf, fmt, ap);
2298 		va_end(ap);
2299 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2300 		mutex_exit(&sd_log_mutex);
2301 	}
2302 #ifdef SD_FAULT_INJECTION
2303 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2304 	if (un->sd_injection_mask & comp) {
2305 		mutex_enter(&sd_log_mutex);
2306 		va_start(ap, fmt);
2307 		(void) vsprintf(sd_log_buf, fmt, ap);
2308 		va_end(ap);
2309 		sd_injection_log(sd_log_buf, un);
2310 		mutex_exit(&sd_log_mutex);
2311 	}
2312 #endif
2313 }
2314 
2315 
2316 /*
2317  * Function: sd_log_info
2318  *
2319  * Description: This routine is called by the SD_INFO macro for debug
2320  *		logging of general purpose informational conditions.
2321  *
2322  * Arguments: comp - driver component being logged
2323  *		un - pointer to driver soft state (unit) structure
2324  *		fmt - info string and format to be logged
2325  */
2326 
2327 static void
2328 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2329 {
2330 	va_list		ap;
2331 	dev_info_t	*dev;
2332 
2333 	ASSERT(un != NULL);
2334 	dev = SD_DEVINFO(un);
2335 	ASSERT(dev != NULL);
2336 
2337 	/*
2338 	 * Filter messages based on the global component and level masks.
2339 	 * Also print if un matches the value of sd_debug_un, or if
2340 	 * sd_debug_un is set to NULL.
2341 	 */
2342 	if ((sd_component_mask & component) &&
2343 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2344 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2345 		mutex_enter(&sd_log_mutex);
2346 		va_start(ap, fmt);
2347 		(void) vsprintf(sd_log_buf, fmt, ap);
2348 		va_end(ap);
2349 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2350 		mutex_exit(&sd_log_mutex);
2351 	}
2352 #ifdef SD_FAULT_INJECTION
2353 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2354 	if (un->sd_injection_mask & component) {
2355 		mutex_enter(&sd_log_mutex);
2356 		va_start(ap, fmt);
2357 		(void) vsprintf(sd_log_buf, fmt, ap);
2358 		va_end(ap);
2359 		sd_injection_log(sd_log_buf, un);
2360 		mutex_exit(&sd_log_mutex);
2361 	}
2362 #endif
2363 }
2364 
2365 
2366 /*
2367  * Function: sd_log_trace
2368  *
2369  * Description: This routine is called by the SD_TRACE macro for debug
2370  *		logging of trace conditions (i.e. function entry/exit).
2371  *
2372  * Arguments: comp - driver component being logged
2373  *		un - pointer to driver soft state (unit) structure
2374  *		fmt - trace string and format to be logged
2375  */
2376 
2377 static void
2378 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2379 {
2380 	va_list		ap;
2381 	dev_info_t	*dev;
2382 
2383 	ASSERT(un != NULL);
2384 	dev = SD_DEVINFO(un);
2385 	ASSERT(dev != NULL);
2386 
2387 	/*
2388 	 * Filter messages based on the global component and level masks.
2389 	 * Also print if un matches the value of sd_debug_un, or if
2390 	 * sd_debug_un is set to NULL.
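	 *
	 * (For example, as an illustrative debugging convention, a developer
	 * could set these globals from a kernel debugger:
	 *
	 *	sd_component_mask = 0xFFFFFFFF;
	 *	sd_level_mask = SD_LOGMASK_ERROR | SD_LOGMASK_TRACE;
	 *	sd_debug_un = <soft state pointer of the instance of interest>;
	 *
	 * to restrict trace output to a single instance.)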
2391 */ 2392 if ((sd_component_mask & component) && 2393 (sd_level_mask & SD_LOGMASK_TRACE) && 2394 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2395 mutex_enter(&sd_log_mutex); 2396 va_start(ap, fmt); 2397 (void) vsprintf(sd_log_buf, fmt, ap); 2398 va_end(ap); 2399 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2400 mutex_exit(&sd_log_mutex); 2401 } 2402 #ifdef SD_FAULT_INJECTION 2403 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2404 if (un->sd_injection_mask & component) { 2405 mutex_enter(&sd_log_mutex); 2406 va_start(ap, fmt); 2407 (void) vsprintf(sd_log_buf, fmt, ap); 2408 va_end(ap); 2409 sd_injection_log(sd_log_buf, un); 2410 mutex_exit(&sd_log_mutex); 2411 } 2412 #endif 2413 } 2414 2415 2416 /* 2417 * Function: sdprobe 2418 * 2419 * Description: This is the driver probe(9e) entry point function. 2420 * 2421 * Arguments: devi - opaque device info handle 2422 * 2423 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2424 * DDI_PROBE_FAILURE: If the probe failed. 2425 * DDI_PROBE_PARTIAL: If the instance is not present now, 2426 * but may be present in the future. 2427 */ 2428 2429 static int 2430 sdprobe(dev_info_t *devi) 2431 { 2432 struct scsi_device *devp; 2433 int rval; 2434 int instance; 2435 2436 /* 2437 * if it wasn't for pln, sdprobe could actually be nulldev 2438 * in the "__fibre" case. 2439 */ 2440 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2441 return (DDI_PROBE_DONTCARE); 2442 } 2443 2444 devp = ddi_get_driver_private(devi); 2445 2446 if (devp == NULL) { 2447 /* Ooops... nexus driver is mis-configured... */ 2448 return (DDI_PROBE_FAILURE); 2449 } 2450 2451 instance = ddi_get_instance(devi); 2452 2453 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2454 return (DDI_PROBE_PARTIAL); 2455 } 2456 2457 /* 2458 * Call the SCSA utility probe routine to see if we actually 2459 * have a target at this SCSI nexus. 2460 */ 2461 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2462 case SCSIPROBE_EXISTS: 2463 switch (devp->sd_inq->inq_dtype) { 2464 case DTYPE_DIRECT: 2465 rval = DDI_PROBE_SUCCESS; 2466 break; 2467 case DTYPE_RODIRECT: 2468 /* CDs etc. Can be removable media */ 2469 rval = DDI_PROBE_SUCCESS; 2470 break; 2471 case DTYPE_OPTICAL: 2472 /* 2473 * Rewritable optical driver HP115AA 2474 * Can also be removable media 2475 */ 2476 2477 /* 2478 * Do not attempt to bind to DTYPE_OPTICAL if 2479 * pre solaris 9 sparc sd behavior is required 2480 * 2481 * If first time through and sd_dtype_optical_bind 2482 * has not been set in /etc/system check properties 2483 */ 2484 2485 if (sd_dtype_optical_bind < 0) { 2486 sd_dtype_optical_bind = ddi_prop_get_int 2487 (DDI_DEV_T_ANY, devi, 0, 2488 "optical-device-bind", 1); 2489 } 2490 2491 if (sd_dtype_optical_bind == 0) { 2492 rval = DDI_PROBE_FAILURE; 2493 } else { 2494 rval = DDI_PROBE_SUCCESS; 2495 } 2496 break; 2497 2498 case DTYPE_NOTPRESENT: 2499 default: 2500 rval = DDI_PROBE_FAILURE; 2501 break; 2502 } 2503 break; 2504 default: 2505 rval = DDI_PROBE_PARTIAL; 2506 break; 2507 } 2508 2509 /* 2510 * This routine checks for resource allocation prior to freeing, 2511 * so it will take care of the "smart probing" case where a 2512 * scsi_probe() may or may not have been issued and will *not* 2513 * free previously-freed resources. 2514 */ 2515 scsi_unprobe(devp); 2516 return (rval); 2517 } 2518 2519 2520 /* 2521 * Function: sdinfo 2522 * 2523 * Description: This is the driver getinfo(9e) entry point function. 
2524 * Given the device number, return the devinfo pointer from 2525 * the scsi_device structure or the instance number 2526 * associated with the dev_t. 2527 * 2528 * Arguments: dip - pointer to device info structure 2529 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2530 * DDI_INFO_DEVT2INSTANCE) 2531 * arg - driver dev_t 2532 * resultp - user buffer for request response 2533 * 2534 * Return Code: DDI_SUCCESS 2535 * DDI_FAILURE 2536 */ 2537 /* ARGSUSED */ 2538 static int 2539 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2540 { 2541 struct sd_lun *un; 2542 dev_t dev; 2543 int instance; 2544 int error; 2545 2546 switch (infocmd) { 2547 case DDI_INFO_DEVT2DEVINFO: 2548 dev = (dev_t)arg; 2549 instance = SDUNIT(dev); 2550 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2551 return (DDI_FAILURE); 2552 } 2553 *result = (void *) SD_DEVINFO(un); 2554 error = DDI_SUCCESS; 2555 break; 2556 case DDI_INFO_DEVT2INSTANCE: 2557 dev = (dev_t)arg; 2558 instance = SDUNIT(dev); 2559 *result = (void *)(uintptr_t)instance; 2560 error = DDI_SUCCESS; 2561 break; 2562 default: 2563 error = DDI_FAILURE; 2564 } 2565 return (error); 2566 } 2567 2568 /* 2569 * Function: sd_prop_op 2570 * 2571 * Description: This is the driver prop_op(9e) entry point function. 2572 * Return the number of blocks for the partition in question 2573 * or forward the request to the property facilities. 2574 * 2575 * Arguments: dev - device number 2576 * dip - pointer to device info structure 2577 * prop_op - property operator 2578 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2579 * name - pointer to property name 2580 * valuep - pointer or address of the user buffer 2581 * lengthp - property length 2582 * 2583 * Return Code: DDI_PROP_SUCCESS 2584 * DDI_PROP_NOT_FOUND 2585 * DDI_PROP_UNDEFINED 2586 * DDI_PROP_NO_MEMORY 2587 * DDI_PROP_BUF_TOO_SMALL 2588 */ 2589 2590 static int 2591 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2592 char *name, caddr_t valuep, int *lengthp) 2593 { 2594 int instance = ddi_get_instance(dip); 2595 struct sd_lun *un; 2596 uint64_t nblocks64; 2597 uint_t dblk; 2598 2599 /* 2600 * Our dynamic properties are all device specific and size oriented. 2601 * Requests issued under conditions where size is valid are passed 2602 * to ddi_prop_op_nblocks with the size information, otherwise the 2603 * request is passed to ddi_prop_op. Size depends on valid geometry. 
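	 *
	 * For example (illustrative): for a device with un_tgt_blocksize of
	 * 2048 and un_sys_blocksize of 512, dblk below evaluates to 4, so a
	 * partition of 1000000 system blocks is reported as 250000 blocks of
	 * 2048 bytes each.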
2604 */ 2605 un = ddi_get_soft_state(sd_state, instance); 2606 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2607 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2608 name, valuep, lengthp)); 2609 } else if (!SD_IS_VALID_LABEL(un)) { 2610 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2611 valuep, lengthp)); 2612 } 2613 2614 /* get nblocks value */ 2615 ASSERT(!mutex_owned(SD_MUTEX(un))); 2616 2617 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2618 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2619 2620 /* report size in target size blocks */ 2621 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2622 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2623 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2624 } 2625 2626 /* 2627 * The following functions are for smart probing: 2628 * sd_scsi_probe_cache_init() 2629 * sd_scsi_probe_cache_fini() 2630 * sd_scsi_clear_probe_cache() 2631 * sd_scsi_probe_with_cache() 2632 */ 2633 2634 /* 2635 * Function: sd_scsi_probe_cache_init 2636 * 2637 * Description: Initializes the probe response cache mutex and head pointer. 2638 * 2639 * Context: Kernel thread context 2640 */ 2641 2642 static void 2643 sd_scsi_probe_cache_init(void) 2644 { 2645 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2646 sd_scsi_probe_cache_head = NULL; 2647 } 2648 2649 2650 /* 2651 * Function: sd_scsi_probe_cache_fini 2652 * 2653 * Description: Frees all resources associated with the probe response cache. 2654 * 2655 * Context: Kernel thread context 2656 */ 2657 2658 static void 2659 sd_scsi_probe_cache_fini(void) 2660 { 2661 struct sd_scsi_probe_cache *cp; 2662 struct sd_scsi_probe_cache *ncp; 2663 2664 /* Clean up our smart probing linked list */ 2665 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2666 ncp = cp->next; 2667 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2668 } 2669 sd_scsi_probe_cache_head = NULL; 2670 mutex_destroy(&sd_scsi_probe_cache_mutex); 2671 } 2672 2673 2674 /* 2675 * Function: sd_scsi_clear_probe_cache 2676 * 2677 * Description: This routine clears the probe response cache. This is 2678 * done when open() returns ENXIO so that when deferred 2679 * attach is attempted (possibly after a device has been 2680 * turned on) we will retry the probe. Since we don't know 2681 * which target we failed to open, we just clear the 2682 * entire cache. 2683 * 2684 * Context: Kernel thread context 2685 */ 2686 2687 static void 2688 sd_scsi_clear_probe_cache(void) 2689 { 2690 struct sd_scsi_probe_cache *cp; 2691 int i; 2692 2693 mutex_enter(&sd_scsi_probe_cache_mutex); 2694 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2695 /* 2696 * Reset all entries to SCSIPROBE_EXISTS. This will 2697 * force probing to be performed the next time 2698 * sd_scsi_probe_with_cache is called. 2699 */ 2700 for (i = 0; i < NTARGETS_WIDE; i++) { 2701 cp->cache[i] = SCSIPROBE_EXISTS; 2702 } 2703 } 2704 mutex_exit(&sd_scsi_probe_cache_mutex); 2705 } 2706 2707 2708 /* 2709 * Function: sd_scsi_probe_with_cache 2710 * 2711 * Description: This routine implements support for a scsi device probe 2712 * with cache. The driver maintains a cache of the target 2713 * responses to scsi probes. If we get no response from a 2714 * target during a probe inquiry, we remember that, and we 2715 * avoid additional calls to scsi_probe on non-zero LUNs 2716 * on the same target until the cache is cleared. 
By doing 2717 * so we avoid the 1/4 sec selection timeout for nonzero 2718 * LUNs. lun0 of a target is always probed. 2719 * 2720 * Arguments: devp - Pointer to a scsi_device(9S) structure 2721 * waitfunc - indicates what the allocator routines should 2722 * do when resources are not available. This value 2723 * is passed on to scsi_probe() when that routine 2724 * is called. 2725 * 2726 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2727 * otherwise the value returned by scsi_probe(9F). 2728 * 2729 * Context: Kernel thread context 2730 */ 2731 2732 static int 2733 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2734 { 2735 struct sd_scsi_probe_cache *cp; 2736 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2737 int lun, tgt; 2738 2739 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2740 SCSI_ADDR_PROP_LUN, 0); 2741 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2742 SCSI_ADDR_PROP_TARGET, -1); 2743 2744 /* Make sure caching enabled and target in range */ 2745 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2746 /* do it the old way (no cache) */ 2747 return (scsi_probe(devp, waitfn)); 2748 } 2749 2750 mutex_enter(&sd_scsi_probe_cache_mutex); 2751 2752 /* Find the cache for this scsi bus instance */ 2753 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2754 if (cp->pdip == pdip) { 2755 break; 2756 } 2757 } 2758 2759 /* If we can't find a cache for this pdip, create one */ 2760 if (cp == NULL) { 2761 int i; 2762 2763 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2764 KM_SLEEP); 2765 cp->pdip = pdip; 2766 cp->next = sd_scsi_probe_cache_head; 2767 sd_scsi_probe_cache_head = cp; 2768 for (i = 0; i < NTARGETS_WIDE; i++) { 2769 cp->cache[i] = SCSIPROBE_EXISTS; 2770 } 2771 } 2772 2773 mutex_exit(&sd_scsi_probe_cache_mutex); 2774 2775 /* Recompute the cache for this target if LUN zero */ 2776 if (lun == 0) { 2777 cp->cache[tgt] = SCSIPROBE_EXISTS; 2778 } 2779 2780 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2781 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2782 return (SCSIPROBE_NORESP); 2783 } 2784 2785 /* Do the actual probe; save & return the result */ 2786 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2787 } 2788 2789 2790 /* 2791 * Function: sd_scsi_target_lun_init 2792 * 2793 * Description: Initializes the attached lun chain mutex and head pointer. 2794 * 2795 * Context: Kernel thread context 2796 */ 2797 2798 static void 2799 sd_scsi_target_lun_init(void) 2800 { 2801 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2802 sd_scsi_target_lun_head = NULL; 2803 } 2804 2805 2806 /* 2807 * Function: sd_scsi_target_lun_fini 2808 * 2809 * Description: Frees all resources associated with the attached lun 2810 * chain 2811 * 2812 * Context: Kernel thread context 2813 */ 2814 2815 static void 2816 sd_scsi_target_lun_fini(void) 2817 { 2818 struct sd_scsi_hba_tgt_lun *cp; 2819 struct sd_scsi_hba_tgt_lun *ncp; 2820 2821 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2822 ncp = cp->next; 2823 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2824 } 2825 sd_scsi_target_lun_head = NULL; 2826 mutex_destroy(&sd_scsi_target_lun_mutex); 2827 } 2828 2829 2830 /* 2831 * Function: sd_scsi_get_target_lun_count 2832 * 2833 * Description: This routine will check in the attached lun chain to see 2834 * how many luns are attached on the required SCSI controller 2835 * and target. 
Currently, some capabilities, like tagged
2836  *		queueing, are supported per target by the HBA, so all luns
2837  *		in a target have the same capabilities. Based on this
2838  *		assumption, sd should only set these capabilities once per
2839  *		target. This function is called when sd needs to decide how
2840  *		many luns are already attached on a target.
2841  *
2842  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2843  *		controller device.
2844  *		target - The target ID on the controller's SCSI bus.
2845  *
2846  * Return Code: The number of luns attached on the required target and
2847  *		controller.
2848  *		-1 if target ID is not in parallel SCSI scope or the given
2849  *		dip is not in the chain.
2850  *
2851  * Context: Kernel thread context
2852  */
2853 
2854 static int
2855 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2856 {
2857 	struct sd_scsi_hba_tgt_lun	*cp;
2858 
2859 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
2860 		return (-1);
2861 	}
2862 
2863 	mutex_enter(&sd_scsi_target_lun_mutex);
2864 
2865 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2866 		if (cp->pdip == dip) {
2867 			break;
2868 		}
2869 	}
2870 
2871 	mutex_exit(&sd_scsi_target_lun_mutex);
2872 
2873 	if (cp == NULL) {
2874 		return (-1);
2875 	}
2876 
2877 	return (cp->nlun[target]);
2878 }
2879 
2880 
2881 /*
2882  * Function: sd_scsi_update_lun_on_target
2883  *
2884  * Description: This routine is used to update the attached lun chain when a
2885  *		lun is attached or detached on a target.
2886  *
2887  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2888  *		controller device.
2889  *		target - The target ID on the controller's SCSI bus.
2890  *		flag - Indicates whether the lun is attached or detached.
2891  *
2892  * Context: Kernel thread context
2893  */
2894 
2895 static void
2896 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2897 {
2898 	struct sd_scsi_hba_tgt_lun	*cp;
2899 
2900 	mutex_enter(&sd_scsi_target_lun_mutex);
2901 
2902 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2903 		if (cp->pdip == dip) {
2904 			break;
2905 		}
2906 	}
2907 
2908 	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2909 		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2910 		    KM_SLEEP);
2911 		cp->pdip = dip;
2912 		cp->next = sd_scsi_target_lun_head;
2913 		sd_scsi_target_lun_head = cp;
2914 	}
2915 
2916 	mutex_exit(&sd_scsi_target_lun_mutex);
2917 
2918 	if (cp != NULL) {
2919 		if (flag == SD_SCSI_LUN_ATTACH) {
2920 			cp->nlun[target]++;
2921 		} else {
2922 			cp->nlun[target]--;
2923 		}
2924 	}
2925 }
2926 
2927 
2928 /*
2929  * Function: sd_spin_up_unit
2930  *
2931  * Description: Issues the following commands to spin up the device:
2932  *		START STOP UNIT and INQUIRY.
2933  *
2934  * Arguments: un - driver soft state (unit) structure
2935  *
2936  * Return Code: 0 - success
2937  *		EIO - failure
2938  *		EACCES - reservation conflict
2939  *
2940  * Context: Kernel thread context
2941  */
2942 
2943 static int
2944 sd_spin_up_unit(struct sd_lun *un)
2945 {
2946 	size_t	resid		= 0;
2947 	int	has_conflict	= FALSE;
2948 	uchar_t *bufaddr;
2949 
2950 	ASSERT(un != NULL);
2951 
2952 	/*
2953 	 * Send a throwaway START UNIT command.
2954 	 *
2955 	 * If we fail on this, we don't care presently what precisely
2956 	 * is wrong. EMC's arrays will also fail this with a check
2957 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
2958 	 * we don't want to fail the attach because it may become
2959 	 * "active" later.
2960 	 */
2961 	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2962 	    == EACCES)
2963 		has_conflict = TRUE;
2964 
2965 	/*
2966 	 * Send another INQUIRY command to the target. This is necessary for
2967 	 * non-removable media direct access devices because their INQUIRY data
2968 	 * may not be fully qualified until they are spun up (perhaps via the
2969 	 * START command above). (Note: This seems to be needed for some
2970 	 * legacy devices only.) The INQUIRY command should succeed even if a
2971 	 * Reservation Conflict is present.
2972 	 */
2973 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2974 	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2975 		kmem_free(bufaddr, SUN_INQSIZE);
2976 		return (EIO);
2977 	}
2978 
2979 	/*
2980 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2981 	 * Note that this routine does not return a failure here even if the
2982 	 * INQUIRY command did not return any data. This is a legacy behavior.
2983 	 */
2984 	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2985 		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2986 	}
2987 
2988 	kmem_free(bufaddr, SUN_INQSIZE);
2989 
2990 	/* If we hit a reservation conflict above, tell the caller. */
2991 	if (has_conflict == TRUE) {
2992 		return (EACCES);
2993 	}
2994 
2995 	return (0);
2996 }
2997 
2998 #ifdef _LP64
2999 /*
3000  * Function: sd_enable_descr_sense
3001  *
3002  * Description: This routine attempts to select descriptor sense format
3003  *		using the Control mode page. Devices that support 64 bit
3004  *		LBAs (for >2TB luns) should also implement descriptor
3005  *		sense data, so we will call this function whenever we see
3006  *		a lun larger than 2TB. If for some reason the device
3007  *		supports 64 bit LBAs but doesn't support descriptor sense,
3008  *		presumably the mode select will fail. Everything will
3009  *		continue to work normally except that we will not get
3010  *		complete sense data for commands that fail with an LBA
3011  *		larger than 32 bits.
3012  *
3013  * Arguments: un - driver soft state (unit) structure
3014  *
3015  * Context: Kernel thread context only
3016  */
3017 
3018 static void
3019 sd_enable_descr_sense(struct sd_lun *un)
3020 {
3021 	uchar_t			*header;
3022 	struct mode_control_scsi3 *ctrl_bufp;
3023 	size_t			buflen;
3024 	size_t			bd_len;
3025 
3026 	/*
3027 	 * Read MODE SENSE page 0xA, Control Mode Page
3028 	 */
3029 	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3030 	    sizeof (struct mode_control_scsi3);
3031 	header = kmem_zalloc(buflen, KM_SLEEP);
3032 	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3033 	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3034 		SD_ERROR(SD_LOG_COMMON, un,
3035 		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
3036 		goto eds_exit;
3037 	}
3038 
3039 	/*
3040 	 * Determine size of Block Descriptors in order to locate
3041 	 * the mode page data. ATAPI devices return 0, SCSI devices
3042 	 * should return MODE_BLK_DESC_LENGTH.
3043 	 */
3044 	bd_len = ((struct mode_header *)header)->bdesc_length;
3045 
3046 	/* Clear the mode data length field for MODE SELECT */
3047 	((struct mode_header *)header)->length = 0;
3048 
3049 	ctrl_bufp = (struct mode_control_scsi3 *)
3050 	    (header + MODE_HEADER_LENGTH + bd_len);
3051 
3052 	/*
3053 	 * If the page length is smaller than the expected value,
3054 	 * the target device doesn't support D_SENSE. Bail out here.
3055 */ 3056 if (ctrl_bufp->mode_page.length < 3057 sizeof (struct mode_control_scsi3) - 2) { 3058 SD_ERROR(SD_LOG_COMMON, un, 3059 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3060 goto eds_exit; 3061 } 3062 3063 /* 3064 * Clear PS bit for MODE SELECT 3065 */ 3066 ctrl_bufp->mode_page.ps = 0; 3067 3068 /* 3069 * Set D_SENSE to enable descriptor sense format. 3070 */ 3071 ctrl_bufp->d_sense = 1; 3072 3073 /* 3074 * Use MODE SELECT to commit the change to the D_SENSE bit 3075 */ 3076 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3077 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3078 SD_INFO(SD_LOG_COMMON, un, 3079 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3080 goto eds_exit; 3081 } 3082 3083 eds_exit: 3084 kmem_free(header, buflen); 3085 } 3086 3087 /* 3088 * Function: sd_reenable_dsense_task 3089 * 3090 * Description: Re-enable descriptor sense after device or bus reset 3091 * 3092 * Context: Executes in a taskq() thread context 3093 */ 3094 static void 3095 sd_reenable_dsense_task(void *arg) 3096 { 3097 struct sd_lun *un = arg; 3098 3099 ASSERT(un != NULL); 3100 sd_enable_descr_sense(un); 3101 } 3102 #endif /* _LP64 */ 3103 3104 /* 3105 * Function: sd_set_mmc_caps 3106 * 3107 * Description: This routine determines if the device is MMC compliant and if 3108 * the device supports CDDA via a mode sense of the CDVD 3109 * capabilities mode page. Also checks if the device is a 3110 * dvdram writable device. 3111 * 3112 * Arguments: un - driver soft state (unit) structure 3113 * 3114 * Context: Kernel thread context only 3115 */ 3116 3117 static void 3118 sd_set_mmc_caps(struct sd_lun *un) 3119 { 3120 struct mode_header_grp2 *sense_mhp; 3121 uchar_t *sense_page; 3122 caddr_t buf; 3123 int bd_len; 3124 int status; 3125 struct uscsi_cmd com; 3126 int rtn; 3127 uchar_t *out_data_rw, *out_data_hd; 3128 uchar_t *rqbuf_rw, *rqbuf_hd; 3129 3130 ASSERT(un != NULL); 3131 3132 /* 3133 * The flags which will be set in this function are - mmc compliant, 3134 * dvdram writable device, cdda support. Initialize them to FALSE 3135 * and if a capability is detected - it will be set to TRUE. 3136 */ 3137 un->un_f_mmc_cap = FALSE; 3138 un->un_f_dvdram_writable_device = FALSE; 3139 un->un_f_cfg_cdda = FALSE; 3140 3141 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3142 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3143 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3144 3145 if (status != 0) { 3146 /* command failed; just return */ 3147 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3148 return; 3149 } 3150 /* 3151 * If the mode sense request for the CDROM CAPABILITIES 3152 * page (0x2A) succeeds the device is assumed to be MMC. 3153 */ 3154 un->un_f_mmc_cap = TRUE; 3155 3156 /* Get to the page data */ 3157 sense_mhp = (struct mode_header_grp2 *)buf; 3158 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3159 sense_mhp->bdesc_length_lo; 3160 if (bd_len > MODE_BLK_DESC_LENGTH) { 3161 /* 3162 * We did not get back the expected block descriptor 3163 * length so we cannot determine if the device supports 3164 * CDDA. However, we still indicate the device is MMC 3165 * according to the successful response to the page 3166 * 0x2A mode sense request. 
3167 */ 3168 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3169 "sd_set_mmc_caps: Mode Sense returned " 3170 "invalid block descriptor length\n"); 3171 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3172 return; 3173 } 3174 3175 /* See if read CDDA is supported */ 3176 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3177 bd_len); 3178 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3179 3180 /* See if writing DVD RAM is supported. */ 3181 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3182 if (un->un_f_dvdram_writable_device == TRUE) { 3183 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3184 return; 3185 } 3186 3187 /* 3188 * If the device presents DVD or CD capabilities in the mode 3189 * page, we can return here since a RRD will not have 3190 * these capabilities. 3191 */ 3192 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3193 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3194 return; 3195 } 3196 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3197 3198 /* 3199 * If un->un_f_dvdram_writable_device is still FALSE, 3200 * check for a Removable Rigid Disk (RRD). A RRD 3201 * device is identified by the features RANDOM_WRITABLE and 3202 * HARDWARE_DEFECT_MANAGEMENT. 3203 */ 3204 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3205 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3206 3207 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3208 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3209 RANDOM_WRITABLE, SD_PATH_STANDARD); 3210 if (rtn != 0) { 3211 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3212 kmem_free(rqbuf_rw, SENSE_LENGTH); 3213 return; 3214 } 3215 3216 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3217 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3218 3219 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3220 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3221 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3222 if (rtn == 0) { 3223 /* 3224 * We have good information, check for random writable 3225 * and hardware defect features. 3226 */ 3227 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3228 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3229 un->un_f_dvdram_writable_device = TRUE; 3230 } 3231 } 3232 3233 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3234 kmem_free(rqbuf_rw, SENSE_LENGTH); 3235 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3236 kmem_free(rqbuf_hd, SENSE_LENGTH); 3237 } 3238 3239 /* 3240 * Function: sd_check_for_writable_cd 3241 * 3242 * Description: This routine determines if the media in the device is 3243 * writable or not. It uses the get configuration command (0x46) 3244 * to determine if the media is writable 3245 * 3246 * Arguments: un - driver soft state (unit) structure 3247 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3248 * chain and the normal command waitq, or 3249 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3250 * "direct" chain and bypass the normal command 3251 * waitq. 3252 * 3253 * Context: Never called at interrupt context. 
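 *
 *		A sketch of a call site (illustrative; the routine asserts
 *		that SD_MUTEX(un) is held on entry and returns with it held):
 *
 *		    mutex_enter(SD_MUTEX(un));
 *		    sd_check_for_writable_cd(un, SD_PATH_DIRECT);
 *		    mutex_exit(SD_MUTEX(un));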
3254  */
3255 
3256 static void
3257 sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
3258 {
3259 	struct uscsi_cmd	com;
3260 	uchar_t			*out_data;
3261 	uchar_t			*rqbuf;
3262 	int			rtn;
3263 	uchar_t			*out_data_rw, *out_data_hd;
3264 	uchar_t			*rqbuf_rw, *rqbuf_hd;
3265 	struct mode_header_grp2	*sense_mhp;
3266 	uchar_t			*sense_page;
3267 	caddr_t			buf;
3268 	int			bd_len;
3269 	int			status;
3270 
3271 	ASSERT(un != NULL);
3272 	ASSERT(mutex_owned(SD_MUTEX(un)));
3273 
3274 	/*
3275 	 * Initialize writable media to FALSE; it is set to TRUE only if
3276 	 * the configuration info tells us otherwise.
3277 	 */
3278 	un->un_f_mmc_writable_media = FALSE;
3279 	mutex_exit(SD_MUTEX(un));
3280 
3281 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3282 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3283 
3284 	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3285 	    out_data, SD_PROFILE_HEADER_LEN, path_flag);
3286 
3287 	mutex_enter(SD_MUTEX(un));
3288 	if (rtn == 0) {
3289 		/*
3290 		 * We have good information, check for writable DVD.
3291 		 */
3292 		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3293 			un->un_f_mmc_writable_media = TRUE;
3294 			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3295 			kmem_free(rqbuf, SENSE_LENGTH);
3296 			return;
3297 		}
3298 	}
3299 
3300 	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3301 	kmem_free(rqbuf, SENSE_LENGTH);
3302 
3303 	/*
3304 	 * Determine if this is an RRD type device.
3305 	 */
3306 	mutex_exit(SD_MUTEX(un));
3307 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3308 	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3309 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3310 	mutex_enter(SD_MUTEX(un));
3311 	if (status != 0) {
3312 		/* command failed; just return */
3313 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3314 		return;
3315 	}
3316 
3317 	/* Get to the page data */
3318 	sense_mhp = (struct mode_header_grp2 *)buf;
3319 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3320 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3321 		/*
3322 		 * We did not get back the expected block descriptor length so
3323 		 * we cannot check the mode page.
3324 		 */
3325 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3326 		    "sd_check_for_writable_cd: Mode Sense returned "
3327 		    "invalid block descriptor length\n");
3328 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3329 		return;
3330 	}
3331 
3332 	/*
3333 	 * If the device presents DVD or CD capabilities in the mode
3334 	 * page, we can return here since an RRD device will not have
3335 	 * these capabilities.
3336 	 */
3337 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3338 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3339 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3340 		return;
3341 	}
3342 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3343 
3344 	/*
3345 	 * If un->un_f_mmc_writable_media is still FALSE,
3346 	 * check for RRD type media. An RRD device is identified
3347 	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
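	 * (In the GET CONFIGURATION responses checked below, bytes 0-7 are
	 * the feature header and the first feature descriptor follows:
	 * bytes 8-9 hold the feature code and bit 0 of byte 10 is the
	 * "current" bit, which is why offsets 9 and 10 are examined. This
	 * reading of the offsets follows the MMC feature-descriptor layout.)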
3348 	 */
3349 	mutex_exit(SD_MUTEX(un));
3350 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3351 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3352 
3353 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3354 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3355 	    RANDOM_WRITABLE, path_flag);
3356 	if (rtn != 0) {
3357 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3358 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3359 		mutex_enter(SD_MUTEX(un));
3360 		return;
3361 	}
3362 
3363 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3364 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3365 
3366 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3367 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3368 	    HARDWARE_DEFECT_MANAGEMENT, path_flag);
3369 	mutex_enter(SD_MUTEX(un));
3370 	if (rtn == 0) {
3371 		/*
3372 		 * We have good information, check for random writable
3373 		 * and hardware defect features as current.
3374 		 */
3375 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3376 		    (out_data_rw[10] & 0x1) &&
3377 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3378 		    (out_data_hd[10] & 0x1)) {
3379 			un->un_f_mmc_writable_media = TRUE;
3380 		}
3381 	}
3382 
3383 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3384 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3385 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3386 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3387 }
3388 
3389 /*
3390  * Function:	sd_read_unit_properties
3391  *
3392  * Description: The following implements a property lookup mechanism.
3393  *		Properties for particular disks (keyed on vendor, model
3394  *		and rev numbers) are sought in the sd.conf file via
3395  *		sd_process_sdconf_file(), and if not found there, are
3396  *		looked for in a list hardcoded in this driver via
3397  *		sd_process_sdconf_table(). Once located, the properties
3398  *		are used to update the driver unit structure.
3399  *
3400  * Arguments:	un - driver soft state (unit) structure
3401  */
3402 
3403 static void
3404 sd_read_unit_properties(struct sd_lun *un)
3405 {
3406 	/*
3407 	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3408 	 * the "sd-config-list" property (from the sd.conf file) or if
3409 	 * there was not a match for the inquiry vid/pid. If this event
3410 	 * occurs the static driver configuration table is searched for
3411 	 * a match.
3412 	 */
3413 	ASSERT(un != NULL);
3414 	if (sd_process_sdconf_file(un) == SD_FAILURE) {
3415 		sd_process_sdconf_table(un);
3416 	}
3417 
3418 	/* check for LSI device */
3419 	sd_is_lsi(un);
3420 
3421 
3422 }
3423 
3424 
3425 /*
3426  * Function:	sd_process_sdconf_file
3427  *
3428  * Description: Use ddi_getlongprop to obtain the properties from the
3429  *		driver's config file (i.e., sd.conf) and update the driver
3430  *		soft state structure accordingly.
3431  *
3432  * Arguments:	un - driver soft state (unit) structure
3433  *
3434  * Return Code: SD_SUCCESS - The properties were successfully set according
3435  *			to the driver configuration file.
3436  *		SD_FAILURE - The driver config list was not obtained or
3437  *			there was no vid/pid match. This indicates that
3438  *			the static config table should be used.
3439 * 3440 * The config file has a property, "sd-config-list", which consists of 3441 * one or more duplets as follows: 3442 * 3443 * sd-config-list= 3444 * <duplet>, 3445 * [<duplet>,] 3446 * [<duplet>]; 3447 * 3448 * The structure of each duplet is as follows: 3449 * 3450 * <duplet>:= <vid+pid>,<data-property-name_list> 3451 * 3452 * The first entry of the duplet is the device ID string (the concatenated 3453 * vid & pid; not to be confused with a device_id). This is defined in 3454 * the same way as in the sd_disk_table. 3455 * 3456 * The second part of the duplet is a string that identifies a 3457 * data-property-name-list. The data-property-name-list is defined as 3458 * follows: 3459 * 3460 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3461 * 3462 * The syntax of <data-property-name> depends on the <version> field. 3463 * 3464 * If version = SD_CONF_VERSION_1 we have the following syntax: 3465 * 3466 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3467 * 3468 * where the prop0 value will be used to set prop0 if bit0 set in the 3469 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3470 * 3471 */ 3472 3473 static int 3474 sd_process_sdconf_file(struct sd_lun *un) 3475 { 3476 char *config_list = NULL; 3477 int config_list_len; 3478 int len; 3479 int dupletlen = 0; 3480 char *vidptr; 3481 int vidlen; 3482 char *dnlist_ptr; 3483 char *dataname_ptr; 3484 int dnlist_len; 3485 int dataname_len; 3486 int *data_list; 3487 int data_list_len; 3488 int rval = SD_FAILURE; 3489 int i; 3490 3491 ASSERT(un != NULL); 3492 3493 /* Obtain the configuration list associated with the .conf file */ 3494 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3495 sd_config_list, (caddr_t)&config_list, &config_list_len) 3496 != DDI_PROP_SUCCESS) { 3497 return (SD_FAILURE); 3498 } 3499 3500 /* 3501 * Compare vids in each duplet to the inquiry vid - if a match is 3502 * made, get the data value and update the soft state structure 3503 * accordingly. 3504 * 3505 * Note: This algorithm is complex and difficult to maintain. It should 3506 * be replaced with a more robust implementation. 3507 */ 3508 for (len = config_list_len, vidptr = config_list; len > 0; 3509 vidptr += dupletlen, len -= dupletlen) { 3510 /* 3511 * Note: The assumption here is that each vid entry is on 3512 * a unique line from its associated duplet. 3513 */ 3514 vidlen = dupletlen = (int)strlen(vidptr); 3515 if ((vidlen == 0) || 3516 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3517 dupletlen++; 3518 continue; 3519 } 3520 3521 /* 3522 * dnlist contains 1 or more blank separated 3523 * data-property-name entries 3524 */ 3525 dnlist_ptr = vidptr + vidlen + 1; 3526 dnlist_len = (int)strlen(dnlist_ptr); 3527 dupletlen += dnlist_len + 2; 3528 3529 /* 3530 * Set a pointer for the first data-property-name 3531 * entry in the list 3532 */ 3533 dataname_ptr = dnlist_ptr; 3534 dataname_len = 0; 3535 3536 /* 3537 * Loop through all data-property-name entries in the 3538 * data-property-name-list setting the properties for each. 3539 */ 3540 while (dataname_len < dnlist_len) { 3541 int version; 3542 3543 /* 3544 * Determine the length of the current 3545 * data-property-name entry by indexing until a 3546 * blank or NULL is encountered. When the space is 3547 * encountered reset it to a NULL for compliance 3548 * with ddi_getlongprop(). 
3549 			 */
3550 			for (i = 0; ((dataname_ptr[i] != ' ') &&
3551 			    (dataname_ptr[i] != '\0')); i++) {
3552 				;
3553 			}
3554 
3555 			dataname_len += i;
3556 			/* If not null-terminated, make it so */
3557 			if (dataname_ptr[i] == ' ') {
3558 				dataname_ptr[i] = '\0';
3559 			}
3560 			dataname_len++;
3561 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3562 			    "sd_process_sdconf_file: disk:%s, data:%s\n",
3563 			    vidptr, dataname_ptr);
3564 
3565 			/* Get the data list */
3566 			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
3567 			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
3568 			    != DDI_PROP_SUCCESS) {
3569 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
3570 				    "sd_process_sdconf_file: data property (%s)"
3571 				    " has no value\n", dataname_ptr);
3572 				dataname_ptr = dnlist_ptr + dataname_len;
3573 				continue;
3574 			}
3575 
3576 			version = data_list[0];
3577 
3578 			if (version == SD_CONF_VERSION_1) {
3579 				sd_tunables values;
3580 
3581 				/* Set the properties */
3582 				if (sd_chk_vers1_data(un, data_list[1],
3583 				    &data_list[2], data_list_len, dataname_ptr)
3584 				    == SD_SUCCESS) {
3585 					sd_get_tunables_from_conf(un,
3586 					    data_list[1], &data_list[2],
3587 					    &values);
3588 					sd_set_vers1_properties(un,
3589 					    data_list[1], &values);
3590 					rval = SD_SUCCESS;
3591 				} else {
3592 					rval = SD_FAILURE;
3593 				}
3594 			} else {
3595 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3596 				    "data property %s version 0x%x is invalid.",
3597 				    dataname_ptr, version);
3598 				rval = SD_FAILURE;
3599 			}
3600 			kmem_free(data_list, data_list_len);
3601 			dataname_ptr = dnlist_ptr + dataname_len;
3602 		}
3603 	}
3604 
3605 	/* free up the memory allocated by ddi_getlongprop */
3606 	if (config_list) {
3607 		kmem_free(config_list, config_list_len);
3608 	}
3609 
3610 	return (rval);
3611 }
3612 
3613 /*
3614  * Function:	sd_get_tunables_from_conf()
3615  *
3616  *
3617  * Description: This function reads the data list from the sd.conf file and
3618  *		pulls out the values that are numeric, placing each one in
3619  *		the appropriate sd_tunables member.
3620  *		Since the order of the data list members varies across
3621  *		platforms, this function reads them from the data list in a
3622  *		platform-specific order and places them into the sd_tunables
3623  *		member that is consistent across all platforms.
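 *
 *		As an illustrative (hypothetical) sd.conf fragment, a
 *		version 1 data list such as
 *
 *			example-throttle-data = 1,0x1,0x20,0;
 *
 *		carries version SD_CONF_VERSION_1 and flags 0x1
 *		(SD_CONF_BSET_THROTTLE); its first property entry (0x20)
 *		lands in sdt_throttle, while entries whose flag bits are
 *		clear are ignored.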
3624 */ 3625 static void 3626 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3627 sd_tunables *values) 3628 { 3629 int i; 3630 int mask; 3631 3632 bzero(values, sizeof (sd_tunables)); 3633 3634 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3635 3636 mask = 1 << i; 3637 if (mask > flags) { 3638 break; 3639 } 3640 3641 switch (mask & flags) { 3642 case 0: /* This mask bit not set in flags */ 3643 continue; 3644 case SD_CONF_BSET_THROTTLE: 3645 values->sdt_throttle = data_list[i]; 3646 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3647 "sd_get_tunables_from_conf: throttle = %d\n", 3648 values->sdt_throttle); 3649 break; 3650 case SD_CONF_BSET_CTYPE: 3651 values->sdt_ctype = data_list[i]; 3652 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3653 "sd_get_tunables_from_conf: ctype = %d\n", 3654 values->sdt_ctype); 3655 break; 3656 case SD_CONF_BSET_NRR_COUNT: 3657 values->sdt_not_rdy_retries = data_list[i]; 3658 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3659 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3660 values->sdt_not_rdy_retries); 3661 break; 3662 case SD_CONF_BSET_BSY_RETRY_COUNT: 3663 values->sdt_busy_retries = data_list[i]; 3664 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3665 "sd_get_tunables_from_conf: busy_retries = %d\n", 3666 values->sdt_busy_retries); 3667 break; 3668 case SD_CONF_BSET_RST_RETRIES: 3669 values->sdt_reset_retries = data_list[i]; 3670 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3671 "sd_get_tunables_from_conf: reset_retries = %d\n", 3672 values->sdt_reset_retries); 3673 break; 3674 case SD_CONF_BSET_RSV_REL_TIME: 3675 values->sdt_reserv_rel_time = data_list[i]; 3676 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3677 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3678 values->sdt_reserv_rel_time); 3679 break; 3680 case SD_CONF_BSET_MIN_THROTTLE: 3681 values->sdt_min_throttle = data_list[i]; 3682 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3683 "sd_get_tunables_from_conf: min_throttle = %d\n", 3684 values->sdt_min_throttle); 3685 break; 3686 case SD_CONF_BSET_DISKSORT_DISABLED: 3687 values->sdt_disk_sort_dis = data_list[i]; 3688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3689 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3690 values->sdt_disk_sort_dis); 3691 break; 3692 case SD_CONF_BSET_LUN_RESET_ENABLED: 3693 values->sdt_lun_reset_enable = data_list[i]; 3694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3695 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3696 "\n", values->sdt_lun_reset_enable); 3697 break; 3698 case SD_CONF_BSET_CACHE_IS_NV: 3699 values->sdt_suppress_cache_flush = data_list[i]; 3700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3701 "sd_get_tunables_from_conf: \ 3702 suppress_cache_flush = %d" 3703 "\n", values->sdt_suppress_cache_flush); 3704 break; 3705 } 3706 } 3707 } 3708 3709 /* 3710 * Function: sd_process_sdconf_table 3711 * 3712 * Description: Search the static configuration table for a match on the 3713 * inquiry vid/pid and update the driver soft state structure 3714 * according to the table property values for the device. 
3715  *
3716  * The form of a configuration table entry is:
3717  *	<vid+pid>,<flags>,<property-data>
3718  *	"SEAGATE ST42400N",1,0x40000,
3719  *	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
3720  *
3721  * Arguments: un - driver soft state (unit) structure
3722  */
3723 
3724 static void
3725 sd_process_sdconf_table(struct sd_lun *un)
3726 {
3727 	char	*id = NULL;
3728 	int	table_index;
3729 	int	idlen;
3730 
3731 	ASSERT(un != NULL);
3732 	for (table_index = 0; table_index < sd_disk_table_size;
3733 	    table_index++) {
3734 		id = sd_disk_table[table_index].device_id;
3735 		idlen = strlen(id);
3736 		if (idlen == 0) {
3737 			continue;
3738 		}
3739 
3740 		/*
3741 		 * The static configuration table currently does not
3742 		 * implement version 10 properties. Additionally,
3743 		 * multiple data-property-name entries are not
3744 		 * implemented in the static configuration table.
3745 		 */
3746 		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
3747 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3748 			    "sd_process_sdconf_table: disk %s\n", id);
3749 			sd_set_vers1_properties(un,
3750 			    sd_disk_table[table_index].flags,
3751 			    sd_disk_table[table_index].properties);
3752 			break;
3753 		}
3754 	}
3755 }
3756 
3757 
3758 /*
3759  * Function:	sd_sdconf_id_match
3760  *
3761  * Description: This local function implements a case insensitive vid/pid
3762  *		comparison as well as the boundary cases of wild card and
3763  *		multiple blanks.
3764  *
3765  *		Note: An implicit assumption made here is that the scsi
3766  *		inquiry structure will always keep the vid, pid and
3767  *		revision strings in consecutive sequence, so they can be
3768  *		read as a single string. If this assumption is not the
3769  *		case, a separate string, to be used for the check, needs
3770  *		to be built with these strings concatenated.
3771  *
3772  * Arguments:	un - driver soft state (unit) structure
3773  *		id - table or config file vid/pid
3774  *		idlen - length of the vid/pid (bytes)
3775  *
3776  * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
3777  *		SD_FAILURE - Indicates no match with the inquiry vid/pid
3778  */
3779 
3780 static int
3781 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
3782 {
3783 	struct scsi_inquiry	*sd_inq;
3784 	int			rval = SD_SUCCESS;
3785 
3786 	ASSERT(un != NULL);
3787 	sd_inq = un->un_sd->sd_inq;
3788 	ASSERT(id != NULL);
3789 
3790 	/*
3791 	 * We use the inq_vid as a pointer to a buffer containing the
3792 	 * vid and pid and use the entire vid/pid length of the table
3793 	 * entry for the comparison. This works because the inq_pid
3794 	 * data member follows inq_vid in the scsi_inquiry structure.
3795 	 */
3796 	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
3797 		/*
3798 		 * The user id string is compared to the inquiry vid/pid
3799 		 * using a case insensitive comparison and ignoring
3800 		 * multiple spaces.
3801 		 */
3802 		rval = sd_blank_cmp(un, id, idlen);
3803 		if (rval != SD_SUCCESS) {
3804 			/*
3805 			 * User id strings that start and end with a "*"
3806 			 * are a special case. These do not have a
3807 			 * specific vendor, and the product string can
3808 			 * appear anywhere in the 16 byte PID portion of
3809 			 * the inquiry data. This is a simple strstr()
3810 			 * type search for the user id in the inquiry data.
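			 * For instance, a hypothetical entry of "*ST15230W*"
			 * would match any inquiry whose 16-byte PID field
			 * contains "ST15230W" at any offset, without regard
			 * to the vendor field.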
3811 */ 3812 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3813 char *pidptr = &id[1]; 3814 int i; 3815 int j; 3816 int pidstrlen = idlen - 2; 3817 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3818 pidstrlen; 3819 3820 if (j < 0) { 3821 return (SD_FAILURE); 3822 } 3823 for (i = 0; i < j; i++) { 3824 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3825 pidptr, pidstrlen) == 0) { 3826 rval = SD_SUCCESS; 3827 break; 3828 } 3829 } 3830 } 3831 } 3832 } 3833 return (rval); 3834 } 3835 3836 3837 /* 3838 * Function: sd_blank_cmp 3839 * 3840 * Description: If the id string starts and ends with a space, treat 3841 * multiple consecutive spaces as equivalent to a single 3842 * space. For example, this causes a sd_disk_table entry 3843 * of " NEC CDROM " to match a device's id string of 3844 * "NEC CDROM". 3845 * 3846 * Note: The success exit condition for this routine is if 3847 * the pointer to the table entry is '\0' and the cnt of 3848 * the inquiry length is zero. This will happen if the inquiry 3849 * string returned by the device is padded with spaces to be 3850 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3851 * SCSI spec states that the inquiry string is to be padded with 3852 * spaces. 3853 * 3854 * Arguments: un - driver soft state (unit) structure 3855 * id - table or config file vid/pid 3856 * idlen - length of the vid/pid (bytes) 3857 * 3858 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3859 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3860 */ 3861 3862 static int 3863 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3864 { 3865 char *p1; 3866 char *p2; 3867 int cnt; 3868 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3869 sizeof (SD_INQUIRY(un)->inq_pid); 3870 3871 ASSERT(un != NULL); 3872 p2 = un->un_sd->sd_inq->inq_vid; 3873 ASSERT(id != NULL); 3874 p1 = id; 3875 3876 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3877 /* 3878 * Note: string p1 is terminated by a NUL but string p2 3879 * isn't. The end of p2 is determined by cnt. 3880 */ 3881 for (;;) { 3882 /* skip over any extra blanks in both strings */ 3883 while ((*p1 != '\0') && (*p1 == ' ')) { 3884 p1++; 3885 } 3886 while ((cnt != 0) && (*p2 == ' ')) { 3887 p2++; 3888 cnt--; 3889 } 3890 3891 /* compare the two strings */ 3892 if ((cnt == 0) || 3893 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3894 break; 3895 } 3896 while ((cnt > 0) && 3897 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3898 p1++; 3899 p2++; 3900 cnt--; 3901 } 3902 } 3903 } 3904 3905 /* return SD_SUCCESS if both strings match */ 3906 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3907 } 3908 3909 3910 /* 3911 * Function: sd_chk_vers1_data 3912 * 3913 * Description: Verify the version 1 device properties provided by the 3914 * user via the configuration file 3915 * 3916 * Arguments: un - driver soft state (unit) structure 3917 * flags - integer mask indicating properties to be set 3918 * prop_list - integer list of property values 3919 * list_len - length of user provided data 3920 * 3921 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3922 * SD_FAILURE - Indicates the user provided data is invalid 3923 */ 3924 3925 static int 3926 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3927 int list_len, char *dataname_ptr) 3928 { 3929 int i; 3930 int mask = 1; 3931 int index = 0; 3932 3933 ASSERT(un != NULL); 3934 3935 /* Check for a NULL property name and list */ 3936 if (dataname_ptr == NULL) { 3937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3938 "sd_chk_vers1_data: NULL data property name."); 3939 return (SD_FAILURE); 3940 } 3941 if (prop_list == NULL) { 3942 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3943 "sd_chk_vers1_data: %s NULL data property list.", 3944 dataname_ptr); 3945 return (SD_FAILURE); 3946 } 3947 3948 /* Display a warning if undefined bits are set in the flags */ 3949 if (flags & ~SD_CONF_BIT_MASK) { 3950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3951 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3952 "Properties not set.", 3953 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3954 return (SD_FAILURE); 3955 } 3956 3957 /* 3958 * Verify the length of the list by identifying the highest bit set 3959 * in the flags and validating that the property list has a length 3960 * up to the index of this bit. 3961 */ 3962 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3963 if (flags & mask) { 3964 index++; 3965 } 3966 mask = 1 << i; 3967 } 3968 if ((list_len / sizeof (int)) < (index + 2)) { 3969 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3970 "sd_chk_vers1_data: " 3971 "Data property list %s size is incorrect. " 3972 "Properties not set.", dataname_ptr); 3973 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3974 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3975 return (SD_FAILURE); 3976 } 3977 return (SD_SUCCESS); 3978 } 3979 3980 3981 /* 3982 * Function: sd_set_vers1_properties 3983 * 3984 * Description: Set version 1 device properties based on a property list 3985 * retrieved from the driver configuration file or static 3986 * configuration table. Version 1 properties have the format: 3987 * 3988 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3989 * 3990 * where the prop0 value will be used to set prop0 if bit0 3991 * is set in the flags 3992 * 3993 * Arguments: un - driver soft state (unit) structure 3994 * flags - integer mask indicating properties to be set 3995 * prop_list - integer list of property values 3996 */ 3997 3998 static void 3999 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4000 { 4001 ASSERT(un != NULL); 4002 4003 /* 4004 * Set the flag to indicate cache is to be disabled. An attempt 4005 * to disable the cache via sd_cache_control() will be made 4006 * later during attach once the basic initialization is complete. 
4007 */ 4008 if (flags & SD_CONF_BSET_NOCACHE) { 4009 un->un_f_opt_disable_cache = TRUE; 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4011 "sd_set_vers1_properties: caching disabled flag set\n"); 4012 } 4013 4014 /* CD-specific configuration parameters */ 4015 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4016 un->un_f_cfg_playmsf_bcd = TRUE; 4017 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4018 "sd_set_vers1_properties: playmsf_bcd set\n"); 4019 } 4020 if (flags & SD_CONF_BSET_READSUB_BCD) { 4021 un->un_f_cfg_readsub_bcd = TRUE; 4022 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4023 "sd_set_vers1_properties: readsub_bcd set\n"); 4024 } 4025 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4026 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4028 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4029 } 4030 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4031 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4032 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4033 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4034 } 4035 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4036 un->un_f_cfg_no_read_header = TRUE; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_set_vers1_properties: no_read_header set\n"); 4039 } 4040 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4041 un->un_f_cfg_read_cd_xd4 = TRUE; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4044 } 4045 4046 /* Support for devices which do not have valid/unique serial numbers */ 4047 if (flags & SD_CONF_BSET_FAB_DEVID) { 4048 un->un_f_opt_fab_devid = TRUE; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_set_vers1_properties: fab_devid bit set\n"); 4051 } 4052 4053 /* Support for user throttle configuration */ 4054 if (flags & SD_CONF_BSET_THROTTLE) { 4055 ASSERT(prop_list != NULL); 4056 un->un_saved_throttle = un->un_throttle = 4057 prop_list->sdt_throttle; 4058 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4059 "sd_set_vers1_properties: throttle set to %d\n", 4060 prop_list->sdt_throttle); 4061 } 4062 4063 /* Set the per disk retry count according to the conf file or table. 
*/ 4064 if (flags & SD_CONF_BSET_NRR_COUNT) { 4065 ASSERT(prop_list != NULL); 4066 if (prop_list->sdt_not_rdy_retries) { 4067 un->un_notready_retry_count = 4068 prop_list->sdt_not_rdy_retries; 4069 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4070 "sd_set_vers1_properties: not ready retry count" 4071 " set to %d\n", un->un_notready_retry_count); 4072 } 4073 } 4074 4075 /* The controller type is reported for generic disk driver ioctls */ 4076 if (flags & SD_CONF_BSET_CTYPE) { 4077 ASSERT(prop_list != NULL); 4078 switch (prop_list->sdt_ctype) { 4079 case CTYPE_CDROM: 4080 un->un_ctype = prop_list->sdt_ctype; 4081 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4082 "sd_set_vers1_properties: ctype set to " 4083 "CTYPE_CDROM\n"); 4084 break; 4085 case CTYPE_CCS: 4086 un->un_ctype = prop_list->sdt_ctype; 4087 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4088 "sd_set_vers1_properties: ctype set to " 4089 "CTYPE_CCS\n"); 4090 break; 4091 case CTYPE_ROD: /* RW optical */ 4092 un->un_ctype = prop_list->sdt_ctype; 4093 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4094 "sd_set_vers1_properties: ctype set to " 4095 "CTYPE_ROD\n"); 4096 break; 4097 default: 4098 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4099 "sd_set_vers1_properties: Could not set " 4100 "invalid ctype value (%d)", 4101 prop_list->sdt_ctype); 4102 } 4103 } 4104 4105 /* Purple failover timeout */ 4106 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4107 ASSERT(prop_list != NULL); 4108 un->un_busy_retry_count = 4109 prop_list->sdt_busy_retries; 4110 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4111 "sd_set_vers1_properties: " 4112 "busy retry count set to %d\n", 4113 un->un_busy_retry_count); 4114 } 4115 4116 /* Purple reset retry count */ 4117 if (flags & SD_CONF_BSET_RST_RETRIES) { 4118 ASSERT(prop_list != NULL); 4119 un->un_reset_retry_count = 4120 prop_list->sdt_reset_retries; 4121 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4122 "sd_set_vers1_properties: " 4123 "reset retry count set to %d\n", 4124 un->un_reset_retry_count); 4125 } 4126 4127 /* Purple reservation release timeout */ 4128 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4129 ASSERT(prop_list != NULL); 4130 un->un_reserve_release_time = 4131 prop_list->sdt_reserv_rel_time; 4132 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4133 "sd_set_vers1_properties: " 4134 "reservation release timeout set to %d\n", 4135 un->un_reserve_release_time); 4136 } 4137 4138 /* 4139 * Driver flag telling the driver to verify that no commands are pending 4140 * for a device before issuing a Test Unit Ready. This is a workaround 4141 * for a firmware bug in some Seagate eliteI drives. 4142 */ 4143 if (flags & SD_CONF_BSET_TUR_CHECK) { 4144 un->un_f_cfg_tur_check = TRUE; 4145 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4146 "sd_set_vers1_properties: tur queue check set\n"); 4147 } 4148 4149 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4150 un->un_min_throttle = prop_list->sdt_min_throttle; 4151 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4152 "sd_set_vers1_properties: min throttle set to %d\n", 4153 un->un_min_throttle); 4154 } 4155 4156 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4157 un->un_f_disksort_disabled = 4158 (prop_list->sdt_disk_sort_dis != 0) ? 4159 TRUE : FALSE; 4160 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4161 "sd_set_vers1_properties: disksort disabled " 4162 "flag set to %d\n", 4163 prop_list->sdt_disk_sort_dis); 4164 } 4165 4166 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4167 un->un_f_lun_reset_enabled = 4168 (prop_list->sdt_lun_reset_enable != 0) ? 
4169 TRUE : FALSE; 4170 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4171 "sd_set_vers1_properties: lun reset enabled " 4172 "flag set to %d\n", 4173 prop_list->sdt_lun_reset_enable); 4174 } 4175 4176 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4177 un->un_f_suppress_cache_flush = 4178 (prop_list->sdt_suppress_cache_flush != 0) ? 4179 TRUE : FALSE; 4180 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4181 "sd_set_vers1_properties: suppress_cache_flush " 4182 "flag set to %d\n", 4183 prop_list->sdt_suppress_cache_flush); 4184 } 4185 4186 /* 4187 * Validate the throttle values. 4188 * If any of the numbers are invalid, set everything to defaults. 4189 */ 4190 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4191 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4192 (un->un_min_throttle > un->un_throttle)) { 4193 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4194 un->un_min_throttle = sd_min_throttle; 4195 } 4196 } 4197 4198 /* 4199 * Function: sd_is_lsi() 4200 * 4201 * Description: Check for lsi devices, step through the static device 4202 * table to match vid/pid. 4203 * 4204 * Args: un - ptr to sd_lun 4205 * 4206 * Notes: When creating new LSI property, need to add the new LSI property 4207 * to this function. 4208 */ 4209 static void 4210 sd_is_lsi(struct sd_lun *un) 4211 { 4212 char *id = NULL; 4213 int table_index; 4214 int idlen; 4215 void *prop; 4216 4217 ASSERT(un != NULL); 4218 for (table_index = 0; table_index < sd_disk_table_size; 4219 table_index++) { 4220 id = sd_disk_table[table_index].device_id; 4221 idlen = strlen(id); 4222 if (idlen == 0) { 4223 continue; 4224 } 4225 4226 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4227 prop = sd_disk_table[table_index].properties; 4228 if (prop == &lsi_properties || 4229 prop == &lsi_oem_properties || 4230 prop == &lsi_properties_scsi || 4231 prop == &symbios_properties) { 4232 un->un_f_cfg_is_lsi = TRUE; 4233 } 4234 break; 4235 } 4236 } 4237 } 4238 4239 /* 4240 * Function: sd_get_physical_geometry 4241 * 4242 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4243 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4244 * target, and use this information to initialize the physical 4245 * geometry cache specified by pgeom_p. 4246 * 4247 * MODE SENSE is an optional command, so failure in this case 4248 * does not necessarily denote an error. We want to use the 4249 * MODE SENSE commands to derive the physical geometry of the 4250 * device, but if either command fails, the logical geometry is 4251 * used as the fallback for disk label geometry in cmlb. 4252 * 4253 * This requires that un->un_blockcount and un->un_tgt_blocksize 4254 * have already been initialized for the current target and 4255 * that the current values be passed as args so that we don't 4256 * end up ever trying to use -1 as a valid value. This could 4257 * happen if either value is reset while we're not holding 4258 * the mutex. 4259 * 4260 * Arguments: un - driver soft state (unit) structure 4261 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4262 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4263 * to use the USCSI "direct" chain and bypass the normal 4264 * command waitq. 4265 * 4266 * Context: Kernel thread only (can sleep). 
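 *
 *		As a worked example with illustrative values: a target
 *		reporting nsect = 63 (page 3) and nhead = 16, ncyl = 16383
 *		(page 4) yields spc = 1008 and a MODE SENSE capacity of
 *		16,514,064 blocks, which is then compared against the READ
 *		CAPACITY value passed in as 'capacity'.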
4267 */ 4268 4269 static int 4270 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4271 diskaddr_t capacity, int lbasize, int path_flag) 4272 { 4273 struct mode_format *page3p; 4274 struct mode_geometry *page4p; 4275 struct mode_header *headerp; 4276 int sector_size; 4277 int nsect; 4278 int nhead; 4279 int ncyl; 4280 int intrlv; 4281 int spc; 4282 diskaddr_t modesense_capacity; 4283 int rpm; 4284 int bd_len; 4285 int mode_header_length; 4286 uchar_t *p3bufp; 4287 uchar_t *p4bufp; 4288 int cdbsize; 4289 int ret = EIO; 4290 4291 ASSERT(un != NULL); 4292 4293 if (lbasize == 0) { 4294 if (ISCD(un)) { 4295 lbasize = 2048; 4296 } else { 4297 lbasize = un->un_sys_blocksize; 4298 } 4299 } 4300 pgeom_p->g_secsize = (unsigned short)lbasize; 4301 4302 /* 4303 * If the unit is a cd/dvd drive MODE SENSE page three 4304 * and MODE SENSE page four are reserved (see SBC spec 4305 * and MMC spec). To prevent soft errors just return 4306 * using the default LBA size. 4307 */ 4308 if (ISCD(un)) 4309 return (ret); 4310 4311 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4312 4313 /* 4314 * Retrieve MODE SENSE page 3 - Format Device Page 4315 */ 4316 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4317 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4318 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4319 != 0) { 4320 SD_ERROR(SD_LOG_COMMON, un, 4321 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4322 goto page3_exit; 4323 } 4324 4325 /* 4326 * Determine size of Block Descriptors in order to locate the mode 4327 * page data. ATAPI devices return 0, SCSI devices should return 4328 * MODE_BLK_DESC_LENGTH. 4329 */ 4330 headerp = (struct mode_header *)p3bufp; 4331 if (un->un_f_cfg_is_atapi == TRUE) { 4332 struct mode_header_grp2 *mhp = 4333 (struct mode_header_grp2 *)headerp; 4334 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4335 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4336 } else { 4337 mode_header_length = MODE_HEADER_LENGTH; 4338 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4339 } 4340 4341 if (bd_len > MODE_BLK_DESC_LENGTH) { 4342 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4343 "received unexpected bd_len of %d, page3\n", bd_len); 4344 goto page3_exit; 4345 } 4346 4347 page3p = (struct mode_format *) 4348 ((caddr_t)headerp + mode_header_length + bd_len); 4349 4350 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4351 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4352 "mode sense pg3 code mismatch %d\n", 4353 page3p->mode_page.code); 4354 goto page3_exit; 4355 } 4356 4357 /* 4358 * Use this physical geometry data only if BOTH MODE SENSE commands 4359 * complete successfully; otherwise, revert to the logical geometry. 4360 * So, we need to save everything in temporary variables. 
4361 */ 4362 sector_size = BE_16(page3p->data_bytes_sect); 4363 4364 /* 4365 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4366 */ 4367 if (sector_size == 0) { 4368 sector_size = un->un_sys_blocksize; 4369 } else { 4370 sector_size &= ~(un->un_sys_blocksize - 1); 4371 } 4372 4373 nsect = BE_16(page3p->sect_track); 4374 intrlv = BE_16(page3p->interleave); 4375 4376 SD_INFO(SD_LOG_COMMON, un, 4377 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4378 SD_INFO(SD_LOG_COMMON, un, 4379 " mode page: %d; nsect: %d; sector size: %d;\n", 4380 page3p->mode_page.code, nsect, sector_size); 4381 SD_INFO(SD_LOG_COMMON, un, 4382 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4383 BE_16(page3p->track_skew), 4384 BE_16(page3p->cylinder_skew)); 4385 4386 4387 /* 4388 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4389 */ 4390 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4391 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4392 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4393 != 0) { 4394 SD_ERROR(SD_LOG_COMMON, un, 4395 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4396 goto page4_exit; 4397 } 4398 4399 /* 4400 * Determine size of Block Descriptors in order to locate the mode 4401 * page data. ATAPI devices return 0, SCSI devices should return 4402 * MODE_BLK_DESC_LENGTH. 4403 */ 4404 headerp = (struct mode_header *)p4bufp; 4405 if (un->un_f_cfg_is_atapi == TRUE) { 4406 struct mode_header_grp2 *mhp = 4407 (struct mode_header_grp2 *)headerp; 4408 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4409 } else { 4410 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4411 } 4412 4413 if (bd_len > MODE_BLK_DESC_LENGTH) { 4414 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4415 "received unexpected bd_len of %d, page4\n", bd_len); 4416 goto page4_exit; 4417 } 4418 4419 page4p = (struct mode_geometry *) 4420 ((caddr_t)headerp + mode_header_length + bd_len); 4421 4422 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4423 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4424 "mode sense pg4 code mismatch %d\n", 4425 page4p->mode_page.code); 4426 goto page4_exit; 4427 } 4428 4429 /* 4430 * Stash the data now, after we know that both commands completed. 4431 */ 4432 4433 4434 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4435 spc = nhead * nsect; 4436 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4437 rpm = BE_16(page4p->rpm); 4438 4439 modesense_capacity = spc * ncyl; 4440 4441 SD_INFO(SD_LOG_COMMON, un, 4442 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4443 SD_INFO(SD_LOG_COMMON, un, 4444 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4447 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4448 (void *)pgeom_p, capacity); 4449 4450 /* 4451 * Compensate if the drive's geometry is not rectangular, i.e., 4452 * the product of C * H * S returned by MODE SENSE >= that returned 4453 * by read capacity. This is an idiosyncrasy of the original x86 4454 * disk subsystem. 
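	 * Continuing the illustrative numbers above: if MODE SENSE implies
	 * 16,514,064 blocks but READ CAPACITY reported 16,000,000, then
	 * with spc = 1008 the ceiling division below yields g_acyl = 510
	 * and g_ncyl = 16383 - 510 = 15873.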
4455 */ 4456 if (modesense_capacity >= capacity) { 4457 SD_INFO(SD_LOG_COMMON, un, 4458 "sd_get_physical_geometry: adjusting acyl; " 4459 "old: %d; new: %d\n", pgeom_p->g_acyl, 4460 (modesense_capacity - capacity + spc - 1) / spc); 4461 if (sector_size != 0) { 4462 /* 1243403: NEC D38x7 drives don't support sec size */ 4463 pgeom_p->g_secsize = (unsigned short)sector_size; 4464 } 4465 pgeom_p->g_nsect = (unsigned short)nsect; 4466 pgeom_p->g_nhead = (unsigned short)nhead; 4467 pgeom_p->g_capacity = capacity; 4468 pgeom_p->g_acyl = 4469 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4470 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4471 } 4472 4473 pgeom_p->g_rpm = (unsigned short)rpm; 4474 pgeom_p->g_intrlv = (unsigned short)intrlv; 4475 ret = 0; 4476 4477 SD_INFO(SD_LOG_COMMON, un, 4478 "sd_get_physical_geometry: mode sense geometry:\n"); 4479 SD_INFO(SD_LOG_COMMON, un, 4480 " nsect: %d; sector size: %d; interlv: %d\n", 4481 nsect, sector_size, intrlv); 4482 SD_INFO(SD_LOG_COMMON, un, 4483 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4484 nhead, ncyl, rpm, modesense_capacity); 4485 SD_INFO(SD_LOG_COMMON, un, 4486 "sd_get_physical_geometry: (cached)\n"); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4489 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4490 pgeom_p->g_nhead, pgeom_p->g_nsect); 4491 SD_INFO(SD_LOG_COMMON, un, 4492 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4493 pgeom_p->g_secsize, pgeom_p->g_capacity, 4494 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4495 4496 page4_exit: 4497 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4498 page3_exit: 4499 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4500 4501 return (ret); 4502 } 4503 4504 /* 4505 * Function: sd_get_virtual_geometry 4506 * 4507 * Description: Ask the controller to tell us about the target device. 4508 * 4509 * Arguments: un - pointer to softstate 4510 * capacity - disk capacity in #blocks 4511 * lbasize - disk block size in bytes 4512 * 4513 * Context: Kernel thread only 4514 */ 4515 4516 static int 4517 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4518 diskaddr_t capacity, int lbasize) 4519 { 4520 uint_t geombuf; 4521 int spc; 4522 4523 ASSERT(un != NULL); 4524 4525 /* Set sector size, and total number of sectors */ 4526 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4527 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4528 4529 /* Let the HBA tell us its geometry */ 4530 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4531 4532 /* A value of -1 indicates an undefined "geometry" property */ 4533 if (geombuf == (-1)) { 4534 return (EINVAL); 4535 } 4536 4537 /* Initialize the logical geometry cache. */ 4538 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4539 lgeom_p->g_nsect = geombuf & 0xffff; 4540 lgeom_p->g_secsize = un->un_sys_blocksize; 4541 4542 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4543 4544 /* 4545 * Note: The driver originally converted the capacity value from 4546 * target blocks to system blocks. However, the capacity value passed 4547 * to this routine is already in terms of system blocks (this scaling 4548 * is done when the READ CAPACITY command is issued and processed). 4549 * This 'error' may have gone undetected because the usage of g_ncyl 4550 * (which is based upon g_capacity) is very limited within the driver 4551 */ 4552 lgeom_p->g_capacity = capacity; 4553 4554 /* 4555 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The
4556 	 * hba may return zero values if the device has been removed.
4557 	 */
4558 	if (spc == 0) {
4559 		lgeom_p->g_ncyl = 0;
4560 	} else {
4561 		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
4562 	}
4563 	lgeom_p->g_acyl = 0;
4564 
4565 	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
4566 	return (0);
4567 
4568 }
4569 /*
4570  * Function:	sd_update_block_info
4571  *
4572  * Description: Calculate a byte count to sector count bitshift value
4573  *		from sector size.
4574  *
4575  * Arguments:	un: unit struct.
4576  *		lbasize: new target sector size
4577  *		capacity: new target capacity, i.e., block count
4578  *
4579  * Context:	Kernel thread context
4580  */
4581 
4582 static void
4583 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
4584 {
4585 	uint_t	dblk;
4586 
4587 	if (lbasize != 0) {
4588 		un->un_tgt_blocksize = lbasize;
4589 		un->un_f_tgt_blocksize_is_valid = TRUE;
4590 	}
4591 
4592 	if (capacity != 0) {
4593 		un->un_blockcount = capacity;
4594 		un->un_f_blockcount_is_valid = TRUE;
4595 	}
4596 
4597 	/*
4598 	 * Update device capacity properties.
4599 	 *
4600 	 *	'device-nblocks'	number of blocks in target's units
4601 	 *	'device-blksize'	data bearing size of target's block
4602 	 *
4603 	 * NOTE: math is complicated by the fact that un_tgt_blocksize may
4604 	 * not be a power of two for checksumming disks with 520/528 byte
4605 	 * sectors.
4606 	 */
4607 	if (un->un_f_tgt_blocksize_is_valid &&
4608 	    un->un_f_blockcount_is_valid &&
4609 	    un->un_sys_blocksize) {
4610 		dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
4611 		(void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
4612 		    "device-nblocks", un->un_blockcount / dblk);
4613 		/*
4614 		 * To save memory, only define "device-blksize" when its
4615 		 * value is different from the default DEV_BSIZE value.
4616 		 */
4617 		if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
4618 			(void) ddi_prop_update_int(DDI_DEV_T_NONE,
4619 			    SD_DEVINFO(un), "device-blksize",
4620 			    un->un_sys_blocksize * dblk);
4621 	}
4622 }
4623 
4624 
4625 /*
4626  * Function:	sd_register_devid
4627  *
4628  * Description: This routine will obtain the device id information from the
4629  *		target, obtain the serial number, and register the device
4630  *		id with the ddi framework.
4631  *
4632  * Arguments:	devi - the system's dev_info_t for the device.
4633  *		un - driver soft state (unit) structure
4634  *		reservation_flag - indicates if a reservation conflict
4635  *		occurred during attach
4636  *
4637  * Context:	Kernel Thread
4638  */
4639 static void
4640 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
4641 {
4642 	int		rval = 0;
4643 	uchar_t		*inq80 = NULL;
4644 	size_t		inq80_len = MAX_INQUIRY_SIZE;
4645 	size_t		inq80_resid = 0;
4646 	uchar_t		*inq83 = NULL;
4647 	size_t		inq83_len = MAX_INQUIRY_SIZE;
4648 	size_t		inq83_resid = 0;
4649 	int		dlen, len;
4650 	char		*sn;
4651 
4652 	ASSERT(un != NULL);
4653 	ASSERT(mutex_owned(SD_MUTEX(un)));
4654 	ASSERT((SD_DEVINFO(un)) == devi);
4655 
4656 	/*
4657 	 * If transport has already registered a devid for this target
4658 	 * then that takes precedence over the driver's determination
4659 	 * of the devid.
4660 	 */
4661 	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
4662 		ASSERT(un->un_devid);
4663 		return; /* use devid registered by the transport */
4664 	}
4665 
4666 	/*
4667 	 * This is the case of antiquated Sun disk drives that have the
4668 	 * FAB_DEVID property set in the disk_table.
These drives
4669 	 * manage their devids by storing them in the last two available
4670 	 * sectors on the drive and have them fabricated by the ddi layer
4671 	 * by calling ddi_devid_init and passing the DEVID_FAB flag.
4672 	 */
4673 	if (un->un_f_opt_fab_devid == TRUE) {
4674 		/*
4675 		 * Depending on EINVAL isn't reliable, since a reserved disk
4676 		 * may result in invalid geometry, so check to make sure a
4677 		 * reservation conflict did not occur during attach.
4678 		 */
4679 		if ((sd_get_devid(un) == EINVAL) &&
4680 		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
4681 			/*
4682 			 * The devid is invalid AND there is no reservation
4683 			 * conflict. Fabricate a new devid.
4684 			 */
4685 			(void) sd_create_devid(un);
4686 		}
4687 
4688 		/* Register the devid if it exists */
4689 		if (un->un_devid != NULL) {
4690 			(void) ddi_devid_register(SD_DEVINFO(un),
4691 			    un->un_devid);
4692 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4693 			    "sd_register_devid: Devid Fabricated\n");
4694 		}
4695 		return;
4696 	}
4697 
4698 	/*
4699 	 * We check the availability of the World Wide Name (0x83) and Unit
4700 	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
4701 	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
4702 	 * 0x83 is available, that is the best choice. Our next choice is
4703 	 * 0x80. If neither is available, we munge the devid from the device
4704 	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
4705 	 * to fabricate a devid for non-Sun qualified disks.
4706 	 */
4707 	if (sd_check_vpd_page_support(un) == 0) {
4708 		/* collect page 80 data if available */
4709 		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
4710 
4711 			mutex_exit(SD_MUTEX(un));
4712 			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
4713 			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
4714 			    0x01, 0x80, &inq80_resid);
4715 
4716 			if (rval != 0) {
4717 				kmem_free(inq80, inq80_len);
4718 				inq80 = NULL;
4719 				inq80_len = 0;
4720 			} else if (ddi_prop_exists(
4721 			    DDI_DEV_T_NONE, SD_DEVINFO(un),
4722 			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
4723 			    INQUIRY_SERIAL_NO) == 0) {
4724 				/*
4725 				 * If we don't already have a serial number
4726 				 * property, do a quick verify of the data
4727 				 * returned and define the property.
4728 				 */
4729 				dlen = inq80_len - inq80_resid;
4730 				len = (size_t)inq80[3];
4731 				if ((dlen >= 4) && ((len + 4) <= dlen)) {
4732 					/*
4733 					 * Ensure sn termination, skip leading
4734 					 * blanks, and create property
4735 					 * 'inquiry-serial-no'.
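					 * (For reference, a unit-serial-number
					 * page returned here carries the page
					 * code in byte 1, the serial-number
					 * length in byte 3, and the ASCII
					 * serial number, possibly blank-padded,
					 * from byte 4 onward; that layout is
					 * what the dlen/len checks above
					 * validate.)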
4736 					 */
4737 					sn = (char *)&inq80[4];
4738 					sn[len] = 0;
4739 					while (*sn && (*sn == ' '))
4740 						sn++;
4741 					if (*sn) {
4742 						(void) ddi_prop_update_string(
4743 						    DDI_DEV_T_NONE,
4744 						    SD_DEVINFO(un),
4745 						    INQUIRY_SERIAL_NO, sn);
4746 					}
4747 				}
4748 			}
4749 			mutex_enter(SD_MUTEX(un));
4750 		}
4751 
4752 		/* collect page 83 data if available */
4753 		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
4754 			mutex_exit(SD_MUTEX(un));
4755 			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
4756 			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
4757 			    0x01, 0x83, &inq83_resid);
4758 
4759 			if (rval != 0) {
4760 				kmem_free(inq83, inq83_len);
4761 				inq83 = NULL;
4762 				inq83_len = 0;
4763 			}
4764 			mutex_enter(SD_MUTEX(un));
4765 		}
4766 	}
4767 
4768 	/* encode best devid possible based on data available */
4769 	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
4770 	    (char *)ddi_driver_name(SD_DEVINFO(un)),
4771 	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
4772 	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
4773 	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {
4774 
4775 		/* devid successfully encoded, register devid */
4776 		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
4777 
4778 	} else {
4779 		/*
4780 		 * Unable to encode a devid based on data available.
4781 		 * This is not a Sun qualified disk. Older Sun disk
4782 		 * drives that have the SD_FAB_DEVID property
4783 		 * set in the disk_table and non-Sun qualified
4784 		 * disks are treated in the same manner. These
4785 		 * drives manage their devids by storing them in
4786 		 * the last two available sectors on the drive and
4787 		 * have them fabricated by the ddi layer by
4788 		 * calling ddi_devid_init and passing the
4789 		 * DEVID_FAB flag.
4790 		 * Create a fabricated devid only if one does not
4791 		 * already exist.
4792 		 */
4793 		if (sd_get_devid(un) == EINVAL) {
4794 			(void) sd_create_devid(un);
4795 		}
4796 		un->un_f_opt_fab_devid = TRUE;
4797 
4798 		/* Register the devid if it exists */
4799 		if (un->un_devid != NULL) {
4800 			(void) ddi_devid_register(SD_DEVINFO(un),
4801 			    un->un_devid);
4802 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4803 			    "sd_register_devid: devid fabricated using "
4804 			    "ddi framework\n");
4805 		}
4806 	}
4807 
4808 	/* clean up resources */
4809 	if (inq80 != NULL) {
4810 		kmem_free(inq80, inq80_len);
4811 	}
4812 	if (inq83 != NULL) {
4813 		kmem_free(inq83, inq83_len);
4814 	}
4815 }
4816 
4817 
4818 
4819 /*
4820  * Function:	sd_get_devid
4821  *
4822  * Description: This routine will return 0 if a valid device id has been
4823  *		obtained from the target and stored in the soft state. If a
4824  *		valid device id has not been previously read and stored, a
4825  *		read attempt will be made.
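 *
 *		Note: the on-disk dk_devid sector read below is validated
 *		by checking its revision bytes and by XOR-ing every 32-bit
 *		word of the sector except the last, which holds the
 *		expected checksum (compared via DKD_GETCHKSUM).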
4826 * 4827 * Arguments: un - driver soft state (unit) structure 4828 * 4829 * Return Code: 0 if we successfully get the device id 4830 * 4831 * Context: Kernel Thread 4832 */ 4833 4834 static int 4835 sd_get_devid(struct sd_lun *un) 4836 { 4837 struct dk_devid *dkdevid; 4838 ddi_devid_t tmpid; 4839 uint_t *ip; 4840 size_t sz; 4841 diskaddr_t blk; 4842 int status; 4843 int chksum; 4844 int i; 4845 size_t buffer_size; 4846 4847 ASSERT(un != NULL); 4848 ASSERT(mutex_owned(SD_MUTEX(un))); 4849 4850 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4851 un); 4852 4853 if (un->un_devid != NULL) { 4854 return (0); 4855 } 4856 4857 mutex_exit(SD_MUTEX(un)); 4858 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4859 (void *)SD_PATH_DIRECT) != 0) { 4860 mutex_enter(SD_MUTEX(un)); 4861 return (EINVAL); 4862 } 4863 4864 /* 4865 * Read and verify device id, stored in the reserved cylinders at the 4866 * end of the disk. Backup label is on the odd sectors of the last 4867 * track of the last cylinder. Device id will be on track of the next 4868 * to last cylinder. 4869 */ 4870 mutex_enter(SD_MUTEX(un)); 4871 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4872 mutex_exit(SD_MUTEX(un)); 4873 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4874 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4875 SD_PATH_DIRECT); 4876 if (status != 0) { 4877 goto error; 4878 } 4879 4880 /* Validate the revision */ 4881 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4882 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4883 status = EINVAL; 4884 goto error; 4885 } 4886 4887 /* Calculate the checksum */ 4888 chksum = 0; 4889 ip = (uint_t *)dkdevid; 4890 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4891 i++) { 4892 chksum ^= ip[i]; 4893 } 4894 4895 /* Compare the checksums */ 4896 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4897 status = EINVAL; 4898 goto error; 4899 } 4900 4901 /* Validate the device id */ 4902 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4903 status = EINVAL; 4904 goto error; 4905 } 4906 4907 /* 4908 * Store the device id in the driver soft state 4909 */ 4910 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4911 tmpid = kmem_alloc(sz, KM_SLEEP); 4912 4913 mutex_enter(SD_MUTEX(un)); 4914 4915 un->un_devid = tmpid; 4916 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4917 4918 kmem_free(dkdevid, buffer_size); 4919 4920 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4921 4922 return (status); 4923 error: 4924 mutex_enter(SD_MUTEX(un)); 4925 kmem_free(dkdevid, buffer_size); 4926 return (status); 4927 } 4928 4929 4930 /* 4931 * Function: sd_create_devid 4932 * 4933 * Description: This routine will fabricate the device id and write it 4934 * to the disk. 
4935  *
4936  * Arguments:	un - driver soft state (unit) structure
4937  *
4938  * Return Code: value of the fabricated device id
4939  *
4940  * Context:	Kernel Thread
4941  */
4942 
4943 static ddi_devid_t
4944 sd_create_devid(struct sd_lun *un)
4945 {
4946 	ASSERT(un != NULL);
4947 
4948 	/* Fabricate the devid */
4949 	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
4950 	    == DDI_FAILURE) {
4951 		return (NULL);
4952 	}
4953 
4954 	/* Write the devid to disk */
4955 	if (sd_write_deviceid(un) != 0) {
4956 		ddi_devid_free(un->un_devid);
4957 		un->un_devid = NULL;
4958 	}
4959 
4960 	return (un->un_devid);
4961 }
4962 
4963 
4964 /*
4965  * Function:	sd_write_deviceid
4966  *
4967  * Description: This routine will write the device id to the disk
4968  *		reserved sector.
4969  *
4970  * Arguments:	un - driver soft state (unit) structure
4971  *
4972  * Return Code: -1 if the devid block could not be obtained,
4973  *		else the value returned by sd_send_scsi_WRITE
4974  *
4975  * Context:	Kernel Thread
4976  */
4977 
4978 static int
4979 sd_write_deviceid(struct sd_lun *un)
4980 {
4981 	struct dk_devid		*dkdevid;
4982 	diskaddr_t		blk;
4983 	uint_t			*ip, chksum;
4984 	int			status;
4985 	int			i;
4986 
4987 	ASSERT(mutex_owned(SD_MUTEX(un)));
4988 
4989 	mutex_exit(SD_MUTEX(un));
4990 	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
4991 	    (void *)SD_PATH_DIRECT) != 0) {
4992 		mutex_enter(SD_MUTEX(un));
4993 		return (-1);
4994 	}
4995 
4996 
4997 	/* Allocate the buffer */
4998 	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
4999 
5000 	/* Fill in the revision */
5001 	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5002 	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5003 
5004 	/* Copy in the device id */
5005 	mutex_enter(SD_MUTEX(un));
5006 	bcopy(un->un_devid, &dkdevid->dkd_devid,
5007 	    ddi_devid_sizeof(un->un_devid));
5008 	mutex_exit(SD_MUTEX(un));
5009 
5010 	/* Calculate the checksum */
5011 	chksum = 0;
5012 	ip = (uint_t *)dkdevid;
5013 	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
5014 	    i++) {
5015 		chksum ^= ip[i];
5016 	}
5017 
5018 	/* Fill in the checksum */
5019 	DKD_FORMCHKSUM(chksum, dkdevid);
5020 
5021 	/* Write the reserved sector */
5022 	status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
5023 	    SD_PATH_DIRECT);
5024 
5025 	kmem_free(dkdevid, un->un_sys_blocksize);
5026 
5027 	mutex_enter(SD_MUTEX(un));
5028 	return (status);
5029 }
5030 
5031 
5032 /*
5033  * Function:	sd_check_vpd_page_support
5034  *
5035  * Description: This routine sends an inquiry command with the EVPD bit set
5036  *		and a page code of 0x00 to the device. It is used to determine
5037  *		which vital product data pages are available to find the
5038  *		devid. We are looking for pages 0x83 or 0x80. If -1 is
5039  *		returned, the device does not support that command.
5040  *
5041  * Arguments:	un - driver soft state (unit) structure
5042  *
5043  * Return Code: 0 - success
5044  *		-1 - the device does not support VPD pages
5045  *
5046  * Context:	This routine can sleep.
5047  */
5048 
5049 static int
5050 sd_check_vpd_page_support(struct sd_lun *un)
5051 {
5052 	uchar_t	*page_list	= NULL;
5053 	uchar_t	page_length	= 0xff;	/* Use max possible length */
5054 	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
5055 	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
5056 	int	rval		= 0;
5057 	int	counter;
5058 
5059 	ASSERT(un != NULL);
5060 	ASSERT(mutex_owned(SD_MUTEX(un)));
5061 
5062 	mutex_exit(SD_MUTEX(un));
5063 
5064 	/*
5065 	 * We'll set the page length to the maximum to save figuring it out
5066 	 * with an additional call.
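	 * For illustration, a typical supported-pages response begins
	 *	0x00 0x00 0x00 0x03 0x00 0x80 0x83
	 * where byte 3 (0x03) is the page length and bytes 4-6 list the
	 * supported page codes, here 0x00, 0x80 and 0x83; the loop below
	 * walks exactly this list. (Example data only.)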
5067 */ 5068 page_list = kmem_zalloc(page_length, KM_SLEEP); 5069 5070 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5071 page_code, NULL); 5072 5073 mutex_enter(SD_MUTEX(un)); 5074 5075 /* 5076 * Now we must validate that the device accepted the command, as some 5077 * drives do not support it. If the drive does support it, we will 5078 * return 0, and the supported pages will be in un_vpd_page_mask. If 5079 * not, we return -1. 5080 */ 5081 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5082 /* Loop to find one of the 2 pages we need */ 5083 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5084 5085 /* 5086 * Pages are returned in ascending order, and 0x83 is what we 5087 * are hoping for. 5088 */ 5089 while ((page_list[counter] <= 0x86) && 5090 (counter <= (page_list[VPD_PAGE_LENGTH] + 5091 VPD_HEAD_OFFSET))) { 5092 /* 5093 * Add 3 because page_list[3] is the number of 5094 * pages minus 3 5095 */ 5096 5097 switch (page_list[counter]) { 5098 case 0x00: 5099 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5100 break; 5101 case 0x80: 5102 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5103 break; 5104 case 0x81: 5105 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5106 break; 5107 case 0x82: 5108 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5109 break; 5110 case 0x83: 5111 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5112 break; 5113 case 0x86: 5114 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5115 break; 5116 } 5117 counter++; 5118 } 5119 5120 } else { 5121 rval = -1; 5122 5123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5124 "sd_check_vpd_page_support: This drive does not implement " 5125 "VPD pages.\n"); 5126 } 5127 5128 kmem_free(page_list, page_length); 5129 5130 return (rval); 5131 } 5132 5133 5134 /* 5135 * Function: sd_setup_pm 5136 * 5137 * Description: Initialize Power Management on the device 5138 * 5139 * Context: Kernel Thread 5140 */ 5141 5142 static void 5143 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5144 { 5145 uint_t log_page_size; 5146 uchar_t *log_page_data; 5147 int rval; 5148 5149 /* 5150 * Since we are called from attach, holding a mutex for 5151 * un is unnecessary. Because some of the routines called 5152 * from here require SD_MUTEX to not be held, assert this 5153 * right up front. 5154 */ 5155 ASSERT(!mutex_owned(SD_MUTEX(un))); 5156 /* 5157 * Since the sd device does not have the 'reg' property, 5158 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5159 * The following code is to tell cpr that this device 5160 * DOES need to be suspended and resumed. 5161 */ 5162 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5163 "pm-hardware-state", "needs-suspend-resume"); 5164 5165 /* 5166 * This complies with the new power management framework 5167 * for certain desktop machines. Create the pm_components 5168 * property as a string array property. 5169 */ 5170 if (un->un_f_pm_supported) { 5171 /* 5172 * not all devices have a motor, try it first. 5173 * some devices may return ILLEGAL REQUEST, some 5174 * will hang 5175 * The following START_STOP_UNIT is used to check if target 5176 * device has a motor. 
		 */
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create pm properties anyway; otherwise the parent can't
		 * go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found; use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the Start/Stop Cycle Counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle;
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove this
		 * property created above, the PM framework will not honor the
		 * change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait until the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait until the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
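	 * The deadline below is the current lbolt plus sd_wait_cmds_complete
	 * seconds converted to ticks with drv_usectohz(); cv_timedwait()
	 * returns -1 once that deadline has passed.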
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding "
			    "cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
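 *		(The actual START/STOP UNIT command is issued from
 *		sdpower(); this routine only updates the soft state
 *		bookkeeping.)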
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, ie.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power
		 * state. Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * right state in un_last_state.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume. If throttle isn't
	 * restored from the saved value until after calling pm_raise_power,
	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
	 * hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in power
	 * entry point is START command when you transition from 0->1 or
	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
	 * which suspend was done. Ignore the return value as the resume should
	 * not be failed. In the case of removable media the media need not be
	 * inserted and hence there is a chance that raise power will fail with
	 * media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device, commands in the queue will have
	 * to wait until new commands come in, which may take a while. Also,
	 * we specifically don't check un_ncmds_in_transport because we know
	 * that there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for a while. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
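 *		If the device is still busy, the handler re-arms itself
 *		with a 300 ms timeout (see the timeout() call below).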
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * ie. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, ie. gotten,
	 * here.
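	 * sema_tryp() returns non-zero when the semaphore was acquired, so
	 * a non-zero got_semaphore_here obliges us to sema_v() on every
	 * exit path.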
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen; therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the state is OFFLINE, the disk is completely dead: we would
	 * have to turn it on or off by sending commands, and of course those
	 * would fail anyway, so return failure here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call pm_trans_check routine to get the Ok from
		 * the global policy
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed. pm_trans_check will have given
			 * the advised time to wait before considering a power
			 * cycle. Based on the new intvlp parameter we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * Because of that, install a timeout handler and wait
			 * for the recommended time to elapse so that power
			 * management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, independent of
			 * this; but because the framework is told the device
			 * is busy it won't attempt powering down until it
			 * gets a matching idle. The timeout handler sends
			 * this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands being processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we set up for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state. This is as per
			 * the requirements of the pm framework; otherwise
			 * commands will be generated for the device (through
			 * the watch thread) even when the device is in low
			 * power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded. Resume the
		 * devices now that we have started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	return (rval);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *		cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *		cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *		Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct	scsi_device	*devp;
	struct	sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
#if defined(__sparc)
	int		max_xfer_size;
#endif

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, ie. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it,
	 * this includes us. If that's the case, and if the following is not
	 * set up properly or we don't re-negotiate with the drive prior to
	 * transferring data to/from the drive, it causes bus parity errors,
	 * data overruns, and unexpected interrupts. This first occurred when
	 * the fix for bug (4378686) was made.
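	 * The three scsi_ifsetcap() calls below clear the lun-reset,
	 * wide-xfer, and auto-rqsense capabilities so that each is
	 * re-negotiated from a known state.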
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
	 * on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can disable
	 * this capability only when no other lun has been attached on this
	 * target. By doing this, we assume a target has the same tagged-qing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support a per-lun tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc().  We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has 1 binary, and un_retry_count is set based on connection
	 * type. The hardcoded values will go away when SPARC uses 1 binary
	 * for sd and ssd. These hardcoded values need to match
	 * SD_RETRY_COUNT in sddef.h.
	 * The value used is based on the interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get "allow bus device reset" property (defaults to "enabled" if
	 * the property was not defined). This is to disable bus resets for
	 * certain kinds of error recovery. Note: In the future when a run-time
	 * fibre check is available the soft state flag should default to
	 * enabled.
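	 * ddi_getprop() supplies a default of 1 when the property is
	 * undefined, so bus device resets are allowed unless
	 * "allow-bus-device-reset" is explicitly set to 0.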
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI behavior
	 * (like atapi) could be specified by the underlying HBA drivers by
	 * supplying a new value for the "variant" property, instead of having
	 * to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}

	un->un_cmd_timeout	= SD_IO_TIME;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state		= SD_STATE_NORMAL;
	un->un_last_state	= SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle		= sd_max_throttle;
	un->un_saved_throttle	= sd_max_throttle;
	un->un_min_throttle	= sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate		= DKIO_NONE;
	un->un_specified_mediastate	= DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable are a forceful override,
	 * meaning a non-zero value must be entered to change the default.
	 */
	un->un_f_disksort_disabled = FALSE;

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * Only if a device has the "hotpluggable" property is it treated as
	 * a hotpluggable device. Otherwise, it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
	 */

	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	if (un->un_errstats == NULL) {
		goto create_errstats_failed;
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However, with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
		int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
		    (devp->sd_inq->inq_ansi == 5)) &&
		    devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

		/*
		 * If tagged queueing is supported by the target
		 * and by the host adapter, then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);

		}

		/* Setup or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
		 * and "ssd_max_xfer_size" to exist simultaneously on the same
		 * system and be set to different values. In the future this
		 * code may need to be updated when the ssd module is
		 * obsoleted and removed from the system. (4299588)
(4299588) 6958 */ 6959 if (SD_IS_PARALLEL_SCSI(un) && 6960 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6961 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6962 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6963 1, 1) == 1) { 6964 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6965 "sd_unit_attach: un:0x%p Wide Transfer " 6966 "enabled\n", un); 6967 } 6968 6969 /* 6970 * If tagged queuing has also been enabled, then 6971 * enable large xfers 6972 */ 6973 if (un->un_saved_throttle == sd_max_throttle) { 6974 un->un_max_xfer_size = 6975 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6976 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6977 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6978 "sd_unit_attach: un:0x%p max transfer " 6979 "size=0x%x\n", un, un->un_max_xfer_size); 6980 } 6981 } else { 6982 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6983 0, 1) == 1) { 6984 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6985 "sd_unit_attach: un:0x%p " 6986 "Wide Transfer disabled\n", un); 6987 } 6988 } 6989 } else { 6990 un->un_tagflags = FLAG_STAG; 6991 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6992 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6993 } 6994 6995 /* 6996 * If this target supports LUN reset, try to enable it. 6997 */ 6998 if (un->un_f_lun_reset_enabled) { 6999 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7000 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7001 "un:0x%p lun_reset capability set\n", un); 7002 } else { 7003 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7004 "un:0x%p lun-reset capability not set\n", un); 7005 } 7006 } 7007 7008 /* 7009 * Adjust the maximum transfer size. This is to fix 7010 * the problem of partial DMA support on SPARC. Some 7011 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7012 * size, which requires partial DMA support on SPARC. 7013 * In the future the SPARC pci nexus driver may solve 7014 * the problem instead of this fix. 7015 */ 7016 #if defined(__sparc) 7017 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7018 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7019 un->un_max_xfer_size = max_xfer_size; 7020 un->un_partial_dma_supported = 1; 7021 } 7022 #endif 7023 7024 /* 7025 * Set PKT_DMA_PARTIAL flag. 7026 */ 7027 if (un->un_partial_dma_supported == 1) { 7028 un->un_pkt_flags = PKT_DMA_PARTIAL; 7029 } else { 7030 un->un_pkt_flags = 0; 7031 } 7032 7033 /* 7034 * At this point in the attach, we have enough info in the 7035 * soft state to be able to issue commands to the target. 7036 * 7037 * All command paths used below MUST issue their commands as 7038 * SD_PATH_DIRECT. This is important as intermediate layers 7039 * are not all initialized yet (such as PM). 7040 */ 7041 7042 /* 7043 * Send a TEST UNIT READY command to the device. This should clear 7044 * any outstanding UNIT ATTENTION that may be present. 7045 * 7046 * Note: Don't check for success; just track if there is a reservation. 7047 * This is a throwaway command to clear any unit attentions. 7048 * 7049 * Note: This MUST be the first command issued to the target during 7050 * attach to ensure power on UNIT ATTENTIONS are cleared. 7051 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7052 * with attempts at spinning up a device with no media.
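 *
 * For reference, a minimal sketch (not the driver's actual helper) of
 * the CDB behind sd_send_scsi_TEST_UNIT_READY: TEST UNIT READY is a
 * 6-byte CDB that is all zeros except for the opcode, e.g.
 *
 *	union scsi_cdb cdb;
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *
 * Any pending unit attention is reported (and thereby cleared) via the
 * sense data for this command; the only status this call site acts on
 * is EACCES, which indicates a reservation conflict.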
7053 */ 7054 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7055 reservation_flag = SD_TARGET_IS_RESERVED; 7056 } 7057 7058 /* 7059 * If the device is NOT a removable media device, attempt to spin 7060 * it up (using the START_STOP_UNIT command) and read its capacity 7061 * (using the READ CAPACITY command). Note, however, that either 7062 * of these could fail and in some cases we would continue with 7063 * the attach despite the failure (see below). 7064 */ 7065 if (un->un_f_descr_format_supported) { 7066 switch (sd_spin_up_unit(un)) { 7067 case 0: 7068 /* 7069 * Spin-up was successful; now try to read the 7070 * capacity. If successful then save the results 7071 * and mark the capacity & lbasize as valid. 7072 */ 7073 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7074 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7075 7076 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7077 &lbasize, SD_PATH_DIRECT)) { 7078 case 0: { 7079 if (capacity > DK_MAX_BLOCKS) { 7080 #ifdef _LP64 7081 if (capacity + 1 > 7082 SD_GROUP1_MAX_ADDRESS) { 7083 /* 7084 * Enable descriptor format 7085 * sense data so that we can 7086 * get 64 bit sense data 7087 * fields. 7088 */ 7089 sd_enable_descr_sense(un); 7090 } 7091 #else 7092 /* 32-bit kernels can't handle this */ 7093 scsi_log(SD_DEVINFO(un), 7094 sd_label, CE_WARN, 7095 "disk has %llu blocks, which " 7096 "is too large for a 32-bit " 7097 "kernel", capacity); 7098 7099 #if defined(__i386) || defined(__amd64) 7100 /* 7101 * In the past a 1TB disk was treated 7102 * as (1T - 512)B so that it might have 7103 * a valid VTOC and Solaris partitions; 7104 * we have to allow it to continue to 7105 * work. 7106 */ 7107 if (capacity - 1 > DK_MAX_BLOCKS) 7108 #endif 7109 goto spinup_failed; 7110 #endif 7111 } 7112 7113 /* 7114 * Here it's not necessary to check whether 7115 * the capacity of the device is bigger than 7116 * what the max HBA CDB can support, because 7117 * sd_send_scsi_READ_CAPACITY retrieves 7118 * the capacity via a USCSI command, which 7119 * is constrained by the max HBA CDB. In fact, 7120 * sd_send_scsi_READ_CAPACITY returns EINVAL 7121 * when a CDB bigger than the required CDB 7122 * length is used; that case is handled in 7123 * "case EINVAL" below. 7124 */ 7125 7126 /* 7127 * The following relies on 7128 * sd_send_scsi_READ_CAPACITY never 7129 * returning 0 for capacity and/or lbasize. 7130 */ 7131 sd_update_block_info(un, lbasize, capacity); 7132 7133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7134 "sd_unit_attach: un:0x%p capacity = %ld " 7135 "blocks; lbasize= %ld.\n", un, 7136 un->un_blockcount, un->un_tgt_blocksize); 7137 7138 break; 7139 } 7140 case EINVAL: 7141 /* 7142 * In the case where the max-cdb-length property 7143 * is smaller than the required CDB length for 7144 * a SCSI device, a target driver can fail to 7145 * attach to that device. 7146 */ 7147 scsi_log(SD_DEVINFO(un), 7148 sd_label, CE_WARN, 7149 "disk capacity is too large " 7150 "for current cdb length"); 7151 goto spinup_failed; 7152 case EACCES: 7153 /* 7154 * Should never get here if the spin-up 7155 * succeeded, but code it in anyway. 7156 * From here, just continue with the attach... 7157 */ 7158 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7159 "sd_unit_attach: un:0x%p " 7160 "sd_send_scsi_READ_CAPACITY " 7161 "returned reservation conflict\n", un); 7162 reservation_flag = SD_TARGET_IS_RESERVED; 7163 break; 7164 default: 7165 /* 7166 * Likewise, should never get here if the 7167 * spin-up succeeded. Just continue with 7168 * the attach...
7169 */ 7170 break; 7171 } 7172 break; 7173 case EACCES: 7174 /* 7175 * Device is reserved by another host. In this case 7176 * we could not spin it up or read the capacity, but 7177 * we continue with the attach anyway. 7178 */ 7179 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7180 "sd_unit_attach: un:0x%p spin-up reservation " 7181 "conflict.\n", un); 7182 reservation_flag = SD_TARGET_IS_RESERVED; 7183 break; 7184 default: 7185 /* Fail the attach if the spin-up failed. */ 7186 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7187 "sd_unit_attach: un:0x%p spin-up failed.", un); 7188 goto spinup_failed; 7189 } 7190 } 7191 7192 /* 7193 * Check to see if this is an MMC drive 7194 */ 7195 if (ISCD(un)) { 7196 sd_set_mmc_caps(un); 7197 } 7198 7199 7200 /* 7201 * Add a zero-length attribute to tell the world we support 7202 * kernel ioctls (for layered drivers) 7203 */ 7204 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7205 DDI_KERNEL_IOCTL, NULL, 0); 7206 7207 /* 7208 * Add a boolean property to tell the world we support 7209 * the B_FAILFAST flag (for layered drivers) 7210 */ 7211 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7212 "ddi-failfast-supported", NULL, 0); 7213 7214 /* 7215 * Initialize power management 7216 */ 7217 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7218 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7219 sd_setup_pm(un, devi); 7220 if (un->un_f_pm_is_enabled == FALSE) { 7221 /* 7222 * For performance, point to a jump table that does 7223 * not include pm. 7224 * The direct and priority chains don't change with PM. 7225 * 7226 * Note: this is currently done based on individual device 7227 * capabilities. When an interface for determining system 7228 * power enabled state becomes available, or when additional 7229 * layers are added to the command chain, these values will 7230 * have to be re-evaluated for correctness. 7231 */ 7232 if (un->un_f_non_devbsize_supported) { 7233 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7234 } else { 7235 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7236 } 7237 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7238 } 7239 7240 /* 7241 * This property is set to 0 by HA software to avoid retries 7242 * on a reserved disk. (The preferred property name is 7243 * "retry-on-reservation-conflict") (1189689) 7244 * 7245 * Note: The use of a global here can have unintended consequences. A 7246 * per-instance variable is preferable to match the capabilities of 7247 * different underlying HBAs (4402600) 7248 */ 7249 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7250 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7251 sd_retry_on_reservation_conflict); 7252 if (sd_retry_on_reservation_conflict != 0) { 7253 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7254 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7255 sd_retry_on_reservation_conflict); 7256 } 7257 7258 /* Set up options for QFULL handling. */ 7259 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7260 "qfull-retries", -1)) != -1) { 7261 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7262 rval, 1); 7263 } 7264 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7265 "qfull-retry-interval", -1)) != -1) { 7266 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7267 rval, 1); 7268 } 7269 7270 /* 7271 * This just prints a message that announces the existence of the 7272 * device.
The message is always printed in the system logfile, but 7273 * only appears on the console if the system is booted with the 7274 * -v (verbose) argument. 7275 */ 7276 ddi_report_dev(devi); 7277 7278 un->un_mediastate = DKIO_NONE; 7279 7280 cmlb_alloc_handle(&un->un_cmlbhandle); 7281 7282 #if defined(__i386) || defined(__amd64) 7283 /* 7284 * On x86, compensate for off-by-1 legacy error 7285 */ 7286 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7287 (lbasize == un->un_sys_blocksize)) 7288 offbyone = CMLB_OFF_BY_ONE; 7289 #endif 7290 7291 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7292 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7293 un->un_node_type, offbyone, un->un_cmlbhandle, 7294 (void *)SD_PATH_DIRECT) != 0) { 7295 goto cmlb_attach_failed; 7296 } 7297 7298 7299 /* 7300 * Read and validate the device's geometry (i.e., disk label). 7301 * A new unformatted drive will not have a valid geometry, but 7302 * the driver needs to successfully attach to this device so 7303 * the drive can be formatted via ioctls. 7304 */ 7305 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7306 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7307 7308 mutex_enter(SD_MUTEX(un)); 7309 7310 /* 7311 * Read and initialize the devid for the unit. 7312 */ 7313 if (un->un_f_devid_supported) { 7314 sd_register_devid(un, devi, reservation_flag); 7315 } 7316 mutex_exit(SD_MUTEX(un)); 7317 7318 #if (defined(__fibre)) 7319 /* 7320 * Register callbacks for fibre only. You can't do this solely 7321 * on the basis of the devid_type because this is hba specific. 7322 * We need to query our hba capabilities to find out whether to 7323 * register or not. 7324 */ 7325 if (un->un_f_is_fibre) { 7326 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7327 sd_init_event_callbacks(un); 7328 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7329 "sd_unit_attach: un:0x%p event callbacks inserted", 7330 un); 7331 } 7332 } 7333 #endif 7334 7335 if (un->un_f_opt_disable_cache == TRUE) { 7336 /* 7337 * Disable both read cache and write cache. This is 7338 * the historic behavior of the keywords in the config file. 7339 */ 7340 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7341 0) { 7342 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7343 "sd_unit_attach: un:0x%p Could not disable " 7344 "caching", un); 7345 goto devid_failed; 7346 } 7347 } 7348 7349 /* 7350 * Check the value of the WCE bit now and 7351 * set un_f_write_cache_enabled accordingly. 7352 */ 7353 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7354 mutex_enter(SD_MUTEX(un)); 7355 un->un_f_write_cache_enabled = (wc_enabled != 0); 7356 mutex_exit(SD_MUTEX(un)); 7357 7358 /* 7359 * Check the value of the NV_SUP bit and set 7360 * un_f_suppress_cache_flush accordingly. 7361 */ 7362 sd_get_nv_sup(un); 7363 7364 /* 7365 * Find out what type of reservation this disk supports. 7366 */ 7367 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7368 case 0: 7369 /* 7370 * SCSI-3 reservations are supported. 7371 */ 7372 un->un_reservation_type = SD_SCSI3_RESERVATION; 7373 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7374 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7375 break; 7376 case ENOTSUP: 7377 /* 7378 * The PERSISTENT RESERVE IN command would not be recognized by 7379 * a SCSI-2 device, so assume the reservation type is SCSI-2.
7380 */ 7381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7382 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7383 un->un_reservation_type = SD_SCSI2_RESERVATION; 7384 break; 7385 default: 7386 /* 7387 * default to SCSI-3 reservations 7388 */ 7389 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7390 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7391 un->un_reservation_type = SD_SCSI3_RESERVATION; 7392 break; 7393 } 7394 7395 /* 7396 * Set the pstat and error stat values here, so data obtained during the 7397 * previous attach-time routines is available. 7398 * 7399 * Note: This is a critical sequence that needs to be maintained: 7400 * 1) Instantiate the kstats before any routines using the iopath 7401 * (i.e. sd_send_scsi_cmd). 7402 * 2) Initialize the error stats (sd_set_errstats) and partition 7403 * stats (sd_set_pstats) here, following 7404 * cmlb_validate_geometry(), sd_register_devid(), and 7405 * sd_cache_control(). 7406 */ 7407 7408 if (un->un_f_pkstats_enabled && geom_label_valid) { 7409 sd_set_pstats(un); 7410 SD_TRACE(SD_LOG_IO_PARTITION, un, 7411 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7412 } 7413 7414 sd_set_errstats(un); 7415 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7416 "sd_unit_attach: un:0x%p errstats set\n", un); 7417 7418 7419 /* 7420 * After successfully attaching an instance, we record the information 7421 * of how many luns have been attached on the relative target and 7422 * controller for parallel SCSI. This information is used when sd tries 7423 * to set the tagged queuing capability in HBA. 7424 */ 7425 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7426 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7427 } 7428 7429 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7430 "sd_unit_attach: un:0x%p exit success\n", un); 7431 7432 return (DDI_SUCCESS); 7433 7434 /* 7435 * An error occurred during the attach; clean up & return failure. 7436 */ 7437 7438 devid_failed: 7439 7440 setup_pm_failed: 7441 ddi_remove_minor_node(devi, NULL); 7442 7443 cmlb_attach_failed: 7444 /* 7445 * Cleanup from the scsi_ifsetcap() calls (437868) 7446 */ 7447 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7448 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7449 7450 /* 7451 * Refer to the comments on setting tagged-qing at the beginning of 7452 * sd_unit_attach. We can disable tagged queuing only when there is 7453 * no lun attached on the target. 7454 */ 7455 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7456 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7457 } 7458 7459 if (un->un_f_is_fibre == FALSE) { 7460 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7461 } 7462 7463 spinup_failed: 7464 7465 mutex_enter(SD_MUTEX(un)); 7466 7467 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7468 if (un->un_direct_priority_timeid != NULL) { 7469 timeout_id_t temp_id = un->un_direct_priority_timeid; 7470 un->un_direct_priority_timeid = NULL; 7471 mutex_exit(SD_MUTEX(un)); 7472 (void) untimeout(temp_id); 7473 mutex_enter(SD_MUTEX(un)); 7474 } 7475 7476 /* Cancel any pending start/stop timeouts */ 7477 if (un->un_startstop_timeid != NULL) { 7478 timeout_id_t temp_id = un->un_startstop_timeid; 7479 un->un_startstop_timeid = NULL; 7480 mutex_exit(SD_MUTEX(un)); 7481 (void) untimeout(temp_id); 7482 mutex_enter(SD_MUTEX(un)); 7483 } 7484 7485 /* Cancel any pending reset-throttle timeouts */ 7486 if (un->un_reset_throttle_timeid != NULL) { 7487 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7488 un->un_reset_throttle_timeid = NULL; 7489 mutex_exit(SD_MUTEX(un)); 7490 (void) untimeout(temp_id); 7491 mutex_enter(SD_MUTEX(un)); 7492 } 7493 7494 /* Cancel any pending retry timeouts */ 7495 if (un->un_retry_timeid != NULL) { 7496 timeout_id_t temp_id = un->un_retry_timeid; 7497 un->un_retry_timeid = NULL; 7498 mutex_exit(SD_MUTEX(un)); 7499 (void) untimeout(temp_id); 7500 mutex_enter(SD_MUTEX(un)); 7501 } 7502 7503 /* Cancel any pending delayed cv broadcast timeouts */ 7504 if (un->un_dcvb_timeid != NULL) { 7505 timeout_id_t temp_id = un->un_dcvb_timeid; 7506 un->un_dcvb_timeid = NULL; 7507 mutex_exit(SD_MUTEX(un)); 7508 (void) untimeout(temp_id); 7509 mutex_enter(SD_MUTEX(un)); 7510 } 7511 7512 mutex_exit(SD_MUTEX(un)); 7513 7514 /* There should not be any in-progress I/O so ASSERT this check */ 7515 ASSERT(un->un_ncmds_in_transport == 0); 7516 ASSERT(un->un_ncmds_in_driver == 0); 7517 7518 /* Do not free the softstate if the callback routine is active */ 7519 sd_sync_with_callback(un); 7520 7521 /* 7522 * Partition stats apparently are not used with removables. These would 7523 * not have been created during attach, so no need to clean them up... 7524 */ 7525 if (un->un_errstats != NULL) { 7526 kstat_delete(un->un_errstats); 7527 un->un_errstats = NULL; 7528 } 7529 7530 create_errstats_failed: 7531 7532 if (un->un_stats != NULL) { 7533 kstat_delete(un->un_stats); 7534 un->un_stats = NULL; 7535 } 7536 7537 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7538 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7539 7540 ddi_prop_remove_all(devi); 7541 sema_destroy(&un->un_semoclose); 7542 cv_destroy(&un->un_state_cv); 7543 7544 getrbuf_failed: 7545 7546 sd_free_rqs(un); 7547 7548 alloc_rqs_failed: 7549 7550 devp->sd_private = NULL; 7551 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7552 7553 get_softstate_failed: 7554 /* 7555 * Note: the man pages are unclear as to whether or not doing a 7556 * ddi_soft_state_free(sd_state, instance) is the right way to 7557 * clean up after the ddi_soft_state_zalloc() if the subsequent 7558 * ddi_get_soft_state() fails. The implication seems to be 7559 * that the get_soft_state cannot fail if the zalloc succeeds. 7560 */ 7561 ddi_soft_state_free(sd_state, instance); 7562 7563 probe_failed: 7564 scsi_unprobe(devp); 7565 7566 return (DDI_FAILURE); 7567 } 7568 7569 7570 /* 7571 * Function: sd_unit_detach 7572 * 7573 * Description: Performs DDI_DETACH processing for sddetach(). 
7574 * 7575 * Return Code: DDI_SUCCESS 7576 * DDI_FAILURE 7577 * 7578 * Context: Kernel thread context 7579 */ 7580 7581 static int 7582 sd_unit_detach(dev_info_t *devi) 7583 { 7584 struct scsi_device *devp; 7585 struct sd_lun *un; 7586 int i; 7587 int tgt; 7588 dev_t dev; 7589 dev_info_t *pdip = ddi_get_parent(devi); 7590 int instance = ddi_get_instance(devi); 7591 7592 mutex_enter(&sd_detach_mutex); 7593 7594 /* 7595 * Fail the detach for any of the following: 7596 * - Unable to get the sd_lun struct for the instance 7597 * - A layered driver has an outstanding open on the instance 7598 * - Another thread is already detaching this instance 7599 * - Another thread is currently performing an open 7600 */ 7601 devp = ddi_get_driver_private(devi); 7602 if ((devp == NULL) || 7603 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7604 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7605 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7606 mutex_exit(&sd_detach_mutex); 7607 return (DDI_FAILURE); 7608 } 7609 7610 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7611 7612 /* 7613 * Mark this instance as currently in a detach, to inhibit any 7614 * opens from a layered driver. 7615 */ 7616 un->un_detach_count++; 7617 mutex_exit(&sd_detach_mutex); 7618 7619 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7620 SCSI_ADDR_PROP_TARGET, -1); 7621 7622 dev = sd_make_device(SD_DEVINFO(un)); 7623 7624 #ifndef lint 7625 _NOTE(COMPETING_THREADS_NOW); 7626 #endif 7627 7628 mutex_enter(SD_MUTEX(un)); 7629 7630 /* 7631 * Fail the detach if there are any outstanding layered 7632 * opens on this device. 7633 */ 7634 for (i = 0; i < NDKMAP; i++) { 7635 if (un->un_ocmap.lyropen[i] != 0) { 7636 goto err_notclosed; 7637 } 7638 } 7639 7640 /* 7641 * Verify there are NO outstanding commands issued to this device. 7642 * ie, un_ncmds_in_transport == 0. 7643 * It's possible to have outstanding commands through the physio 7644 * code path, even though everything's closed. 7645 */ 7646 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7647 (un->un_direct_priority_timeid != NULL) || 7648 (un->un_state == SD_STATE_RWAIT)) { 7649 mutex_exit(SD_MUTEX(un)); 7650 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7651 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7652 goto err_stillbusy; 7653 } 7654 7655 /* 7656 * If we have the device reserved, release the reservation. 7657 */ 7658 if ((un->un_resvd_status & SD_RESERVE) && 7659 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7660 mutex_exit(SD_MUTEX(un)); 7661 /* 7662 * Note: sd_reserve_release sends a command to the device 7663 * via the sd_ioctlcmd() path, and can sleep. 7664 */ 7665 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7666 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7667 "sd_dr_detach: Cannot release reservation \n"); 7668 } 7669 } else { 7670 mutex_exit(SD_MUTEX(un)); 7671 } 7672 7673 /* 7674 * Untimeout any reserve recover, throttle reset, restart unit 7675 * and delayed broadcast timeout threads. Protect the timeout pointer 7676 * from getting nulled by their callback functions. 
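 *
 * Every cancellation below follows the same sketch: snapshot the id and
 * clear the soft state field under SD_MUTEX, drop the mutex so a
 * concurrently running callback can finish, then untimeout:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	if (un->un_xxx_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_xxx_timeid;
 *		un->un_xxx_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 *
 * Here un_xxx_timeid is a placeholder for the individual timeout ids
 * handled below (un_resvd_timeid, un_reset_throttle_timeid, and so on).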
7677 */ 7678 mutex_enter(SD_MUTEX(un)); 7679 if (un->un_resvd_timeid != NULL) { 7680 timeout_id_t temp_id = un->un_resvd_timeid; 7681 un->un_resvd_timeid = NULL; 7682 mutex_exit(SD_MUTEX(un)); 7683 (void) untimeout(temp_id); 7684 mutex_enter(SD_MUTEX(un)); 7685 } 7686 7687 if (un->un_reset_throttle_timeid != NULL) { 7688 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7689 un->un_reset_throttle_timeid = NULL; 7690 mutex_exit(SD_MUTEX(un)); 7691 (void) untimeout(temp_id); 7692 mutex_enter(SD_MUTEX(un)); 7693 } 7694 7695 if (un->un_startstop_timeid != NULL) { 7696 timeout_id_t temp_id = un->un_startstop_timeid; 7697 un->un_startstop_timeid = NULL; 7698 mutex_exit(SD_MUTEX(un)); 7699 (void) untimeout(temp_id); 7700 mutex_enter(SD_MUTEX(un)); 7701 } 7702 7703 if (un->un_dcvb_timeid != NULL) { 7704 timeout_id_t temp_id = un->un_dcvb_timeid; 7705 un->un_dcvb_timeid = NULL; 7706 mutex_exit(SD_MUTEX(un)); 7707 (void) untimeout(temp_id); 7708 } else { 7709 mutex_exit(SD_MUTEX(un)); 7710 } 7711 7712 /* Remove any pending reservation reclaim requests for this device */ 7713 sd_rmv_resv_reclaim_req(dev); 7714 7715 mutex_enter(SD_MUTEX(un)); 7716 7717 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7718 if (un->un_direct_priority_timeid != NULL) { 7719 timeout_id_t temp_id = un->un_direct_priority_timeid; 7720 un->un_direct_priority_timeid = NULL; 7721 mutex_exit(SD_MUTEX(un)); 7722 (void) untimeout(temp_id); 7723 mutex_enter(SD_MUTEX(un)); 7724 } 7725 7726 /* Cancel any active multi-host disk watch thread requests */ 7727 if (un->un_mhd_token != NULL) { 7728 mutex_exit(SD_MUTEX(un)); 7729 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7730 if (scsi_watch_request_terminate(un->un_mhd_token, 7731 SCSI_WATCH_TERMINATE_NOWAIT)) { 7732 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7733 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7734 /* 7735 * Note: We are returning here after having removed 7736 * some driver timeouts above. This is consistent with 7737 * the legacy implementation but perhaps the watch 7738 * terminate call should be made with the wait flag set. 7739 */ 7740 goto err_stillbusy; 7741 } 7742 mutex_enter(SD_MUTEX(un)); 7743 un->un_mhd_token = NULL; 7744 } 7745 7746 if (un->un_swr_token != NULL) { 7747 mutex_exit(SD_MUTEX(un)); 7748 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7749 if (scsi_watch_request_terminate(un->un_swr_token, 7750 SCSI_WATCH_TERMINATE_NOWAIT)) { 7751 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7752 "sd_dr_detach: Cannot cancel swr watch request\n"); 7753 /* 7754 * Note: We are returning here after having removed 7755 * some driver timeouts above. This is consistent with 7756 * the legacy implementation but perhaps the watch 7757 * terminate call should be made with the wait flag set. 7758 */ 7759 goto err_stillbusy; 7760 } 7761 mutex_enter(SD_MUTEX(un)); 7762 un->un_swr_token = NULL; 7763 } 7764 7765 mutex_exit(SD_MUTEX(un)); 7766 7767 /* 7768 * Clear any scsi_reset_notifies. We clear the reset notifies 7769 * if we have not registered one. 7770 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7771 */ 7772 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7773 sd_mhd_reset_notify_cb, (caddr_t)un); 7774 7775 /* 7776 * protect the timeout pointers from getting nulled by 7777 * their callback functions during the cancellation process. 7778 * In such a scenario untimeout can be invoked with a null value. 
7779 */ 7780 _NOTE(NO_COMPETING_THREADS_NOW); 7781 7782 mutex_enter(&un->un_pm_mutex); 7783 if (un->un_pm_idle_timeid != NULL) { 7784 timeout_id_t temp_id = un->un_pm_idle_timeid; 7785 un->un_pm_idle_timeid = NULL; 7786 mutex_exit(&un->un_pm_mutex); 7787 7788 /* 7789 * Timeout is active; cancel it. 7790 * Note that it'll never be active on a device 7791 * that does not support PM; therefore we don't 7792 * have to check before calling pm_idle_component. 7793 */ 7794 (void) untimeout(temp_id); 7795 (void) pm_idle_component(SD_DEVINFO(un), 0); 7796 mutex_enter(&un->un_pm_mutex); 7797 } 7798 7799 /* 7800 * Check whether there is already a timeout scheduled for power 7801 * management. If so, don't lower the power here; that's 7802 * the timeout handler's job. 7803 */ 7804 if (un->un_pm_timeid != NULL) { 7805 timeout_id_t temp_id = un->un_pm_timeid; 7806 un->un_pm_timeid = NULL; 7807 mutex_exit(&un->un_pm_mutex); 7808 /* 7809 * Timeout is active; cancel it. 7810 * Note that it'll never be active on a device 7811 * that does not support PM; therefore we don't 7812 * have to check before calling pm_idle_component. 7813 */ 7814 (void) untimeout(temp_id); 7815 (void) pm_idle_component(SD_DEVINFO(un), 0); 7816 7817 } else { 7818 mutex_exit(&un->un_pm_mutex); 7819 if ((un->un_f_pm_is_enabled == TRUE) && 7820 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7821 DDI_SUCCESS)) { 7822 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7823 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7824 /* 7825 * Fix for bug: 4297749, item # 13 7826 * The above test now includes a check to see if PM is 7827 * supported by this device before calling 7828 * pm_lower_power(). 7829 * Note: the following is not dead code. The call to 7830 * pm_lower_power above will generate a call back into 7831 * our sdpower routine which might result in a timeout 7832 * handler getting activated. Therefore the following 7833 * code is valid and necessary. 7834 */ 7835 mutex_enter(&un->un_pm_mutex); 7836 if (un->un_pm_timeid != NULL) { 7837 timeout_id_t temp_id = un->un_pm_timeid; 7838 un->un_pm_timeid = NULL; 7839 mutex_exit(&un->un_pm_mutex); 7840 (void) untimeout(temp_id); 7841 (void) pm_idle_component(SD_DEVINFO(un), 0); 7842 } else { 7843 mutex_exit(&un->un_pm_mutex); 7844 } 7845 } 7846 } 7847 7848 /* 7849 * Cleanup from the scsi_ifsetcap() calls (437868) 7850 * Relocated here from above to be after the call to 7851 * pm_lower_power, which was getting errors. 7852 */ 7853 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7854 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7855 7856 /* 7857 * Currently, tagged queuing is supported per target by the HBA. 7858 * Setting this per lun instance actually sets the capability of 7859 * this target in the HBA, which affects those luns already attached 7860 * on the same target. So during detach, we can disable this 7861 * capability only when this is the only lun left on this target. 7862 * By doing this, we assume a target has the same tagged queuing 7863 * capability for every lun. The condition can be removed when the 7864 * HBA is changed to support per-lun tagged queuing capability.
7865 */ 7866 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7867 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7868 } 7869 7870 if (un->un_f_is_fibre == FALSE) { 7871 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7872 } 7873 7874 /* 7875 * Remove any event callbacks, fibre only 7876 */ 7877 if (un->un_f_is_fibre == TRUE) { 7878 if ((un->un_insert_event != NULL) && 7879 (ddi_remove_event_handler(un->un_insert_cb_id) != 7880 DDI_SUCCESS)) { 7881 /* 7882 * Note: We are returning here after having done 7883 * substantial cleanup above. This is consistent 7884 * with the legacy implementation but this may not 7885 * be the right thing to do. 7886 */ 7887 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7888 "sd_dr_detach: Cannot cancel insert event\n"); 7889 goto err_remove_event; 7890 } 7891 un->un_insert_event = NULL; 7892 7893 if ((un->un_remove_event != NULL) && 7894 (ddi_remove_event_handler(un->un_remove_cb_id) != 7895 DDI_SUCCESS)) { 7896 /* 7897 * Note: We are returning here after having done 7898 * substantial cleanup above. This is consistent 7899 * with the legacy implementation but this may not 7900 * be the right thing to do. 7901 */ 7902 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7903 "sd_dr_detach: Cannot cancel remove event\n"); 7904 goto err_remove_event; 7905 } 7906 un->un_remove_event = NULL; 7907 } 7908 7909 /* Do not free the softstate if the callback routine is active */ 7910 sd_sync_with_callback(un); 7911 7912 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7913 cmlb_free_handle(&un->un_cmlbhandle); 7914 7915 /* 7916 * Hold the detach mutex here, to make sure that no other threads ever 7917 * can access a (partially) freed soft state structure. 7918 */ 7919 mutex_enter(&sd_detach_mutex); 7920 7921 /* 7922 * Clean up the soft state struct. 7923 * Cleanup is done in reverse order of allocs/inits. 7924 * At this point there should be no competing threads anymore. 7925 */ 7926 7927 /* Unregister and free device id. */ 7928 ddi_devid_unregister(devi); 7929 if (un->un_devid) { 7930 ddi_devid_free(un->un_devid); 7931 un->un_devid = NULL; 7932 } 7933 7934 /* 7935 * Destroy wmap cache if it exists. 7936 */ 7937 if (un->un_wm_cache != NULL) { 7938 kmem_cache_destroy(un->un_wm_cache); 7939 un->un_wm_cache = NULL; 7940 } 7941 7942 /* 7943 * kstat cleanup is done in detach for all device types (4363169). 7944 * We do not want to fail detach if the device kstats are not deleted 7945 * since there is a confusion about the devo_refcnt for the device. 7946 * We just delete the kstats and let detach complete successfully. 7947 */ 7948 if (un->un_stats != NULL) { 7949 kstat_delete(un->un_stats); 7950 un->un_stats = NULL; 7951 } 7952 if (un->un_errstats != NULL) { 7953 kstat_delete(un->un_errstats); 7954 un->un_errstats = NULL; 7955 } 7956 7957 /* Remove partition stats */ 7958 if (un->un_f_pkstats_enabled) { 7959 for (i = 0; i < NSDMAP; i++) { 7960 if (un->un_pstats[i] != NULL) { 7961 kstat_delete(un->un_pstats[i]); 7962 un->un_pstats[i] = NULL; 7963 } 7964 } 7965 } 7966 7967 /* Remove xbuf registration */ 7968 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7969 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7970 7971 /* Remove driver properties */ 7972 ddi_prop_remove_all(devi); 7973 7974 mutex_destroy(&un->un_pm_mutex); 7975 cv_destroy(&un->un_pm_busy_cv); 7976 7977 cv_destroy(&un->un_wcc_cv); 7978 7979 /* Open/close semaphore */ 7980 sema_destroy(&un->un_semoclose); 7981 7982 /* Removable media condvar. 
*/ 7983 cv_destroy(&un->un_state_cv); 7984 7985 /* Suspend/resume condvar. */ 7986 cv_destroy(&un->un_suspend_cv); 7987 cv_destroy(&un->un_disk_busy_cv); 7988 7989 sd_free_rqs(un); 7990 7991 /* Free up soft state */ 7992 devp->sd_private = NULL; 7993 7994 bzero(un, sizeof (struct sd_lun)); 7995 ddi_soft_state_free(sd_state, instance); 7996 7997 mutex_exit(&sd_detach_mutex); 7998 7999 /* This frees up the INQUIRY data associated with the device. */ 8000 scsi_unprobe(devp); 8001 8002 /* 8003 * After successfully detaching an instance, we update the information 8004 * of how many luns have been attached in the relative target and 8005 * controller for parallel SCSI. This information is used when sd tries 8006 * to set the tagged queuing capability in HBA. 8007 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8008 * check if the device is parallel SCSI. However, we don't need to 8009 * check here because we've already checked during attach. No device 8010 * that is not parallel SCSI is in the chain. 8011 */ 8012 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8013 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8014 } 8015 8016 return (DDI_SUCCESS); 8017 8018 err_notclosed: 8019 mutex_exit(SD_MUTEX(un)); 8020 8021 err_stillbusy: 8022 _NOTE(NO_COMPETING_THREADS_NOW); 8023 8024 err_remove_event: 8025 mutex_enter(&sd_detach_mutex); 8026 un->un_detach_count--; 8027 mutex_exit(&sd_detach_mutex); 8028 8029 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8030 return (DDI_FAILURE); 8031 } 8032 8033 8034 /* 8035 * Function: sd_create_errstats 8036 * 8037 * Description: This routine instantiates the device error stats. 8038 * 8039 * Note: During attach the stats are instantiated first so they are 8040 * available for attach-time routines that utilize the driver 8041 * iopath to send commands to the device. The stats are initialized 8042 * separately so data obtained during some attach-time routines is 8043 * available. 
(4362483) 8044 * 8045 * Arguments: un - driver soft state (unit) structure 8046 * instance - driver instance 8047 * 8048 * Context: Kernel thread context 8049 */ 8050 8051 static void 8052 sd_create_errstats(struct sd_lun *un, int instance) 8053 { 8054 struct sd_errstats *stp; 8055 char kstatmodule_err[KSTAT_STRLEN]; 8056 char kstatname[KSTAT_STRLEN]; 8057 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8058 8059 ASSERT(un != NULL); 8060 8061 if (un->un_errstats != NULL) { 8062 return; 8063 } 8064 8065 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8066 "%serr", sd_label); 8067 (void) snprintf(kstatname, sizeof (kstatname), 8068 "%s%d,err", sd_label, instance); 8069 8070 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8071 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8072 8073 if (un->un_errstats == NULL) { 8074 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8075 "sd_create_errstats: Failed kstat_create\n"); 8076 return; 8077 } 8078 8079 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8080 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8081 KSTAT_DATA_UINT32); 8082 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8083 KSTAT_DATA_UINT32); 8084 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8085 KSTAT_DATA_UINT32); 8086 kstat_named_init(&stp->sd_vid, "Vendor", 8087 KSTAT_DATA_CHAR); 8088 kstat_named_init(&stp->sd_pid, "Product", 8089 KSTAT_DATA_CHAR); 8090 kstat_named_init(&stp->sd_revision, "Revision", 8091 KSTAT_DATA_CHAR); 8092 kstat_named_init(&stp->sd_serial, "Serial No", 8093 KSTAT_DATA_CHAR); 8094 kstat_named_init(&stp->sd_capacity, "Size", 8095 KSTAT_DATA_ULONGLONG); 8096 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8097 KSTAT_DATA_UINT32); 8098 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8099 KSTAT_DATA_UINT32); 8100 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8101 KSTAT_DATA_UINT32); 8102 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8103 KSTAT_DATA_UINT32); 8104 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8105 KSTAT_DATA_UINT32); 8106 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8107 KSTAT_DATA_UINT32); 8108 8109 un->un_errstats->ks_private = un; 8110 un->un_errstats->ks_update = nulldev; 8111 8112 kstat_install(un->un_errstats); 8113 } 8114 8115 8116 /* 8117 * Function: sd_set_errstats 8118 * 8119 * Description: This routine sets the value of the vendor id, product id, 8120 * revision, serial number, and capacity device error stats. 8121 * 8122 * Note: During attach the stats are instantiated first so they are 8123 * available for attach-time routines that utilize the driver 8124 * iopath to send commands to the device. The stats are initialized 8125 * separately so data obtained during some attach-time routines is 8126 * available. 
(4362483) 8127 * 8128 * Arguments: un - driver soft state (unit) structure 8129 * 8130 * Context: Kernel thread context 8131 */ 8132 8133 static void 8134 sd_set_errstats(struct sd_lun *un) 8135 { 8136 struct sd_errstats *stp; 8137 8138 ASSERT(un != NULL); 8139 ASSERT(un->un_errstats != NULL); 8140 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8141 ASSERT(stp != NULL); 8142 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8143 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8144 (void) strncpy(stp->sd_revision.value.c, 8145 un->un_sd->sd_inq->inq_revision, 4); 8146 8147 /* 8148 * All the errstats are persistent across detach/attach, 8149 * so reset them here in case of hot replacement of a 8150 * disk drive, except for unchanged Sun-qualified 8151 * drives. 8152 */ 8153 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8154 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8155 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8156 stp->sd_softerrs.value.ui32 = 0; 8157 stp->sd_harderrs.value.ui32 = 0; 8158 stp->sd_transerrs.value.ui32 = 0; 8159 stp->sd_rq_media_err.value.ui32 = 0; 8160 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8161 stp->sd_rq_nodev_err.value.ui32 = 0; 8162 stp->sd_rq_recov_err.value.ui32 = 0; 8163 stp->sd_rq_illrq_err.value.ui32 = 0; 8164 stp->sd_rq_pfa_err.value.ui32 = 0; 8165 } 8166 8167 /* 8168 * Set the "Serial No" kstat for Sun-qualified drives (indicated by 8169 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8170 * (4376302)) 8171 */ 8172 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8173 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8174 sizeof (SD_INQUIRY(un)->inq_serial)); 8175 } 8176 8177 if (un->un_f_blockcount_is_valid != TRUE) { 8178 /* 8179 * Set capacity error stat to 0 for no media. This ensures 8180 * a valid capacity is displayed in response to 'iostat -E' 8181 * when no media is present in the device. 8182 */ 8183 stp->sd_capacity.value.ui64 = 0; 8184 } else { 8185 /* 8186 * Multiply un_blockcount by un->un_sys_blocksize to get 8187 * capacity. 8188 * 8189 * Note: for non-512 blocksize devices "un_blockcount" has been 8190 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8191 * (un_tgt_blocksize / un->un_sys_blocksize). 8192 */ 8193 stp->sd_capacity.value.ui64 = (uint64_t) 8194 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8195 } 8196 } 8197 8198 8199 /* 8200 * Function: sd_set_pstats 8201 * 8202 * Description: This routine instantiates and initializes the partition 8203 * stats for each partition with more than zero blocks. 8204 * (4363169) 8205 * 8206 * Arguments: un - driver soft state (unit) structure 8207 * 8208 * Context: Kernel thread context 8209 */ 8210 8211 static void 8212 sd_set_pstats(struct sd_lun *un) 8213 { 8214 char kstatname[KSTAT_STRLEN]; 8215 int instance; 8216 int i; 8217 diskaddr_t nblks = 0; 8218 char *partname = NULL; 8219 8220 ASSERT(un != NULL); 8221 8222 instance = ddi_get_instance(SD_DEVINFO(un)); 8223 8224 /* Note:x86: is this a VTOC8/VTOC16 difference?
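 * (For reference, the partition kstat names built in the loop below
 * follow the "%s%d,%s" pattern: for label "sd", instance 0 and
 * partition "a" this yields "sd0,a".)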
*/ 8225 for (i = 0; i < NSDMAP; i++) { 8226 8227 if (cmlb_partinfo(un->un_cmlbhandle, i, 8228 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8229 continue; 8230 mutex_enter(SD_MUTEX(un)); 8231 8232 if ((un->un_pstats[i] == NULL) && 8233 (nblks != 0)) { 8234 8235 (void) snprintf(kstatname, sizeof (kstatname), 8236 "%s%d,%s", sd_label, instance, 8237 partname); 8238 8239 un->un_pstats[i] = kstat_create(sd_label, 8240 instance, kstatname, "partition", KSTAT_TYPE_IO, 8241 1, KSTAT_FLAG_PERSISTENT); 8242 if (un->un_pstats[i] != NULL) { 8243 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8244 kstat_install(un->un_pstats[i]); 8245 } 8246 } 8247 mutex_exit(SD_MUTEX(un)); 8248 } 8249 } 8250 8251 8252 #if (defined(__fibre)) 8253 /* 8254 * Function: sd_init_event_callbacks 8255 * 8256 * Description: This routine initializes the insertion and removal event 8257 * callbacks. (fibre only) 8258 * 8259 * Arguments: un - driver soft state (unit) structure 8260 * 8261 * Context: Kernel thread context 8262 */ 8263 8264 static void 8265 sd_init_event_callbacks(struct sd_lun *un) 8266 { 8267 ASSERT(un != NULL); 8268 8269 if ((un->un_insert_event == NULL) && 8270 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8271 &un->un_insert_event) == DDI_SUCCESS)) { 8272 /* 8273 * Add the callback for an insertion event 8274 */ 8275 (void) ddi_add_event_handler(SD_DEVINFO(un), 8276 un->un_insert_event, sd_event_callback, (void *)un, 8277 &(un->un_insert_cb_id)); 8278 } 8279 8280 if ((un->un_remove_event == NULL) && 8281 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8282 &un->un_remove_event) == DDI_SUCCESS)) { 8283 /* 8284 * Add the callback for a removal event 8285 */ 8286 (void) ddi_add_event_handler(SD_DEVINFO(un), 8287 un->un_remove_event, sd_event_callback, (void *)un, 8288 &(un->un_remove_cb_id)); 8289 } 8290 } 8291 8292 8293 /* 8294 * Function: sd_event_callback 8295 * 8296 * Description: This routine handles insert/remove events (photon). The 8297 * state is changed to OFFLINE, which can be used to suppress 8298 * error msgs. (fibre only) 8299 * 8300 * Arguments: un - driver soft state (unit) structure 8301 * 8302 * Context: Callout thread context 8303 */ 8304 /* ARGSUSED */ 8305 static void 8306 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8307 void *bus_impldata) 8308 { 8309 struct sd_lun *un = (struct sd_lun *)arg; 8310 8311 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8312 if (event == un->un_insert_event) { 8313 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8314 mutex_enter(SD_MUTEX(un)); 8315 if (un->un_state == SD_STATE_OFFLINE) { 8316 if (un->un_last_state != SD_STATE_SUSPENDED) { 8317 un->un_state = un->un_last_state; 8318 } else { 8319 /* 8320 * We have gone through SUSPEND/RESUME while 8321 * we were offline. Restore the last state. 8322 */ 8323 un->un_state = un->un_save_state; 8324 } 8325 } 8326 mutex_exit(SD_MUTEX(un)); 8327 8328 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8329 } else if (event == un->un_remove_event) { 8330 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8331 mutex_enter(SD_MUTEX(un)); 8332 /* 8333 * We need to handle an event callback that occurs during 8334 * the suspend operation, since we don't prevent it.
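 *
 * In summary, the event handling in this routine is (sketch):
 *
 *	insert event: if OFFLINE, restore un_last_state (or
 *	    un_save_state if a SUSPEND/RESUME happened while offline)
 *	remove event: transition to OFFLINE via New_state(), or just
 *	    record OFFLINE in un_last_state if currently SUSPENDED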
8335 */ 8336 if (un->un_state != SD_STATE_OFFLINE) { 8337 if (un->un_state != SD_STATE_SUSPENDED) { 8338 New_state(un, SD_STATE_OFFLINE); 8339 } else { 8340 un->un_last_state = SD_STATE_OFFLINE; 8341 } 8342 } 8343 mutex_exit(SD_MUTEX(un)); 8344 } else { 8345 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8346 "!Unknown event\n"); 8347 } 8348 8349 } 8350 #endif 8351 8352 /* 8353 * Function: sd_cache_control() 8354 * 8355 * Description: This routine is the driver entry point for setting 8356 * read and write caching by modifying the WCE (write cache 8357 * enable) and RCD (read cache disable) bits of mode 8358 * page 8 (MODEPAGE_CACHING). 8359 * 8360 * Arguments: un - driver soft state (unit) structure 8361 * rcd_flag - flag for controlling the read cache 8362 * wce_flag - flag for controlling the write cache 8363 * 8364 * Return Code: EIO 8365 * code returned by sd_send_scsi_MODE_SENSE and 8366 * sd_send_scsi_MODE_SELECT 8367 * 8368 * Context: Kernel Thread 8369 */ 8370 8371 static int 8372 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8373 { 8374 struct mode_caching *mode_caching_page; 8375 uchar_t *header; 8376 size_t buflen; 8377 int hdrlen; 8378 int bd_len; 8379 int rval = 0; 8380 struct mode_header_grp2 *mhp; 8381 8382 ASSERT(un != NULL); 8383 8384 /* 8385 * Do a test unit ready, otherwise a mode sense may not work if this 8386 * is the first command sent to the device after boot. 8387 */ 8388 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8389 8390 if (un->un_f_cfg_is_atapi == TRUE) { 8391 hdrlen = MODE_HEADER_LENGTH_GRP2; 8392 } else { 8393 hdrlen = MODE_HEADER_LENGTH; 8394 } 8395 8396 /* 8397 * Allocate memory for the retrieved mode page and its headers. Set 8398 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8399 * we get all of the mode sense data; otherwise, the mode select 8400 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8401 */ 8402 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8403 sizeof (struct mode_cache_scsi3); 8404 8405 header = kmem_zalloc(buflen, KM_SLEEP); 8406 8407 /* Get the information from the device. */ 8408 if (un->un_f_cfg_is_atapi == TRUE) { 8409 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8410 MODEPAGE_CACHING, SD_PATH_DIRECT); 8411 } else { 8412 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8413 MODEPAGE_CACHING, SD_PATH_DIRECT); 8414 } 8415 if (rval != 0) { 8416 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8417 "sd_cache_control: Mode Sense Failed\n"); 8418 kmem_free(header, buflen); 8419 return (rval); 8420 } 8421 8422 /* 8423 * Determine size of Block Descriptors in order to locate 8424 * the mode page data. ATAPI devices return 0, SCSI devices 8425 * should return MODE_BLK_DESC_LENGTH.
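 *
 * Sketch of the buffer layout being parsed here (non-ATAPI case; for
 * ATAPI the header is MODE_HEADER_LENGTH_GRP2 and bd_len is 0):
 *
 *	header				mode header (hdrlen bytes)
 *	header + hdrlen			block descriptor (bd_len bytes)
 *	header + hdrlen + bd_len	caching mode page
 *
 * which is why mode_caching_page is computed below as
 * (struct mode_caching *)(header + hdrlen + bd_len).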
8426 */ 8427 if (un->un_f_cfg_is_atapi == TRUE) { 8428 mhp = (struct mode_header_grp2 *)header; 8429 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8430 } else { 8431 bd_len = ((struct mode_header *)header)->bdesc_length; 8432 } 8433 8434 if (bd_len > MODE_BLK_DESC_LENGTH) { 8435 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8436 "sd_cache_control: Mode Sense returned invalid " 8437 "block descriptor length\n"); 8438 kmem_free(header, buflen); 8439 return (EIO); 8440 } 8441 8442 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8443 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8444 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8445 " caching page code mismatch %d\n", 8446 mode_caching_page->mode_page.code); 8447 kmem_free(header, buflen); 8448 return (EIO); 8449 } 8450 8451 /* Check the relevant bits on successful mode sense. */ 8452 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8453 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8454 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8455 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8456 8457 size_t sbuflen; 8458 uchar_t save_pg; 8459 8460 /* 8461 * Construct select buffer length based on the 8462 * length of the sense data returned. 8463 */ 8464 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8465 sizeof (struct mode_page) + 8466 (int)mode_caching_page->mode_page.length; 8467 8468 /* 8469 * Set the caching bits as requested. 8470 */ 8471 if (rcd_flag == SD_CACHE_ENABLE) 8472 mode_caching_page->rcd = 0; 8473 else if (rcd_flag == SD_CACHE_DISABLE) 8474 mode_caching_page->rcd = 1; 8475 8476 if (wce_flag == SD_CACHE_ENABLE) 8477 mode_caching_page->wce = 1; 8478 else if (wce_flag == SD_CACHE_DISABLE) 8479 mode_caching_page->wce = 0; 8480 8481 /* 8482 * Save the page if the mode sense says the 8483 * drive supports it. 8484 */ 8485 save_pg = mode_caching_page->mode_page.ps ? 8486 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8487 8488 /* Clear reserved bits before mode select. */ 8489 mode_caching_page->mode_page.ps = 0; 8490 8491 /* 8492 * Clear out mode header for mode select. 8493 * The rest of the retrieved page will be reused. 8494 */ 8495 bzero(header, hdrlen); 8496 8497 if (un->un_f_cfg_is_atapi == TRUE) { 8498 mhp = (struct mode_header_grp2 *)header; 8499 mhp->bdesc_length_hi = bd_len >> 8; 8500 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8501 } else { 8502 ((struct mode_header *)header)->bdesc_length = bd_len; 8503 } 8504 8505 /* Issue mode select to change the cache settings */ 8506 if (un->un_f_cfg_is_atapi == TRUE) { 8507 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8508 sbuflen, save_pg, SD_PATH_DIRECT); 8509 } else { 8510 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8511 sbuflen, save_pg, SD_PATH_DIRECT); 8512 } 8513 } 8514 8515 kmem_free(header, buflen); 8516 return (rval); 8517 } 8518 8519 8520 /* 8521 * Function: sd_get_write_cache_enabled() 8522 * 8523 * Description: This routine is the driver entry point for determining if 8524 * write caching is enabled. It examines the WCE (write cache 8525 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8526 * 8527 * Arguments: un - driver soft state (unit) structure 8528 * is_enabled - pointer to int where write cache enabled state 8529 * is returned (non-zero -> write cache enabled) 8530 * 8531 * 8532 * Return Code: EIO 8533 * code returned by sd_send_scsi_MODE_SENSE 8534 * 8535 * Context: Kernel Thread 8536 * 8537 * NOTE: If ioctl is added to disable write cache, this sequence should 8538 * be followed so that no locking is required for accesses to 8539 * un->un_f_write_cache_enabled: 8540 * do mode select to clear wce 8541 * do synchronize cache to flush cache 8542 * set un->un_f_write_cache_enabled = FALSE 8543 * 8544 * Conversely, an ioctl to enable the write cache should be done 8545 * in this order: 8546 * set un->un_f_write_cache_enabled = TRUE 8547 * do mode select to set wce 8548 */ 8549 8550 static int 8551 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8552 { 8553 struct mode_caching *mode_caching_page; 8554 uchar_t *header; 8555 size_t buflen; 8556 int hdrlen; 8557 int bd_len; 8558 int rval = 0; 8559 8560 ASSERT(un != NULL); 8561 ASSERT(is_enabled != NULL); 8562 8563 /* in case of error, flag as enabled */ 8564 *is_enabled = TRUE; 8565 8566 /* 8567 * Do a test unit ready, otherwise a mode sense may not work if this 8568 * is the first command sent to the device after boot. 8569 */ 8570 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8571 8572 if (un->un_f_cfg_is_atapi == TRUE) { 8573 hdrlen = MODE_HEADER_LENGTH_GRP2; 8574 } else { 8575 hdrlen = MODE_HEADER_LENGTH; 8576 } 8577 8578 /* 8579 * Allocate memory for the retrieved mode page and its headers. Set 8580 * a pointer to the page itself. 8581 */ 8582 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8583 header = kmem_zalloc(buflen, KM_SLEEP); 8584 8585 /* Get the information from the device. */ 8586 if (un->un_f_cfg_is_atapi == TRUE) { 8587 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8588 MODEPAGE_CACHING, SD_PATH_DIRECT); 8589 } else { 8590 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8591 MODEPAGE_CACHING, SD_PATH_DIRECT); 8592 } 8593 if (rval != 0) { 8594 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8595 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8596 kmem_free(header, buflen); 8597 return (rval); 8598 } 8599 8600 /* 8601 * Determine size of Block Descriptors in order to locate 8602 * the mode page data. ATAPI devices return 0, SCSI devices 8603 * should return MODE_BLK_DESC_LENGTH. 
8604 */ 8605 if (un->un_f_cfg_is_atapi == TRUE) { 8606 struct mode_header_grp2 *mhp; 8607 mhp = (struct mode_header_grp2 *)header; 8608 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8609 } else { 8610 bd_len = ((struct mode_header *)header)->bdesc_length; 8611 } 8612 8613 if (bd_len > MODE_BLK_DESC_LENGTH) { 8614 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8615 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8616 "block descriptor length\n"); 8617 kmem_free(header, buflen); 8618 return (EIO); 8619 } 8620 8621 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8622 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8623 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8624 " caching page code mismatch %d\n", 8625 mode_caching_page->mode_page.code); 8626 kmem_free(header, buflen); 8627 return (EIO); 8628 } 8629 *is_enabled = mode_caching_page->wce; 8630 8631 kmem_free(header, buflen); 8632 return (0); 8633 } 8634 8635 /* 8636 * Function: sd_get_nv_sup() 8637 * 8638 * Description: This routine is the driver entry point for 8639 * determining whether non-volatile cache is supported. This 8640 * determination process works as follows: 8641 * 8642 * 1. sd first queries sd.conf to see whether the 8643 * suppress_cache_flush bit is set for this device. 8644 * 8645 * 2. if not there, it then queries the internal disk table. 8646 * 8647 * 3. if either sd.conf or the internal disk table specifies 8648 * that cache flush be suppressed, we don't bother checking 8649 * the NV_SUP bit. 8650 * 8651 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 8652 * the optional INQUIRY VPD page 0x86. If the device 8653 * supports VPD page 0x86, sd examines the NV_SUP 8654 * (non-volatile cache support) bit in the INQUIRY VPD page 8655 * 0x86: 8656 * o If the NV_SUP bit is set, sd assumes the device has a 8657 * non-volatile cache and sets 8658 * un_f_sync_nv_supported to TRUE. 8659 * o Otherwise the cache is not non-volatile, and 8660 * un_f_sync_nv_supported is set to FALSE. 8661 * 8662 * Arguments: un - driver soft state (unit) structure 8663 * 8664 * Return Code: 8665 * 8666 * Context: Kernel Thread 8667 */ 8668 8669 static void 8670 sd_get_nv_sup(struct sd_lun *un) 8671 { 8672 int rval = 0; 8673 uchar_t *inq86 = NULL; 8674 size_t inq86_len = MAX_INQUIRY_SIZE; 8675 size_t inq86_resid = 0; 8676 struct dk_callback *dkc; 8677 8678 ASSERT(un != NULL); 8679 8680 mutex_enter(SD_MUTEX(un)); 8681 8682 /* 8683 * Be conservative on the device's support of 8684 * the SYNC_NV bit: un_f_sync_nv_supported is 8685 * initialized to FALSE. 8686 */ 8687 un->un_f_sync_nv_supported = FALSE; 8688 8689 /* 8690 * If either sd.conf or the internal disk table 8691 * specifies that cache flush be suppressed, then 8692 * we don't bother checking the NV_SUP bit.
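 *
 * For reference (the bit position follows the SPC Extended INQUIRY
 * Data VPD page definition, stated here as an assumption rather than
 * taken from this file): NV_SUP lives in byte 6 of VPD page 0x86, so
 * the check performed below reduces to
 *
 *	if (inq86[6] & SD_VPD_NV_SUP)
 *		un->un_f_sync_nv_supported = TRUE;
 *
 * after fetching the page with the EVPD bit set and page code 0x86.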
8693 */ 8694 if (un->un_f_suppress_cache_flush == TRUE) { 8695 mutex_exit(SD_MUTEX(un)); 8696 return; 8697 } 8698 8699 if (sd_check_vpd_page_support(un) == 0 && 8700 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 8701 mutex_exit(SD_MUTEX(un)); 8702 /* collect page 86 data if available */ 8703 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 8704 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len, 8705 0x01, 0x86, &inq86_resid); 8706 8707 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 8708 SD_TRACE(SD_LOG_COMMON, un, 8709 "sd_get_nv_sup: \ 8710 successfully get VPD page: %x \ 8711 PAGE LENGTH: %x BYTE 6: %x\n", 8712 inq86[1], inq86[3], inq86[6]); 8713 8714 mutex_enter(SD_MUTEX(un)); 8715 /* 8716 * Check the value of the NV_SUP bit: only if the 8717 * device reports the NV_SUP bit as 1 will the 8718 * un_f_sync_nv_supported flag be set to TRUE. 8719 */ 8720 if (inq86[6] & SD_VPD_NV_SUP) { 8721 un->un_f_sync_nv_supported = TRUE; 8722 } 8723 mutex_exit(SD_MUTEX(un)); 8724 } 8725 kmem_free(inq86, inq86_len); 8726 } else { 8727 mutex_exit(SD_MUTEX(un)); 8728 } 8729 8730 /* 8731 * Send a SYNC CACHE command to check whether the 8732 * SYNC_NV bit is supported. By this point 8733 * un_f_sync_nv_supported must already hold the correct value. 8734 */ 8735 mutex_enter(SD_MUTEX(un)); 8736 if (un->un_f_sync_nv_supported) { 8737 mutex_exit(SD_MUTEX(un)); 8738 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 8739 dkc->dkc_flag = FLUSH_VOLATILE; 8740 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 8741 8742 /* 8743 * Send a TEST UNIT READY command to the device. This should 8744 * clear any outstanding UNIT ATTENTION that may be present. 8745 */ 8746 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR); 8747 8748 kmem_free(dkc, sizeof (struct dk_callback)); 8749 } else { 8750 mutex_exit(SD_MUTEX(un)); 8751 } 8752 8753 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 8754 un_f_suppress_cache_flush is set to %d\n", 8755 un->un_f_suppress_cache_flush); 8756 } 8757 8758 /* 8759 * Function: sd_make_device 8760 * 8761 * Description: Utility routine to return the Solaris device number from 8762 * the data in the device's dev_info structure. 8763 * 8764 * Return Code: The Solaris device number 8765 * 8766 * Context: Any 8767 */ 8768 8769 static dev_t 8770 sd_make_device(dev_info_t *devi) 8771 { 8772 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8773 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8774 } 8775 8776 8777 /* 8778 * Function: sd_pm_entry 8779 * 8780 * Description: Called at the start of a new command to manage power 8781 * and busy status of a device. This includes determining whether 8782 * the current power state of the device is sufficient for 8783 * performing the command or whether it must be changed. 8784 * The PM framework is notified appropriately. 8785 * Only with a return status of DDI_SUCCESS will the 8786 * component be marked busy to the framework. 8787 * 8788 * All callers of sd_pm_entry must check the return status 8789 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8790 * of DDI_FAILURE indicates the device failed to power up. 8791 * In this case un_pm_count has been adjusted so the result 8792 * on exit is still powered down, i.e. count is less than 0. 8793 * Calling sd_pm_exit with this count value hits an ASSERT. 8794 * 8795 * Return Code: DDI_SUCCESS or DDI_FAILURE 8796 * 8797 * Context: Kernel thread context.
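 *
 * Typical caller pattern implied by the contract above (sketch only;
 * the error code chosen on failure is illustrative):
 *
 *	if (sd_pm_entry(un) != DDI_SUCCESS) {
 *		return (EIO);	(do NOT call sd_pm_exit here)
 *	}
 *	(... issue the command ...)
 *	sd_pm_exit(un);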
8798 */ 8799 8800 static int 8801 sd_pm_entry(struct sd_lun *un) 8802 { 8803 int return_status = DDI_SUCCESS; 8804 8805 ASSERT(!mutex_owned(SD_MUTEX(un))); 8806 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8807 8808 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8809 8810 if (un->un_f_pm_is_enabled == FALSE) { 8811 SD_TRACE(SD_LOG_IO_PM, un, 8812 "sd_pm_entry: exiting, PM not enabled\n"); 8813 return (return_status); 8814 } 8815 8816 /* 8817 * Just increment a counter if PM is enabled. On the transition from 8818 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8819 * the count with each IO and mark the device as idle when the count 8820 * hits 0. 8821 * 8822 * If the count is less than 0 the device is powered down. If a powered 8823 * down device is successfully powered up then the count must be 8824 * incremented to reflect the power up. Note that it'll get incremented 8825 * a second time to become busy. 8826 * 8827 * Because the following has the potential to change the device state 8828 * and must release the un_pm_mutex to do so, only one thread can be 8829 * allowed through at a time. 8830 */ 8831 8832 mutex_enter(&un->un_pm_mutex); 8833 while (un->un_pm_busy == TRUE) { 8834 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8835 } 8836 un->un_pm_busy = TRUE; 8837 8838 if (un->un_pm_count < 1) { 8839 8840 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8841 8842 /* 8843 * Indicate we are now busy so the framework won't attempt to 8844 * power down the device. This call will only fail if either 8845 * we passed a bad component number or the device has no 8846 * components. Neither of these should ever happen. 8847 */ 8848 mutex_exit(&un->un_pm_mutex); 8849 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8850 ASSERT(return_status == DDI_SUCCESS); 8851 8852 mutex_enter(&un->un_pm_mutex); 8853 8854 if (un->un_pm_count < 0) { 8855 mutex_exit(&un->un_pm_mutex); 8856 8857 SD_TRACE(SD_LOG_IO_PM, un, 8858 "sd_pm_entry: power up component\n"); 8859 8860 /* 8861 * pm_raise_power will cause sdpower to be called 8862 * which brings the device power level to the 8863 * desired state, ON in this case. If successful, 8864 * un_pm_count and un_power_level will be updated 8865 * appropriately. 8866 */ 8867 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8868 SD_SPINDLE_ON); 8869 8870 mutex_enter(&un->un_pm_mutex); 8871 8872 if (return_status != DDI_SUCCESS) { 8873 /* 8874 * Power up failed. 8875 * Idle the device and adjust the count 8876 * so the result on exit is that we're 8877 * still powered down, ie. count is less than 0. 8878 */ 8879 SD_TRACE(SD_LOG_IO_PM, un, 8880 "sd_pm_entry: power up failed," 8881 " idle the component\n"); 8882 8883 (void) pm_idle_component(SD_DEVINFO(un), 0); 8884 un->un_pm_count--; 8885 } else { 8886 /* 8887 * Device is powered up, verify the 8888 * count is non-negative. 8889 * This is debug only. 8890 */ 8891 ASSERT(un->un_pm_count == 0); 8892 } 8893 } 8894 8895 if (return_status == DDI_SUCCESS) { 8896 /* 8897 * For performance, now that the device has been tagged 8898 * as busy, and it's known to be powered up, update the 8899 * chain types to use jump tables that do not include 8900 * pm. This significantly lowers the overhead and 8901 * therefore improves performance. 
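 *
 *		(The chain types set below index the sd_chain_index_map[]
 *		jump tables consulted by sd_xbuf_init(); judging by their
 *		names, the *_NO_PM variants are assumed here to be the
 *		same chains with the sd_pm_iostart/sd_pm_iodone layers
 *		omitted.)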
8902 */ 8903 8904 mutex_exit(&un->un_pm_mutex); 8905 mutex_enter(SD_MUTEX(un)); 8906 SD_TRACE(SD_LOG_IO_PM, un, 8907 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8908 un->un_uscsi_chain_type); 8909 8910 if (un->un_f_non_devbsize_supported) { 8911 un->un_buf_chain_type = 8912 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8913 } else { 8914 un->un_buf_chain_type = 8915 SD_CHAIN_INFO_DISK_NO_PM; 8916 } 8917 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8918 8919 SD_TRACE(SD_LOG_IO_PM, un, 8920 " changed uscsi_chain_type to %d\n", 8921 un->un_uscsi_chain_type); 8922 mutex_exit(SD_MUTEX(un)); 8923 mutex_enter(&un->un_pm_mutex); 8924 8925 if (un->un_pm_idle_timeid == NULL) { 8926 /* 300 ms. */ 8927 un->un_pm_idle_timeid = 8928 timeout(sd_pm_idletimeout_handler, un, 8929 (drv_usectohz((clock_t)300000))); 8930 /* 8931 * Include an extra call to busy which keeps the 8932 * device busy with-respect-to the PM layer 8933 * until the timer fires, at which time it'll 8934 * get the extra idle call. 8935 */ 8936 (void) pm_busy_component(SD_DEVINFO(un), 0); 8937 } 8938 } 8939 } 8940 un->un_pm_busy = FALSE; 8941 /* Next... */ 8942 cv_signal(&un->un_pm_busy_cv); 8943 8944 un->un_pm_count++; 8945 8946 SD_TRACE(SD_LOG_IO_PM, un, 8947 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8948 8949 mutex_exit(&un->un_pm_mutex); 8950 8951 return (return_status); 8952 } 8953 8954 8955 /* 8956 * Function: sd_pm_exit 8957 * 8958 * Description: Called at the completion of a command to manage busy 8959 * status for the device. If the device becomes idle the 8960 * PM framework is notified. 8961 * 8962 * Context: Kernel thread context 8963 */ 8964 8965 static void 8966 sd_pm_exit(struct sd_lun *un) 8967 { 8968 ASSERT(!mutex_owned(SD_MUTEX(un))); 8969 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8970 8971 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8972 8973 /* 8974 * After attach the following flag is only read, so don't 8975 * take the penalty of acquiring a mutex for it. 8976 */ 8977 if (un->un_f_pm_is_enabled == TRUE) { 8978 8979 mutex_enter(&un->un_pm_mutex); 8980 un->un_pm_count--; 8981 8982 SD_TRACE(SD_LOG_IO_PM, un, 8983 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8984 8985 ASSERT(un->un_pm_count >= 0); 8986 if (un->un_pm_count == 0) { 8987 mutex_exit(&un->un_pm_mutex); 8988 8989 SD_TRACE(SD_LOG_IO_PM, un, 8990 "sd_pm_exit: idle component\n"); 8991 8992 (void) pm_idle_component(SD_DEVINFO(un), 0); 8993 8994 } else { 8995 mutex_exit(&un->un_pm_mutex); 8996 } 8997 } 8998 8999 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9000 } 9001 9002 9003 /* 9004 * Function: sdopen 9005 * 9006 * Description: Driver's open(9e) entry point function. 
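 *
 *		For reference, the minor number of the dev_t encodes
 *		both the instance and the partition; a sketch of the
 *		decode used below (assuming the usual definitions of
 *		the SDUNIT/SDPART macros in terms of SDUNIT_SHIFT and
 *		SDPART_MASK):
 *
 *			instance = SDUNIT(dev);		minor >> SDUNIT_SHIFT
 *			part = SDPART(dev);		minor & SDPART_MASK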
9007 * 9008 * Arguments: dev_p - pointer to device number 9009 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9010 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9011 * cred_p - user credential pointer 9012 * 9013 * Return Code: EINVAL 9014 * ENXIO 9015 * EIO 9016 * EROFS 9017 * EBUSY 9018 * 9019 * Context: Kernel thread context 9020 */ 9021 /* ARGSUSED */ 9022 static int 9023 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9024 { 9025 struct sd_lun *un; 9026 int nodelay; 9027 int part; 9028 uint64_t partmask; 9029 int instance; 9030 dev_t dev; 9031 int rval = EIO; 9032 diskaddr_t nblks = 0; 9033 9034 /* Validate the open type */ 9035 if (otyp >= OTYPCNT) { 9036 return (EINVAL); 9037 } 9038 9039 dev = *dev_p; 9040 instance = SDUNIT(dev); 9041 mutex_enter(&sd_detach_mutex); 9042 9043 /* 9044 * Fail the open if there is no softstate for the instance, or 9045 * if another thread somewhere is trying to detach the instance. 9046 */ 9047 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9048 (un->un_detach_count != 0)) { 9049 mutex_exit(&sd_detach_mutex); 9050 /* 9051 * The probe cache only needs to be cleared when open (9e) fails 9052 * with ENXIO (4238046). 9053 */ 9054 /* 9055 * Unconditionally clearing the probe cache is fine with 9056 * separate sd/ssd binaries; on x86, where parallel SCSI 9057 * and fibre support share one binary, this could be 9058 * an issue. 9059 */ 9060 sd_scsi_clear_probe_cache(); 9061 return (ENXIO); 9062 } 9063 9064 /* 9065 * The un_layer_count is to prevent another thread in specfs from 9066 * trying to detach the instance, which can happen when we are 9067 * called from a higher-layer driver instead of thru specfs. 9068 * This will not be needed when DDI provides a layered driver 9069 * interface that allows specfs to know that an instance is in 9070 * use by a layered driver & should not be detached. 9071 * 9072 * Note: the semantics for layered driver opens are exactly one 9073 * close for every open. 9074 */ 9075 if (otyp == OTYP_LYR) { 9076 un->un_layer_count++; 9077 } 9078 9079 /* 9080 * Keep a count of the current # of opens in progress. This is because 9081 * some layered drivers try to call us as a regular open. This can 9082 * cause problems that we cannot prevent; however, by keeping this count 9083 * we can at least keep our open and detach routines from racing against 9084 * each other under such conditions. 9085 */ 9086 un->un_opens_in_progress++; 9087 mutex_exit(&sd_detach_mutex); 9088 9089 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9090 part = SDPART(dev); 9091 partmask = 1 << part; 9092 9093 /* 9094 * We use a semaphore here in order to serialize 9095 * open and close requests on the device. 9096 */ 9097 sema_p(&un->un_semoclose); 9098 9099 mutex_enter(SD_MUTEX(un)); 9100 9101 /* 9102 * All device accesses go thru sdstrategy() where we check 9103 * on suspend status, but a scsi_poll command can bypass 9104 * sdstrategy(), so we need to check the pm 9105 * status here as well.
9106 */ 9107 9108 if (!nodelay) { 9109 while ((un->un_state == SD_STATE_SUSPENDED) || 9110 (un->un_state == SD_STATE_PM_CHANGING)) { 9111 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9112 } 9113 9114 mutex_exit(SD_MUTEX(un)); 9115 if (sd_pm_entry(un) != DDI_SUCCESS) { 9116 rval = EIO; 9117 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9118 "sdopen: sd_pm_entry failed\n"); 9119 goto open_failed_with_pm; 9120 } 9121 mutex_enter(SD_MUTEX(un)); 9122 } 9123 9124 /* check for previous exclusive open */ 9125 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9126 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9127 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9128 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9129 9130 if (un->un_exclopen & (partmask)) { 9131 goto excl_open_fail; 9132 } 9133 9134 if (flag & FEXCL) { 9135 int i; 9136 if (un->un_ocmap.lyropen[part]) { 9137 goto excl_open_fail; 9138 } 9139 for (i = 0; i < (OTYPCNT - 1); i++) { 9140 if (un->un_ocmap.regopen[i] & (partmask)) { 9141 goto excl_open_fail; 9142 } 9143 } 9144 } 9145 9146 /* 9147 * Check the write permission if this is a removable media device, 9148 * NDELAY has not been set, and write permission is requested. 9149 * 9150 * Note: If NDELAY was set and this is write-protected media, the WRITE 9151 * attempt will fail with EIO as part of the I/O processing. This is a 9152 * more permissive implementation that allows the open to succeed and 9153 * WRITE attempts to fail when appropriate. 9154 */ 9155 if (un->un_f_chk_wp_open) { 9156 if ((flag & FWRITE) && (!nodelay)) { 9157 mutex_exit(SD_MUTEX(un)); 9158 /* 9159 * For a writable DVD drive, defer the write-permission 9160 * check until sdstrategy; the open is not failed even 9161 * if FWRITE is set, since the device may be writable 9162 * depending upon the media, and the media can change 9163 * after the call to open(). 9164 */ 9165 if (un->un_f_dvdram_writable_device == FALSE) { 9166 if (ISCD(un) || sr_check_wp(dev)) { 9167 rval = EROFS; 9168 mutex_enter(SD_MUTEX(un)); 9169 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9170 "write to cd or write protected media\n"); 9171 goto open_fail; 9172 } 9173 } 9174 mutex_enter(SD_MUTEX(un)); 9175 } 9176 } 9177 9178 /* 9179 * If opening in NDELAY/NONBLOCK mode, just return; whether the disk 9180 * is ready and has a valid geometry is checked later. 9181 */ 9182 if (!nodelay) { 9183 mutex_exit(SD_MUTEX(un)); 9184 rval = sd_ready_and_valid(un); 9185 mutex_enter(SD_MUTEX(un)); 9186 /* 9187 * Fail if the device is not ready, or if the number of disk 9188 * blocks is zero or negative for non-CD devices. 9189 */ 9190 9191 nblks = 0; 9192 9193 if (rval == SD_READY_VALID && (!ISCD(un))) { 9194 /* if cmlb_partinfo fails, nblks remains 0 */ 9195 mutex_exit(SD_MUTEX(un)); 9196 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9197 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9198 mutex_enter(SD_MUTEX(un)); 9199 } 9200 9201 if ((rval != SD_READY_VALID) || 9202 (!ISCD(un) && nblks <= 0)) { 9203 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9204 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9205 "device not ready or invalid disk block value\n"); 9206 goto open_fail; 9207 } 9208 #if defined(__i386) || defined(__amd64) 9209 } else { 9210 uchar_t *cp; 9211 /* 9212 * x86 requires special nodelay handling, so that p0 is 9213 * always defined and accessible. 9214 * Invalidate the geometry only if the device is not already open.
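 *
 *		(un_ocmap.chkd[] is assumed here to overlay the
 *		lyropen/regopen maps, so the byte scan below amounts to
 *		the test "no partition of this unit is currently open".)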
9215 */ 9216 cp = &un->un_ocmap.chkd[0]; 9217 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9218 if (*cp != (uchar_t)0) { 9219 break; 9220 } 9221 cp++; 9222 } 9223 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9224 mutex_exit(SD_MUTEX(un)); 9225 cmlb_invalidate(un->un_cmlbhandle, 9226 (void *)SD_PATH_DIRECT); 9227 mutex_enter(SD_MUTEX(un)); 9228 } 9229 9230 #endif 9231 } 9232 9233 if (otyp == OTYP_LYR) { 9234 un->un_ocmap.lyropen[part]++; 9235 } else { 9236 un->un_ocmap.regopen[otyp] |= partmask; 9237 } 9238 9239 /* Set up open and exclusive open flags */ 9240 if (flag & FEXCL) { 9241 un->un_exclopen |= (partmask); 9242 } 9243 9244 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9245 "open of part %d type %d\n", part, otyp); 9246 9247 mutex_exit(SD_MUTEX(un)); 9248 if (!nodelay) { 9249 sd_pm_exit(un); 9250 } 9251 9252 sema_v(&un->un_semoclose); 9253 9254 mutex_enter(&sd_detach_mutex); 9255 un->un_opens_in_progress--; 9256 mutex_exit(&sd_detach_mutex); 9257 9258 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9259 return (DDI_SUCCESS); 9260 9261 excl_open_fail: 9262 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9263 rval = EBUSY; 9264 9265 open_fail: 9266 mutex_exit(SD_MUTEX(un)); 9267 9268 /* 9269 * On a failed open we must exit PM management. 9270 */ 9271 if (!nodelay) { 9272 sd_pm_exit(un); 9273 } 9274 open_failed_with_pm: 9275 sema_v(&un->un_semoclose); 9276 9277 mutex_enter(&sd_detach_mutex); 9278 un->un_opens_in_progress--; 9279 if (otyp == OTYP_LYR) { 9280 un->un_layer_count--; 9281 } 9282 mutex_exit(&sd_detach_mutex); 9283 9284 return (rval); 9285 } 9286 9287 9288 /* 9289 * Function: sdclose 9290 * 9291 * Description: Driver's close(9e) entry point function. 9292 * 9293 * Arguments: dev - device number 9294 * flag - file status flag, informational only 9295 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9296 * cred_p - user credential pointer 9297 * 9298 * Return Code: 0, EIO, ENXIO 9299 * 9300 * Context: Kernel thread context 9301 */ 9302 /* ARGSUSED */ 9303 static int 9304 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9305 { 9306 struct sd_lun *un; 9307 uchar_t *cp; 9308 int part; 9309 int nodelay; 9310 int rval = 0; 9311 9312 /* Validate the open type */ 9313 if (otyp >= OTYPCNT) { 9314 return (ENXIO); 9315 } 9316 9317 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9318 return (ENXIO); 9319 } 9320 9321 part = SDPART(dev); 9322 nodelay = flag & (FNDELAY | FNONBLOCK); 9323 9324 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9325 "sdclose: close of part %d type %d\n", part, otyp); 9326 9327 /* 9328 * We use a semaphore here in order to serialize 9329 * open and close requests on the device. 9330 */ 9331 sema_p(&un->un_semoclose); 9332 9333 mutex_enter(SD_MUTEX(un)); 9334 9335 /* Don't proceed if power is being changed.
*/ 9336 while (un->un_state == SD_STATE_PM_CHANGING) { 9337 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9338 } 9339 9340 if (un->un_exclopen & (1 << part)) { 9341 un->un_exclopen &= ~(1 << part); 9342 } 9343 9344 /* Update the open partition map */ 9345 if (otyp == OTYP_LYR) { 9346 un->un_ocmap.lyropen[part] -= 1; 9347 } else { 9348 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9349 } 9350 9351 cp = &un->un_ocmap.chkd[0]; 9352 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9353 if (*cp != (uchar_t)0) { 9354 break; 9355 } 9356 cp++; 9357 } 9358 9359 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9360 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9361 9362 /* 9363 * We avoid persistence upon the last close, and set 9364 * the throttle back to the maximum. 9365 */ 9366 un->un_throttle = un->un_saved_throttle; 9367 9368 if (un->un_state == SD_STATE_OFFLINE) { 9369 if (un->un_f_is_fibre == FALSE) { 9370 scsi_log(SD_DEVINFO(un), sd_label, 9371 CE_WARN, "offline\n"); 9372 } 9373 mutex_exit(SD_MUTEX(un)); 9374 cmlb_invalidate(un->un_cmlbhandle, 9375 (void *)SD_PATH_DIRECT); 9376 mutex_enter(SD_MUTEX(un)); 9377 9378 } else { 9379 /* 9380 * Flush any outstanding writes in NVRAM cache. 9381 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9382 * command; it may not work for non-Pluto devices. 9383 * SYNCHRONIZE CACHE is not required for removables, 9384 * except DVD-RAM drives. 9385 * 9386 * Also note: because SYNCHRONIZE CACHE is currently 9387 * the only command issued here that requires the 9388 * drive be powered up, only do the power up before 9389 * sending the Sync Cache command. If additional 9390 * commands are added which require a powered up 9391 * drive, the following sequence may have to change. 9392 * 9393 * And finally, note that parallel SCSI on SPARC 9394 * only issues a Sync Cache to DVD-RAM, a newly 9395 * supported device. 9396 */ 9397 #if defined(__i386) || defined(__amd64) 9398 if (un->un_f_sync_cache_supported || 9399 un->un_f_dvdram_writable_device == TRUE) { 9400 #else 9401 if (un->un_f_dvdram_writable_device == TRUE) { 9402 #endif 9403 mutex_exit(SD_MUTEX(un)); 9404 if (sd_pm_entry(un) == DDI_SUCCESS) { 9405 rval = 9406 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9407 NULL); 9408 /* ignore error if not supported */ 9409 if (rval == ENOTSUP) { 9410 rval = 0; 9411 } else if (rval != 0) { 9412 rval = EIO; 9413 } 9414 sd_pm_exit(un); 9415 } else { 9416 rval = EIO; 9417 } 9418 mutex_enter(SD_MUTEX(un)); 9419 } 9420 9421 /* 9422 * For devices which support DOOR_LOCK, send an ALLOW 9423 * MEDIA REMOVAL command, but don't get upset if it 9424 * fails. We need to raise the power of the drive before 9425 * we can call sd_send_scsi_DOORLOCK(). 9426 */ 9427 if (un->un_f_doorlock_supported) { 9428 mutex_exit(SD_MUTEX(un)); 9429 if (sd_pm_entry(un) == DDI_SUCCESS) { 9430 rval = sd_send_scsi_DOORLOCK(un, 9431 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9432 9433 sd_pm_exit(un); 9434 if (ISCD(un) && (rval != 0) && 9435 (nodelay != 0)) { 9436 rval = ENXIO; 9437 } 9438 } else { 9439 rval = EIO; 9440 } 9441 mutex_enter(SD_MUTEX(un)); 9442 } 9443 9444 /* 9445 * If a device has removable media, invalidate all 9446 * parameters related to media, such as geometry, 9447 * blocksize, and blockcount. 9448 */ 9449 if (un->un_f_has_removable_media) { 9450 sr_ejected(un); 9451 } 9452 9453 /* 9454 * Destroy the cache (if it exists) which was 9455 * allocated for the write maps, since this is 9456 * the last close for this media. 9457 */ 9458 if (un->un_wm_cache) { 9459 /* 9460 * Check if there are pending commands;
9461 * if there are, log a warning and 9462 * do not destroy the cache. 9463 */ 9464 if (un->un_ncmds_in_driver > 0) { 9465 scsi_log(SD_DEVINFO(un), 9466 sd_label, CE_WARN, 9467 "Unable to clean up memory " 9468 "because of pending I/O\n"); 9469 } else { 9470 kmem_cache_destroy( 9471 un->un_wm_cache); 9472 un->un_wm_cache = NULL; 9473 } 9474 } 9475 } 9476 } 9477 9478 mutex_exit(SD_MUTEX(un)); 9479 sema_v(&un->un_semoclose); 9480 9481 if (otyp == OTYP_LYR) { 9482 mutex_enter(&sd_detach_mutex); 9483 /* 9484 * The detach routine may run when the layer count 9485 * drops to zero. 9486 */ 9487 un->un_layer_count--; 9488 mutex_exit(&sd_detach_mutex); 9489 } 9490 9491 return (rval); 9492 } 9493 9494 9495 /* 9496 * Function: sd_ready_and_valid 9497 * 9498 * Description: Test if device is ready and has a valid geometry. 9499 * 9500 * Arguments: un - driver soft state (unit) structure 9501 * 9502 * 9503 * Return Code: SD_READY_VALID ready and valid label 9504 * SD_NOT_READY_VALID not ready, no label 9505 * SD_RESERVED_BY_OTHERS reservation conflict 9506 * ENOMEM the write-map cache could not be allocated 9507 * Context: Never called at interrupt context. 9508 */ 9509 9510 static int 9511 sd_ready_and_valid(struct sd_lun *un) 9512 { 9513 struct sd_errstats *stp; 9514 uint64_t capacity; 9515 uint_t lbasize; 9516 int rval = SD_READY_VALID; 9517 char name_str[48]; 9518 int is_valid; 9519 9520 ASSERT(un != NULL); 9521 ASSERT(!mutex_owned(SD_MUTEX(un))); 9522 9523 mutex_enter(SD_MUTEX(un)); 9524 /* 9525 * If a device has removable media, we must also check that the media 9526 * is ready when determining whether this device is ready and valid. 9527 */ 9528 if (un->un_f_has_removable_media) { 9529 mutex_exit(SD_MUTEX(un)); 9530 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9531 rval = SD_NOT_READY_VALID; 9532 mutex_enter(SD_MUTEX(un)); 9533 goto done; 9534 } 9535 9536 is_valid = SD_IS_VALID_LABEL(un); 9537 mutex_enter(SD_MUTEX(un)); 9538 if (!is_valid || 9539 (un->un_f_blockcount_is_valid == FALSE) || 9540 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9541 9542 /* Capacity has to be re-read on every open. */ 9543 mutex_exit(SD_MUTEX(un)); 9544 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9545 &lbasize, SD_PATH_DIRECT) != 0) { 9546 cmlb_invalidate(un->un_cmlbhandle, 9547 (void *)SD_PATH_DIRECT); 9548 mutex_enter(SD_MUTEX(un)); 9549 rval = SD_NOT_READY_VALID; 9550 goto done; 9551 } else { 9552 mutex_enter(SD_MUTEX(un)); 9553 sd_update_block_info(un, lbasize, capacity); 9554 } 9555 } 9556 9557 /* 9558 * Check whether the media in the device is writable. 9559 */ 9560 if (!is_valid && ISCD(un)) { 9561 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9562 } 9563 9564 } else { 9565 /* 9566 * Do a test unit ready to clear any unit attention from non-CD 9567 * devices. 9568 */ 9569 mutex_exit(SD_MUTEX(un)); 9570 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9571 mutex_enter(SD_MUTEX(un)); 9572 } 9573 9574 9575 /* 9576 * If this is a non-512-byte-block device, allocate space for 9577 * the wmap cache. This is done here since this routine is called 9578 * every time the media changes, and the 9579 * block size is a function of the media rather than of the device.
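 *
 *	(For reference, the snprintf below names the cache
 *	"<driver><instance>_cache", e.g. "sd3_cache" for sd instance 3;
 *	the instance number is a hypothetical example.)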
9580 */ 9581 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9582 if (!(un->un_wm_cache)) { 9583 (void) snprintf(name_str, sizeof (name_str), 9584 "%s%d_cache", 9585 ddi_driver_name(SD_DEVINFO(un)), 9586 ddi_get_instance(SD_DEVINFO(un))); 9587 un->un_wm_cache = kmem_cache_create( 9588 name_str, sizeof (struct sd_w_map), 9589 8, sd_wm_cache_constructor, 9590 sd_wm_cache_destructor, NULL, 9591 (void *)un, NULL, 0); 9592 if (!(un->un_wm_cache)) { 9593 rval = ENOMEM; 9594 goto done; 9595 } 9596 } 9597 } 9598 9599 if (un->un_state == SD_STATE_NORMAL) { 9600 /* 9601 * If the target is not yet ready here (defined by a TUR 9602 * failure), invalidate the geometry and print an 'offline' 9603 * message. This is a legacy message, as the state of the 9604 * target is not actually changed to SD_STATE_OFFLINE. 9605 * 9606 * If the TUR fails with EACCES (reservation conflict), 9607 * SD_RESERVED_BY_OTHERS will be returned to indicate 9608 * reservation conflict. If the TUR fails for other 9609 * reasons, SD_NOT_READY_VALID will be returned. 9610 */ 9611 int err; 9612 9613 mutex_exit(SD_MUTEX(un)); 9614 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9615 mutex_enter(SD_MUTEX(un)); 9616 9617 if (err != 0) { 9618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9619 "offline or reservation conflict\n"); 9620 mutex_exit(SD_MUTEX(un)); 9621 cmlb_invalidate(un->un_cmlbhandle, 9622 (void *)SD_PATH_DIRECT); 9623 mutex_enter(SD_MUTEX(un)); 9624 if (err == EACCES) { 9625 rval = SD_RESERVED_BY_OTHERS; 9626 } else { 9627 rval = SD_NOT_READY_VALID; 9628 } 9629 goto done; 9630 } 9631 } 9632 9633 if (un->un_f_format_in_progress == FALSE) { 9634 mutex_exit(SD_MUTEX(un)); 9635 if (cmlb_validate(un->un_cmlbhandle, 0, 9636 (void *)SD_PATH_DIRECT) != 0) { 9637 rval = SD_NOT_READY_VALID; 9638 mutex_enter(SD_MUTEX(un)); 9639 goto done; 9640 } 9641 if (un->un_f_pkstats_enabled) { 9642 sd_set_pstats(un); 9643 SD_TRACE(SD_LOG_IO_PARTITION, un, 9644 "sd_ready_and_valid: un:0x%p pstats created and " 9645 "set\n", un); 9646 } 9647 mutex_enter(SD_MUTEX(un)); 9648 } 9649 9650 /* 9651 * If this device supports the DOOR_LOCK command, try to send 9652 * it to PREVENT MEDIA REMOVAL, but don't get upset 9653 * if it fails. For a CD, however, failure is an error 9654 */ 9655 if (un->un_f_doorlock_supported) { 9656 mutex_exit(SD_MUTEX(un)); 9657 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9658 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9659 rval = SD_NOT_READY_VALID; 9660 mutex_enter(SD_MUTEX(un)); 9661 goto done; 9662 } 9663 mutex_enter(SD_MUTEX(un)); 9664 } 9665 9666 /* The state has changed, inform the media watch routines */ 9667 un->un_mediastate = DKIO_INSERTED; 9668 cv_broadcast(&un->un_state_cv); 9669 rval = SD_READY_VALID; 9670 9671 done: 9672 9673 /* 9674 * Initialize the capacity kstat value if there was no media 9675 * previously (capacity kstat is 0) and media has now been 9676 * inserted (un_blockcount > 0). 9677 */ 9678 if (un->un_errstats != NULL) { 9679 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9680 if ((stp->sd_capacity.value.ui64 == 0) && 9681 (un->un_f_blockcount_is_valid == TRUE)) { 9682 stp->sd_capacity.value.ui64 = 9683 (uint64_t)((uint64_t)un->un_blockcount * 9684 un->un_sys_blocksize); 9685 } 9686 } 9687 9688 mutex_exit(SD_MUTEX(un)); 9689 return (rval); 9690 } 9691 9692 9693 /* 9694 * Function: sdmin 9695 * 9696 * Description: Routine to limit the size of a data transfer. Used in 9697 * conjunction with physio(9F). 9698 * 9699 * Arguments: bp - pointer to the indicated buf(9S) struct.
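 *
 *		For reference, sdmin is the transfer-limiting callback
 *		passed to physio(9F)/aphysio(9F) by the read/write entry
 *		points below, e.g.:
 *
 *			err = physio(sdstrategy, NULL, dev, B_READ,
 *			    sdmin, uio);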
9700 * 9701 * Context: Kernel thread context. 9702 */ 9703 9704 static void 9705 sdmin(struct buf *bp) 9706 { 9707 struct sd_lun *un; 9708 int instance; 9709 9710 instance = SDUNIT(bp->b_edev); 9711 9712 un = ddi_get_soft_state(sd_state, instance); 9713 ASSERT(un != NULL); 9714 9715 if (bp->b_bcount > un->un_max_xfer_size) { 9716 bp->b_bcount = un->un_max_xfer_size; 9717 } 9718 } 9719 9720 9721 /* 9722 * Function: sdread 9723 * 9724 * Description: Driver's read(9e) entry point function. 9725 * 9726 * Arguments: dev - device number 9727 * uio - structure pointer describing where data is to be stored 9728 * in user's space 9729 * cred_p - user credential pointer 9730 * 9731 * Return Code: ENXIO 9732 * EIO 9733 * EINVAL 9734 * value returned by physio 9735 * 9736 * Context: Kernel thread context. 9737 */ 9738 /* ARGSUSED */ 9739 static int 9740 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9741 { 9742 struct sd_lun *un = NULL; 9743 int secmask; 9744 int err; 9745 9746 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9747 return (ENXIO); 9748 } 9749 9750 ASSERT(!mutex_owned(SD_MUTEX(un))); 9751 9752 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9753 mutex_enter(SD_MUTEX(un)); 9754 /* 9755 * Because the call to sd_ready_and_valid will issue I/O, we 9756 * must wait here if either the device is suspended or 9757 * its power level is changing. 9758 */ 9759 while ((un->un_state == SD_STATE_SUSPENDED) || 9760 (un->un_state == SD_STATE_PM_CHANGING)) { 9761 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9762 } 9763 un->un_ncmds_in_driver++; 9764 mutex_exit(SD_MUTEX(un)); 9765 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9766 mutex_enter(SD_MUTEX(un)); 9767 un->un_ncmds_in_driver--; 9768 ASSERT(un->un_ncmds_in_driver >= 0); 9769 mutex_exit(SD_MUTEX(un)); 9770 return (EIO); 9771 } 9772 mutex_enter(SD_MUTEX(un)); 9773 un->un_ncmds_in_driver--; 9774 ASSERT(un->un_ncmds_in_driver >= 0); 9775 mutex_exit(SD_MUTEX(un)); 9776 } 9777 9778 /* 9779 * Read requests are restricted to multiples of the system block size. 9780 */ 9781 secmask = un->un_sys_blocksize - 1; 9782 9783 if (uio->uio_loffset & ((offset_t)(secmask))) { 9784 SD_ERROR(SD_LOG_READ_WRITE, un, 9785 "sdread: file offset not modulo %d\n", 9786 un->un_sys_blocksize); 9787 err = EINVAL; 9788 } else if (uio->uio_iov->iov_len & (secmask)) { 9789 SD_ERROR(SD_LOG_READ_WRITE, un, 9790 "sdread: transfer length not modulo %d\n", 9791 un->un_sys_blocksize); 9792 err = EINVAL; 9793 } else { 9794 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9795 } 9796 return (err); 9797 } 9798 9799 9800 /* 9801 * Function: sdwrite 9802 * 9803 * Description: Driver's write(9e) entry point function. 9804 * 9805 * Arguments: dev - device number 9806 * uio - structure pointer describing where data is stored in 9807 * user's space 9808 * cred_p - user credential pointer 9809 * 9810 * Return Code: ENXIO 9811 * EIO 9812 * EINVAL 9813 * value returned by physio 9814 * 9815 * Context: Kernel thread context.
9816 */ 9817 /* ARGSUSED */ 9818 static int 9819 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9820 { 9821 struct sd_lun *un = NULL; 9822 int secmask; 9823 int err; 9824 9825 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9826 return (ENXIO); 9827 } 9828 9829 ASSERT(!mutex_owned(SD_MUTEX(un))); 9830 9831 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9832 mutex_enter(SD_MUTEX(un)); 9833 /* 9834 * Because the call to sd_ready_and_valid will issue I/O, we 9835 * must wait here if either the device is suspended or 9836 * its power level is changing. 9837 */ 9838 while ((un->un_state == SD_STATE_SUSPENDED) || 9839 (un->un_state == SD_STATE_PM_CHANGING)) { 9840 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9841 } 9842 un->un_ncmds_in_driver++; 9843 mutex_exit(SD_MUTEX(un)); 9844 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9845 mutex_enter(SD_MUTEX(un)); 9846 un->un_ncmds_in_driver--; 9847 ASSERT(un->un_ncmds_in_driver >= 0); 9848 mutex_exit(SD_MUTEX(un)); 9849 return (EIO); 9850 } 9851 mutex_enter(SD_MUTEX(un)); 9852 un->un_ncmds_in_driver--; 9853 ASSERT(un->un_ncmds_in_driver >= 0); 9854 mutex_exit(SD_MUTEX(un)); 9855 } 9856 9857 /* 9858 * Write requests are restricted to multiples of the system block size. 9859 */ 9860 secmask = un->un_sys_blocksize - 1; 9861 9862 if (uio->uio_loffset & ((offset_t)(secmask))) { 9863 SD_ERROR(SD_LOG_READ_WRITE, un, 9864 "sdwrite: file offset not modulo %d\n", 9865 un->un_sys_blocksize); 9866 err = EINVAL; 9867 } else if (uio->uio_iov->iov_len & (secmask)) { 9868 SD_ERROR(SD_LOG_READ_WRITE, un, 9869 "sdwrite: transfer length not modulo %d\n", 9870 un->un_sys_blocksize); 9871 err = EINVAL; 9872 } else { 9873 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9874 } 9875 return (err); 9876 } 9877 9878 9879 /* 9880 * Function: sdaread 9881 * 9882 * Description: Driver's aread(9e) entry point function. 9883 * 9884 * Arguments: dev - device number 9885 * aio - structure pointer describing where data is to be stored 9886 * cred_p - user credential pointer 9887 * 9888 * Return Code: ENXIO 9889 * EIO 9890 * EINVAL 9891 * value returned by aphysio 9892 * 9893 * Context: Kernel thread context. 9894 */ 9895 /* ARGSUSED */ 9896 static int 9897 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9898 { 9899 struct sd_lun *un = NULL; 9900 struct uio *uio = aio->aio_uio; 9901 int secmask; 9902 int err; 9903 9904 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9905 return (ENXIO); 9906 } 9907 9908 ASSERT(!mutex_owned(SD_MUTEX(un))); 9909 9910 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9911 mutex_enter(SD_MUTEX(un)); 9912 /* 9913 * Because the call to sd_ready_and_valid will issue I/O, we 9914 * must wait here if either the device is suspended or 9915 * its power level is changing. 9916 */ 9917 while ((un->un_state == SD_STATE_SUSPENDED) || 9918 (un->un_state == SD_STATE_PM_CHANGING)) { 9919 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9920 } 9921 un->un_ncmds_in_driver++; 9922 mutex_exit(SD_MUTEX(un)); 9923 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9924 mutex_enter(SD_MUTEX(un)); 9925 un->un_ncmds_in_driver--; 9926 ASSERT(un->un_ncmds_in_driver >= 0); 9927 mutex_exit(SD_MUTEX(un)); 9928 return (EIO); 9929 } 9930 mutex_enter(SD_MUTEX(un)); 9931 un->un_ncmds_in_driver--; 9932 ASSERT(un->un_ncmds_in_driver >= 0); 9933 mutex_exit(SD_MUTEX(un)); 9934 } 9935 9936 /* 9937 * Read requests are restricted to multiples of the system block size.
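 *	(For example, assuming the common 512-byte system block size,
 *	secmask below is 0x1FF, so any file offset or iovec length with
 *	any of the low nine bits set fails with EINVAL.)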
9938 */ 9939 secmask = un->un_sys_blocksize - 1; 9940 9941 if (uio->uio_loffset & ((offset_t)(secmask))) { 9942 SD_ERROR(SD_LOG_READ_WRITE, un, 9943 "sdaread: file offset not modulo %d\n", 9944 un->un_sys_blocksize); 9945 err = EINVAL; 9946 } else if (uio->uio_iov->iov_len & (secmask)) { 9947 SD_ERROR(SD_LOG_READ_WRITE, un, 9948 "sdaread: transfer length not modulo %d\n", 9949 un->un_sys_blocksize); 9950 err = EINVAL; 9951 } else { 9952 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9953 } 9954 return (err); 9955 } 9956 9957 9958 /* 9959 * Function: sdawrite 9960 * 9961 * Description: Driver's awrite(9e) entry point function. 9962 * 9963 * Arguments: dev - device number 9964 * aio - structure pointer describing where data is stored 9965 * cred_p - user credential pointer 9966 * 9967 * Return Code: ENXIO 9968 * EIO 9969 * EINVAL 9970 * value returned by aphysio 9971 * 9972 * Context: Kernel thread context. 9973 */ 9974 /* ARGSUSED */ 9975 static int 9976 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9977 { 9978 struct sd_lun *un = NULL; 9979 struct uio *uio = aio->aio_uio; 9980 int secmask; 9981 int err; 9982 9983 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9984 return (ENXIO); 9985 } 9986 9987 ASSERT(!mutex_owned(SD_MUTEX(un))); 9988 9989 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9990 mutex_enter(SD_MUTEX(un)); 9991 /* 9992 * Because the call to sd_ready_and_valid will issue I/O, we 9993 * must wait here if either the device is suspended or 9994 * its power level is changing. 9995 */ 9996 while ((un->un_state == SD_STATE_SUSPENDED) || 9997 (un->un_state == SD_STATE_PM_CHANGING)) { 9998 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9999 } 10000 un->un_ncmds_in_driver++; 10001 mutex_exit(SD_MUTEX(un)); 10002 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10003 mutex_enter(SD_MUTEX(un)); 10004 un->un_ncmds_in_driver--; 10005 ASSERT(un->un_ncmds_in_driver >= 0); 10006 mutex_exit(SD_MUTEX(un)); 10007 return (EIO); 10008 } 10009 mutex_enter(SD_MUTEX(un)); 10010 un->un_ncmds_in_driver--; 10011 ASSERT(un->un_ncmds_in_driver >= 0); 10012 mutex_exit(SD_MUTEX(un)); 10013 } 10014 10015 /* 10016 * Write requests are restricted to multiples of the system block size.
10017 */ 10018 secmask = un->un_sys_blocksize - 1; 10019 10020 if (uio->uio_loffset & ((offset_t)(secmask))) { 10021 SD_ERROR(SD_LOG_READ_WRITE, un, 10022 "sdawrite: file offset not modulo %d\n", 10023 un->un_sys_blocksize); 10024 err = EINVAL; 10025 } else if (uio->uio_iov->iov_len & (secmask)) { 10026 SD_ERROR(SD_LOG_READ_WRITE, un, 10027 "sdawrite: transfer length not modulo %d\n", 10028 un->un_sys_blocksize); 10029 err = EINVAL; 10030 } else { 10031 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10032 } 10033 return (err); 10034 } 10035 10036 10037 10038 10039 10040 /* 10041 * Driver IO processing follows the following sequence: 10042 * 10043 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10044 * | | ^ 10045 * v v | 10046 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10047 * | | | | 10048 * v | | | 10049 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10050 * | | ^ ^ 10051 * v v | | 10052 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10053 * | | | | 10054 * +---+ | +------------+ +-------+ 10055 * | | | | 10056 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10057 * | v | | 10058 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10059 * | | ^ | 10060 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10061 * | v | | 10062 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10063 * | | ^ | 10064 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10065 * | v | | 10066 * | sd_checksum_iostart() sd_checksum_iodone() | 10067 * | | ^ | 10068 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10069 * | v | | 10070 * | sd_pm_iostart() sd_pm_iodone() | 10071 * | | ^ | 10072 * | | | | 10073 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10074 * | ^ 10075 * v | 10076 * sd_core_iostart() | 10077 * | | 10078 * | +------>(*destroypkt)() 10079 * +-> sd_start_cmds() <-+ | | 10080 * | | | v 10081 * | | | scsi_destroy_pkt(9F) 10082 * | | | 10083 * +->(*initpkt)() +- sdintr() 10084 * | | | | 10085 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10086 * | +-> scsi_setup_cdb(9F) | 10087 * | | 10088 * +--> scsi_transport(9F) | 10089 * | | 10090 * +----> SCSA ---->+ 10091 * 10092 * 10093 * This code is based upon the following presumptions: 10094 * 10095 * - iostart and iodone functions operate on buf(9S) structures. These 10096 * functions perform the necessary operations on the buf(9S) and pass 10097 * them along to the next function in the chain by using the macros 10098 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10099 * (for iodone side functions). 10100 * 10101 * - The iostart side functions may sleep. The iodone side functions 10102 * are called under interrupt context and may NOT sleep. Therefore 10103 * iodone side functions also may not call iostart side functions. 10104 * (NOTE: iostart side functions should NOT sleep for memory, as 10105 * this could result in deadlock.) 10106 * 10107 * - An iostart side function may call its corresponding iodone side 10108 * function directly (if necessary). 10109 * 10110 * - In the event of an error, an iostart side function can return a buf(9S) 10111 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10112 * b_error in the usual way of course). 10113 * 10114 * - The taskq mechanism may be used by the iodone side functions to dispatch 10115 * requests to the iostart side functions. The iostart side functions in 10116 * this case would be called under the context of a taskq thread, so it's 10117 * OK for them to block/sleep/spin in this case. 
10118 * 10119 * - iostart side functions may allocate "shadow" buf(9S) structs and 10120 * pass them along to the next function in the chain. The corresponding 10121 * iodone side functions must coalesce the "shadow" bufs and return 10122 * the "original" buf to the next higher layer. 10123 * 10124 * - The b_private field of the buf(9S) struct holds a pointer to 10125 * an sd_xbuf struct, which contains information needed to 10126 * construct the scsi_pkt for the command. 10127 * 10128 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10129 * layer must acquire & release the SD_MUTEX(un) as needed. 10130 */ 10131 10132 10133 /* 10134 * Create taskq for all targets in the system. This is created at 10135 * _init(9E) and destroyed at _fini(9E). 10136 * 10137 * Note: here we set the minalloc to a reasonably high number to ensure that 10138 * we will have an adequate supply of task entries available at interrupt time. 10139 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10140 * sd_taskq_create(). Since we do not want to sleep for allocations at 10141 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10142 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10143 * requests at any one instant in time. 10144 */ 10145 #define SD_TASKQ_NUMTHREADS 8 10146 #define SD_TASKQ_MINALLOC 256 10147 #define SD_TASKQ_MAXALLOC 256 10148 10149 static taskq_t *sd_tq = NULL; 10150 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10151 10152 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10153 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10154 10155 /* 10156 * The following task queue is created for the write part of 10157 * read-modify-write on non-512-byte-block-size devices. 10158 * The number of threads is limited to 1 for now; this has been chosen 10159 * because the queue currently applies only to DVD-RAM and MO drives, 10160 * for which performance is not the main criterion at this stage. 10161 * Note: whether a single taskq can be used instead should be explored in the future. 10162 */ 10163 #define SD_WMR_TASKQ_NUMTHREADS 1 10164 static taskq_t *sd_wmr_tq = NULL; 10165 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10166 10167 /* 10168 * Function: sd_taskq_create 10169 * 10170 * Description: Create taskq thread(s) and preallocate task entries 10171 * 10172 * Return Code: none; the created taskqs are stored in sd_tq and sd_wmr_tq. 10173 * 10174 * Context: Can sleep. Requires blockable context. 10175 * 10176 * Notes: - The taskq() facility currently is NOT part of the DDI. 10177 * (definitely NOT recommended for 3rd-party drivers!) :-) 10178 * - taskq_create() will block for memory; also, it will panic 10179 * if it cannot create the requested number of threads. 10180 * - Currently taskq_create() creates threads that cannot be 10181 * swapped.
10182 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10183 * supply of taskq entries at interrupt time (ie, so that we 10184 * do not have to sleep for memory) 10185 */ 10186 10187 static void 10188 sd_taskq_create(void) 10189 { 10190 char taskq_name[TASKQ_NAMELEN]; 10191 10192 ASSERT(sd_tq == NULL); 10193 ASSERT(sd_wmr_tq == NULL); 10194 10195 (void) snprintf(taskq_name, sizeof (taskq_name), 10196 "%s_drv_taskq", sd_label); 10197 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10198 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10199 TASKQ_PREPOPULATE)); 10200 10201 (void) snprintf(taskq_name, sizeof (taskq_name), 10202 "%s_rmw_taskq", sd_label); 10203 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10204 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10205 TASKQ_PREPOPULATE)); 10206 } 10207 10208 10209 /* 10210 * Function: sd_taskq_delete 10211 * 10212 * Description: Complementary cleanup routine for sd_taskq_create(). 10213 * 10214 * Context: Kernel thread context. 10215 */ 10216 10217 static void 10218 sd_taskq_delete(void) 10219 { 10220 ASSERT(sd_tq != NULL); 10221 ASSERT(sd_wmr_tq != NULL); 10222 taskq_destroy(sd_tq); 10223 taskq_destroy(sd_wmr_tq); 10224 sd_tq = NULL; 10225 sd_wmr_tq = NULL; 10226 } 10227 10228 10229 /* 10230 * Function: sdstrategy 10231 * 10232 * Description: Driver's strategy (9E) entry point function. 10233 * 10234 * Arguments: bp - pointer to buf(9S) 10235 * 10236 * Return Code: Always returns zero 10237 * 10238 * Context: Kernel thread context. 10239 */ 10240 10241 static int 10242 sdstrategy(struct buf *bp) 10243 { 10244 struct sd_lun *un; 10245 10246 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10247 if (un == NULL) { 10248 bioerror(bp, EIO); 10249 bp->b_resid = bp->b_bcount; 10250 biodone(bp); 10251 return (0); 10252 } 10253 /* As was done in the past, fail new cmds if state is dumping. */ 10254 if (un->un_state == SD_STATE_DUMPING) { 10255 bioerror(bp, ENXIO); 10256 bp->b_resid = bp->b_bcount; 10257 biodone(bp); 10258 return (0); 10259 } 10260 10261 ASSERT(!mutex_owned(SD_MUTEX(un))); 10262 10263 /* 10264 * Commands may sneak in while we release the mutex in 10265 * DDI_SUSPEND, so we should block new commands. However, old 10266 * commands that are still in the driver at this point should 10267 * still be allowed to drain. 10268 */ 10269 mutex_enter(SD_MUTEX(un)); 10270 /* 10271 * Must wait here if either the device is suspended or 10272 * its power level is changing. 10273 */ 10274 while ((un->un_state == SD_STATE_SUSPENDED) || 10275 (un->un_state == SD_STATE_PM_CHANGING)) { 10276 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10277 } 10278 10279 un->un_ncmds_in_driver++; 10280 10281 /* 10282 * atapi: Since we currently run the CD in PIO mode, we need to 10283 * call bp_mapin here to avoid bp_mapin being called from interrupt 10284 * context under the HBA's init_pkt routine. 10285 */ 10286 if (un->un_f_cfg_is_atapi == TRUE) { 10287 mutex_exit(SD_MUTEX(un)); 10288 bp_mapin(bp); 10289 mutex_enter(SD_MUTEX(un)); 10290 } 10291 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10292 un->un_ncmds_in_driver); 10293 10294 mutex_exit(SD_MUTEX(un)); 10295 10296 /* 10297 * This will (eventually) allocate the sd_xbuf area and 10298 * call sd_xbuf_strategy(). We just want to return the 10299 * result of ddi_xbuf_qstrategy so that we have an 10300 * optimized tail call which saves us a stack frame.
10301 */ 10302 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10303 } 10304 10305 10306 /* 10307 * Function: sd_xbuf_strategy 10308 * 10309 * Description: Function for initiating IO operations via the 10310 * ddi_xbuf_qstrategy() mechanism. 10311 * 10312 * Context: Kernel thread context. 10313 */ 10314 10315 static void 10316 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10317 { 10318 struct sd_lun *un = arg; 10319 10320 ASSERT(bp != NULL); 10321 ASSERT(xp != NULL); 10322 ASSERT(un != NULL); 10323 ASSERT(!mutex_owned(SD_MUTEX(un))); 10324 10325 /* 10326 * Initialize the fields in the xbuf and save a pointer to the 10327 * xbuf in bp->b_private. 10328 */ 10329 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10330 10331 /* Send the buf down the iostart chain */ 10332 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10333 } 10334 10335 10336 /* 10337 * Function: sd_xbuf_init 10338 * 10339 * Description: Prepare the given sd_xbuf struct for use. 10340 * 10341 * Arguments: un - ptr to softstate 10342 * bp - ptr to associated buf(9S) 10343 * xp - ptr to associated sd_xbuf 10344 * chain_type - IO chain type to use: 10345 * SD_CHAIN_NULL 10346 * SD_CHAIN_BUFIO 10347 * SD_CHAIN_USCSI 10348 * SD_CHAIN_DIRECT 10349 * SD_CHAIN_DIRECT_PRIORITY 10350 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10351 * initialization; may be NULL if none. 10352 * 10353 * Context: Kernel thread context 10354 */ 10355 10356 static void 10357 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10358 uchar_t chain_type, void *pktinfop) 10359 { 10360 int index; 10361 10362 ASSERT(un != NULL); 10363 ASSERT(bp != NULL); 10364 ASSERT(xp != NULL); 10365 10366 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10367 bp, chain_type); 10368 10369 xp->xb_un = un; 10370 xp->xb_pktp = NULL; 10371 xp->xb_pktinfo = pktinfop; 10372 xp->xb_private = bp->b_private; 10373 xp->xb_blkno = (daddr_t)bp->b_blkno; 10374 10375 /* 10376 * Set up the iostart and iodone chain indexes in the xbuf, based 10377 * upon the specified chain type to use. 10378 */ 10379 switch (chain_type) { 10380 case SD_CHAIN_NULL: 10381 /* 10382 * Fall thru to just use the values for the buf type, even 10383 * though for the NULL chain these values will never be used. 10384 */ 10385 /* FALLTHRU */ 10386 case SD_CHAIN_BUFIO: 10387 index = un->un_buf_chain_type; 10388 break; 10389 case SD_CHAIN_USCSI: 10390 index = un->un_uscsi_chain_type; 10391 break; 10392 case SD_CHAIN_DIRECT: 10393 index = un->un_direct_chain_type; 10394 break; 10395 case SD_CHAIN_DIRECT_PRIORITY: 10396 index = un->un_priority_chain_type; 10397 break; 10398 default: 10399 /* We're really broken if we ever get here... */ 10400 panic("sd_xbuf_init: illegal chain type!"); 10401 /*NOTREACHED*/ 10402 } 10403 10404 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10405 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10406 10407 /* 10408 * It might be a bit easier to simply bzero the entire xbuf above, 10409 * but it turns out that since we init a fair number of members anyway, 10410 * we save a fair number of cycles by doing explicit assignment of zero.
10411 */ 10412 xp->xb_pkt_flags = 0; 10413 xp->xb_dma_resid = 0; 10414 xp->xb_retry_count = 0; 10415 xp->xb_victim_retry_count = 0; 10416 xp->xb_ua_retry_count = 0; 10417 xp->xb_nr_retry_count = 0; 10418 xp->xb_sense_bp = NULL; 10419 xp->xb_sense_status = 0; 10420 xp->xb_sense_state = 0; 10421 xp->xb_sense_resid = 0; 10422 10423 bp->b_private = xp; 10424 bp->b_flags &= ~(B_DONE | B_ERROR); 10425 bp->b_resid = 0; 10426 bp->av_forw = NULL; 10427 bp->av_back = NULL; 10428 bioerror(bp, 0); 10429 10430 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10431 } 10432 10433 10434 /* 10435 * Function: sd_uscsi_strategy 10436 * 10437 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10438 * 10439 * Arguments: bp - buf struct ptr 10440 * 10441 * Return Code: Always returns 0 10442 * 10443 * Context: Kernel thread context 10444 */ 10445 10446 static int 10447 sd_uscsi_strategy(struct buf *bp) 10448 { 10449 struct sd_lun *un; 10450 struct sd_uscsi_info *uip; 10451 struct sd_xbuf *xp; 10452 uchar_t chain_type; 10453 10454 ASSERT(bp != NULL); 10455 10456 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10457 if (un == NULL) { 10458 bioerror(bp, EIO); 10459 bp->b_resid = bp->b_bcount; 10460 biodone(bp); 10461 return (0); 10462 } 10463 10464 ASSERT(!mutex_owned(SD_MUTEX(un))); 10465 10466 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10467 10468 mutex_enter(SD_MUTEX(un)); 10469 /* 10470 * atapi: Since we currently run the CD in PIO mode, we need to 10471 * call bp_mapin here to avoid bp_mapin being called from interrupt 10472 * context under the HBA's init_pkt routine. 10473 */ 10474 if (un->un_f_cfg_is_atapi == TRUE) { 10475 mutex_exit(SD_MUTEX(un)); 10476 bp_mapin(bp); 10477 mutex_enter(SD_MUTEX(un)); 10478 } 10479 un->un_ncmds_in_driver++; 10480 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10481 un->un_ncmds_in_driver); 10482 mutex_exit(SD_MUTEX(un)); 10483 10484 /* 10485 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10486 */ 10487 ASSERT(bp->b_private != NULL); 10488 uip = (struct sd_uscsi_info *)bp->b_private; 10489 10490 switch (uip->ui_flags) { 10491 case SD_PATH_DIRECT: 10492 chain_type = SD_CHAIN_DIRECT; 10493 break; 10494 case SD_PATH_DIRECT_PRIORITY: 10495 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10496 break; 10497 default: 10498 chain_type = SD_CHAIN_USCSI; 10499 break; 10500 } 10501 10502 /* 10503 * We may allocate an extra buf for external USCSI commands. If the 10504 * application asks for more than 20 bytes of sense data via USCSI, 10505 * the SCSA layer will allocate a 252-byte sense buf for that command.
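 *
 *	(As a reference sketch of the allocation below: the xbuf is
 *	sized so its trailing sense area can hold MAX_SENSE_LENGTH
 *	rather than SENSE_LENGTH bytes,
 *
 *		sizeof (struct sd_xbuf) - SENSE_LENGTH + MAX_SENSE_LENGTH
 *
 *	and sd_uscsi_iodone() must free it with the same expression.)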
10506 */ 10507 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10508 SENSE_LENGTH) { 10509 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10510 MAX_SENSE_LENGTH, KM_SLEEP); 10511 } else { 10512 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10513 } 10514 10515 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10516 10517 /* Use the index obtained within xbuf_init */ 10518 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10519 10520 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10521 10522 return (0); 10523 } 10524 10525 /* 10526 * Function: sd_send_scsi_cmd 10527 * 10528 * Description: Runs a USCSI command for the user (when called thru sdioctl) 10529 * or for the driver 10530 * 10531 * Arguments: dev - the dev_t for the device 10532 * incmd - ptr to a valid uscsi_cmd struct 10533 * flag - bit flag, indicating open settings, 32/64 bit type 10534 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10535 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10536 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10537 * to use the USCSI "direct" chain and bypass the normal 10538 * command waitq. 10539 * 10540 * Return Code: 0 - successful completion of the given command 10541 * EIO - scsi_uscsi_handle_cmd() failed 10542 * ENXIO - soft state not found for specified dev 10543 * EINVAL 10544 * EFAULT - copyin/copyout error 10545 * return code of scsi_uscsi_handle_cmd(): 10546 * EIO 10547 * ENXIO 10548 * EACCES 10549 * 10550 * Context: Waits for command to complete. Can sleep. 10551 */ 10552 10553 static int 10554 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10555 enum uio_seg dataspace, int path_flag) 10556 { 10557 struct sd_uscsi_info *uip; 10558 struct uscsi_cmd *uscmd; 10559 struct sd_lun *un; 10560 int format = 0; 10561 int rval; 10562 10563 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10564 if (un == NULL) { 10565 return (ENXIO); 10566 } 10567 10568 ASSERT(!mutex_owned(SD_MUTEX(un))); 10569 10570 #ifdef SDDEBUG 10571 switch (dataspace) { 10572 case UIO_USERSPACE: 10573 SD_TRACE(SD_LOG_IO, un, 10574 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10575 break; 10576 case UIO_SYSSPACE: 10577 SD_TRACE(SD_LOG_IO, un, 10578 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10579 break; 10580 default: 10581 SD_TRACE(SD_LOG_IO, un, 10582 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10583 break; 10584 } 10585 #endif 10586 10587 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10588 SD_ADDRESS(un), &uscmd); 10589 if (rval != 0) { 10590 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10591 "scsi_uscsi_alloc_and_copyin failed\n"); 10592 return (rval); 10593 } 10594 10595 if ((uscmd->uscsi_cdb != NULL) && 10596 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10597 mutex_enter(SD_MUTEX(un)); 10598 un->un_f_format_in_progress = TRUE; 10599 mutex_exit(SD_MUTEX(un)); 10600 format = 1; 10601 } 10602 10603 /* 10604 * Allocate an sd_uscsi_info struct and fill it with the info 10605 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10606 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10607 * since we allocate the buf here in this function, we do not 10608 * need to preserve the prior contents of b_private.
10609 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10610 */ 10611 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10612 uip->ui_flags = path_flag; 10613 uip->ui_cmdp = uscmd; 10614 10615 /* 10616 * Commands sent with priority are intended for error recovery 10617 * situations, and do not have retries performed. 10618 */ 10619 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10620 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10621 } 10622 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10623 10624 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10625 sd_uscsi_strategy, NULL, uip); 10626 10627 #ifdef SDDEBUG 10628 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10629 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10630 uscmd->uscsi_status, uscmd->uscsi_resid); 10631 if (uscmd->uscsi_bufaddr != NULL) { 10632 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10633 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10634 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10635 if (dataspace == UIO_SYSSPACE) { 10636 SD_DUMP_MEMORY(un, SD_LOG_IO, 10637 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10638 uscmd->uscsi_buflen, SD_LOG_HEX); 10639 } 10640 } 10641 #endif 10642 10643 if (format == 1) { 10644 mutex_enter(SD_MUTEX(un)); 10645 un->un_f_format_in_progress = FALSE; 10646 mutex_exit(SD_MUTEX(un)); 10647 } 10648 10649 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10650 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10651 10652 return (rval); 10653 } 10654 10655 10656 /* 10657 * Function: sd_buf_iodone 10658 * 10659 * Description: Frees the sd_xbuf & returns the buf to its originator. 10660 * 10661 * Context: May be called from interrupt context. 10662 */ 10663 /* ARGSUSED */ 10664 static void 10665 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10666 { 10667 struct sd_xbuf *xp; 10668 10669 ASSERT(un != NULL); 10670 ASSERT(bp != NULL); 10671 ASSERT(!mutex_owned(SD_MUTEX(un))); 10672 10673 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10674 10675 xp = SD_GET_XBUF(bp); 10676 ASSERT(xp != NULL); 10677 10678 mutex_enter(SD_MUTEX(un)); 10679 10680 /* 10681 * Record the time at which the command completed. 10682 * This is used to determine whether the device has been 10683 * idle long enough to be marked idle to the PM framework. 10684 * This lowers the overhead, and therefore improves 10685 * performance per I/O operation. 10686 */ 10687 un->un_pm_idle_time = ddi_get_time(); 10688 10689 un->un_ncmds_in_driver--; 10690 ASSERT(un->un_ncmds_in_driver >= 0); 10691 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10692 un->un_ncmds_in_driver); 10693 10694 mutex_exit(SD_MUTEX(un)); 10695 10696 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10697 biodone(bp); /* bp is gone after this */ 10698 10699 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10700 } 10701 10702 10703 /* 10704 * Function: sd_uscsi_iodone 10705 * 10706 * Description: Frees the sd_xbuf & returns the buf to its originator. 10707 * 10708 * Context: May be called from interrupt context.
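 *
 *		Note: the sd_xbuf free below must mirror the sizing used
 *		at allocation in sd_uscsi_strategy(); both key off the
 *		same uscsi_rqlen > SENSE_LENGTH test.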
10709 */ 10710 /* ARGSUSED */ 10711 static void 10712 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10713 { 10714 struct sd_xbuf *xp; 10715 10716 ASSERT(un != NULL); 10717 ASSERT(bp != NULL); 10718 10719 xp = SD_GET_XBUF(bp); 10720 ASSERT(xp != NULL); 10721 ASSERT(!mutex_owned(SD_MUTEX(un))); 10722 10723 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10724 10725 bp->b_private = xp->xb_private; 10726 10727 mutex_enter(SD_MUTEX(un)); 10728 10729 /* 10730 * Record the time at which the command completed. 10731 * This is used to determine whether the device has been 10732 * idle long enough to be marked idle to the PM framework. 10733 * This lowers the overhead, and therefore improves 10734 * performance per I/O operation. 10735 */ 10736 un->un_pm_idle_time = ddi_get_time(); 10737 10738 un->un_ncmds_in_driver--; 10739 ASSERT(un->un_ncmds_in_driver >= 0); 10740 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10741 un->un_ncmds_in_driver); 10742 10743 mutex_exit(SD_MUTEX(un)); 10744 10745 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10746 SENSE_LENGTH) { 10747 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 10748 MAX_SENSE_LENGTH); 10749 } else { 10750 kmem_free(xp, sizeof (struct sd_xbuf)); 10751 } 10752 10753 biodone(bp); 10754 10755 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10756 } 10757 10758 10759 /* 10760 * Function: sd_mapblockaddr_iostart 10761 * 10762 * Description: Verify request lies within the partition limits for 10763 * the indicated minor device. Issue "overrun" buf if 10764 * request would exceed partition range. Converts 10765 * partition-relative block address to absolute. 10766 * 10767 * Context: Can sleep 10768 * 10769 * Issues: This follows what the old code did, in terms of accessing 10770 * some of the partition info in the unit struct without holding 10771 * the mutex. This is a general issue, if the partition info 10772 * can be altered while IO is in progress... as soon as we send 10773 * a buf, its partitioning can be invalid before it gets to the 10774 * device. Probably the right fix is to move partitioning out 10775 * of the driver entirely. 10776 */ 10777 10778 static void 10779 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10780 { 10781 diskaddr_t nblocks; /* #blocks in the given partition */ 10782 daddr_t blocknum; /* Block number specified by the buf */ 10783 size_t requested_nblocks; 10784 size_t available_nblocks; 10785 int partition; 10786 diskaddr_t partition_offset; 10787 struct sd_xbuf *xp; 10788 10789 10790 ASSERT(un != NULL); 10791 ASSERT(bp != NULL); 10792 ASSERT(!mutex_owned(SD_MUTEX(un))); 10793 10794 SD_TRACE(SD_LOG_IO_PARTITION, un, 10795 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10796 10797 xp = SD_GET_XBUF(bp); 10798 ASSERT(xp != NULL); 10799 10800 /* 10801 * If the geometry is not indicated as valid, attempt to access 10802 * the unit & verify the geometry/label. This can be the case for 10803 * removable-media devices, or if the device was opened in 10804 * NDELAY/NONBLOCK mode. 10805 */ 10806 if (!SD_IS_VALID_LABEL(un) && 10807 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10808 /* 10809 * For removable devices it is possible to start an I/O 10810 * without media by opening the device in nodelay mode. 10811 * Also, for writable CDs there are many scenarios where 10812 * there is no geometry yet, but the volume manager tries to 10813 * issue a read() just because it can see the TOC on the CD. 10814 * So do not print a message for removables.
10815 */ 10816 if (!un->un_f_has_removable_media) { 10817 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10818 "i/o to invalid geometry\n"); 10819 } 10820 bioerror(bp, EIO); 10821 bp->b_resid = bp->b_bcount; 10822 SD_BEGIN_IODONE(index, un, bp); 10823 return; 10824 } 10825 10826 partition = SDPART(bp->b_edev); 10827 10828 nblocks = 0; 10829 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10830 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10831 10832 /* 10833 * blocknum is the starting block number of the request. At this 10834 * point it is still relative to the start of the minor device. 10835 */ 10836 blocknum = xp->xb_blkno; 10837 10838 /* 10839 * Legacy: If the starting block number is one past the last block 10840 * in the partition, do not set B_ERROR in the buf. 10841 */ 10842 if (blocknum == nblocks) { 10843 goto error_exit; 10844 } 10845 10846 /* 10847 * Confirm that the first block of the request lies within the 10848 * partition limits. Also the requested number of bytes must be 10849 * a multiple of the system block size. 10850 */ 10851 if ((blocknum < 0) || (blocknum >= nblocks) || 10852 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10853 bp->b_flags |= B_ERROR; 10854 goto error_exit; 10855 } 10856 10857 /* 10858 * If the requested # blocks exceeds the available # blocks, that 10859 * is an overrun of the partition. 10860 */ 10861 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10862 available_nblocks = (size_t)(nblocks - blocknum); 10863 ASSERT(nblocks >= blocknum); 10864 10865 if (requested_nblocks > available_nblocks) { 10866 /* 10867 * Allocate an "overrun" buf to allow the request to proceed 10868 * for the amount of space available in the partition. The 10869 * amount not transferred will be added into the b_resid 10870 * when the operation is complete. The overrun buf 10871 * replaces the original buf here, and the original buf 10872 * is saved inside the overrun buf, for later use. 10873 */ 10874 size_t resid = SD_SYSBLOCKS2BYTES(un, 10875 (offset_t)(requested_nblocks - available_nblocks)); 10876 size_t count = bp->b_bcount - resid; 10877 /* 10878 * Note: count is an unsigned entity and thus can never 10879 * be negative, so instead ASSERT that the original 10880 * values are consistent. 10881 */ 10882 ASSERT(bp->b_bcount >= resid); 10883 10884 bp = sd_bioclone_alloc(bp, count, blocknum, 10885 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10886 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10887 ASSERT(xp != NULL); 10888 } 10889 10890 /* At this point there should be no residual for this buf. */ 10891 ASSERT(bp->b_resid == 0); 10892 10893 /* Convert the block number to an absolute address. */ 10894 xp->xb_blkno += partition_offset; 10895 10896 SD_NEXT_IOSTART(index, un, bp); 10897 10898 SD_TRACE(SD_LOG_IO_PARTITION, un, 10899 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10900 10901 return; 10902 10903 error_exit: 10904 bp->b_resid = bp->b_bcount; 10905 SD_BEGIN_IODONE(index, un, bp); 10906 SD_TRACE(SD_LOG_IO_PARTITION, un, 10907 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10908 } 10909 10910 10911 /* 10912 * Function: sd_mapblockaddr_iodone 10913 * 10914 * Description: Completion-side processing for partition management. 10915 * 10916 * Context: May be called under interrupt context 10917 */ 10918 10919 static void 10920 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10921 { 10922 /* int partition; */ /* Not used, see below.
*/ 10923 ASSERT(un != NULL); 10924 ASSERT(bp != NULL); 10925 ASSERT(!mutex_owned(SD_MUTEX(un))); 10926 10927 SD_TRACE(SD_LOG_IO_PARTITION, un, 10928 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10929 10930 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10931 /* 10932 * We have an "overrun" buf to deal with... 10933 */ 10934 struct sd_xbuf *xp; 10935 struct buf *obp; /* ptr to the original buf */ 10936 10937 xp = SD_GET_XBUF(bp); 10938 ASSERT(xp != NULL); 10939 10940 /* Retrieve the pointer to the original buf */ 10941 obp = (struct buf *)xp->xb_private; 10942 ASSERT(obp != NULL); 10943 10944 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10945 bioerror(obp, bp->b_error); 10946 10947 sd_bioclone_free(bp); 10948 10949 /* 10950 * Get back the original buf. 10951 * Note that since the restoration of xb_blkno below 10952 * was removed, the sd_xbuf is not needed. 10953 */ 10954 bp = obp; 10955 /* 10956 * xp = SD_GET_XBUF(bp); 10957 * ASSERT(xp != NULL); 10958 */ 10959 } 10960 10961 /* 10962 * Convert xp->xb_blkno back to a minor-device relative value. 10963 * Note: this has been commented out, as it is not needed in the 10964 * current implementation of the driver (ie, this function 10965 * is at the top of the layering chains, so the info will be 10966 * discarded) and it is in the "hot" IO path. 10967 * 10968 * partition = getminor(bp->b_edev) & SDPART_MASK; 10969 * xp->xb_blkno -= un->un_offset[partition]; 10970 */ 10971 10972 SD_NEXT_IODONE(index, un, bp); 10973 10974 SD_TRACE(SD_LOG_IO_PARTITION, un, 10975 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10976 } 10977 10978 10979 /* 10980 * Function: sd_mapblocksize_iostart 10981 * 10982 * Description: Convert between system block size (un->un_sys_blocksize) 10983 * and target block size (un->un_tgt_blocksize). 10984 * 10985 * Context: Can sleep to allocate resources. 10986 * 10987 * Assumptions: A higher layer has already performed any partition validation, 10988 * and converted the xp->xb_blkno to an absolute value relative 10989 * to the start of the device. 10990 * 10991 * It is also assumed that the higher layer has implemented 10992 * an "overrun" mechanism for the case where the request would 10993 * read/write beyond the end of a partition. In this case we 10994 * assume (and ASSERT) that bp->b_resid == 0. 10995 * 10996 * Note: The implementation for this routine assumes the target 10997 * block size remains constant between allocation and transport. 10998 */ 10999 11000 static void 11001 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11002 { 11003 struct sd_mapblocksize_info *bsp; 11004 struct sd_xbuf *xp; 11005 offset_t first_byte; 11006 daddr_t start_block, end_block; 11007 daddr_t request_bytes; 11008 ushort_t is_aligned = FALSE; 11009 11010 ASSERT(un != NULL); 11011 ASSERT(bp != NULL); 11012 ASSERT(!mutex_owned(SD_MUTEX(un))); 11013 ASSERT(bp->b_resid == 0); 11014 11015 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11016 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11017 11018 /* 11019 * For a non-writable CD, a write request is an error 11020 */ 11021 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11022 (un->un_f_mmc_writable_media == FALSE)) { 11023 bioerror(bp, EIO); 11024 bp->b_resid = bp->b_bcount; 11025 SD_BEGIN_IODONE(index, un, bp); 11026 return; 11027 } 11028 11029 /* 11030 * We do not need a shadow buf if the device is using 11031 * un->un_sys_blocksize as its block size or if bcount == 0.
11032 * In this case there is no layer-private data block allocated. 11033 */ 11034 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11035 (bp->b_bcount == 0)) { 11036 goto done; 11037 } 11038 11039 #if defined(__i386) || defined(__amd64) 11040 /* We do not support non-block-aligned transfers for ROD devices */ 11041 ASSERT(!ISROD(un)); 11042 #endif 11043 11044 xp = SD_GET_XBUF(bp); 11045 ASSERT(xp != NULL); 11046 11047 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11048 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11049 un->un_tgt_blocksize, un->un_sys_blocksize); 11050 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11051 "request start block:0x%x\n", xp->xb_blkno); 11052 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11053 "request len:0x%x\n", bp->b_bcount); 11054 11055 /* 11056 * Allocate the layer-private data area for the mapblocksize layer. 11057 * Layers are allowed to use the xb_private member of the sd_xbuf 11058 * struct to store the pointer to their layer-private data block, but 11059 * each layer also has the responsibility of restoring the prior 11060 * contents of xb_private before returning the buf/xbuf to the 11061 * higher layer that sent it. 11062 * 11063 * Here we save the prior contents of xp->xb_private into the 11064 * bsp->mbs_oprivate field of our layer-private data area. This value 11065 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11066 * the layer-private area and returning the buf/xbuf to the layer 11067 * that sent it. 11068 * 11069 * Note that here we use kmem_zalloc for the allocation as there are 11070 * parts of the mapblocksize code that expect certain fields to be 11071 * zero unless explicitly set to a required value. 11072 */ 11073 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11074 bsp->mbs_oprivate = xp->xb_private; 11075 xp->xb_private = bsp; 11076 11077 /* 11078 * This treats the data on the disk (target) as an array of bytes. 11079 * first_byte is the byte offset, from the beginning of the device, 11080 * to the location of the request. This is converted from a 11081 * un->un_sys_blocksize block address to a byte offset, and then back 11082 * to a block address based upon a un->un_tgt_blocksize block size. 11083 * 11084 * xp->xb_blkno should be absolute upon entry into this function, 11085 * but it is based upon partitions that use the "system" 11086 * block size. It must be adjusted to reflect the block size of 11087 * the target. 11088 * 11089 * Note that end_block is actually the block that follows the last 11090 * block of the request, but that's what is needed for the computation. 11091 */ 11092 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11093 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11094 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11095 un->un_tgt_blocksize; 11096 11097 /* request_bytes is rounded up to a multiple of the target block size */ 11098 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11099 11100 /* 11101 * See if the starting address of the request and the request 11102 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11103 * then we do not need to allocate a shadow buf to handle the request. 11104 */ 11105 if (((first_byte % un->un_tgt_blocksize) == 0) && 11106 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11107 is_aligned = TRUE; 11108 } 11109 11110 if ((bp->b_flags & B_READ) == 0) { 11111 /* 11112 * Lock the range for a write operation.
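The range lock covers * target blocks start_block through (end_block - 1), which keeps * writes that share a partial target block from interleaving.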
An aligned request is 11113 * considered a simple write; otherwise the request must be a 11114 * read-modify-write. 11115 */ 11116 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11117 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11118 } 11119 11120 /* 11121 * Alloc a shadow buf if the request is not aligned. Also, this is 11122 * where the READ command is generated for a read-modify-write. (The 11123 * write phase is deferred until after the read completes.) 11124 */ 11125 if (is_aligned == FALSE) { 11126 11127 struct sd_mapblocksize_info *shadow_bsp; 11128 struct sd_xbuf *shadow_xp; 11129 struct buf *shadow_bp; 11130 11131 /* 11132 * Allocate the shadow buf and its associated xbuf. Note that 11133 * after this call the xb_blkno value in both the original 11134 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11135 * same: absolute (relative to the start of the device) and 11136 * adjusted for the target block size. The b_blkno in the 11137 * shadow buf will also be set to this value. We should never 11138 * change b_blkno in the original bp however. 11139 * 11140 * Note also that the shadow buf will always need to be a 11141 * READ command, regardless of whether the incoming command 11142 * is a READ or a WRITE. 11143 */ 11144 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11145 xp->xb_blkno, 11146 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11147 11148 shadow_xp = SD_GET_XBUF(shadow_bp); 11149 11150 /* 11151 * Allocate the layer-private data for the shadow buf. 11152 * (No need to preserve xb_private in the shadow xbuf.) 11153 */ 11154 shadow_xp->xb_private = shadow_bsp = 11155 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11156 11157 /* 11158 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11159 * to figure out where the start of the user data is (based upon 11160 * the system block size) in the data returned by the READ 11161 * command (which will be based upon the target blocksize). Note 11162 * that this is only really used if the request is unaligned. 11163 */ 11164 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11165 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11166 ASSERT((bsp->mbs_copy_offset >= 0) && 11167 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11168 11169 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11170 11171 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11172 11173 /* Transfer the wmap (if any) to the shadow buf */ 11174 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11175 bsp->mbs_wmp = NULL; 11176 11177 /* 11178 * The shadow buf goes on from here in place of the 11179 * original buf. 11180 */ 11181 shadow_bsp->mbs_orig_bp = bp; 11182 bp = shadow_bp; 11183 } 11184 11185 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11186 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11187 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11188 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11189 request_bytes); 11190 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11191 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 11192 11193 done: 11194 SD_NEXT_IOSTART(index, un, bp); 11195 11196 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11197 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11198 } 11199 11200 11201 /* 11202 * Function: sd_mapblocksize_iodone 11203 * 11204 * Description: Completion side processing for block-size mapping.
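For an unaligned * request this routine runs once for the READ phase (on the shadow * buf) and, if the original command was a WRITE, again after the * deferred WRITE of the read-modify-write completes.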
11205 * 11206 * Context: May be called under interrupt context 11207 */ 11208 11209 static void 11210 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11211 { 11212 struct sd_mapblocksize_info *bsp; 11213 struct sd_xbuf *xp; 11214 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11215 struct buf *orig_bp; /* ptr to the original buf */ 11216 offset_t shadow_end; 11217 offset_t request_end; 11218 offset_t shadow_start; 11219 ssize_t copy_offset; 11220 size_t copy_length; 11221 size_t shortfall; 11222 uint_t is_write; /* TRUE if this bp is a WRITE */ 11223 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11224 11225 ASSERT(un != NULL); 11226 ASSERT(bp != NULL); 11227 11228 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11229 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11230 11231 /* 11232 * There is no shadow buf or layer-private data if the target is 11233 * using un->un_sys_blocksize as its block size or if bcount == 0. 11234 */ 11235 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11236 (bp->b_bcount == 0)) { 11237 goto exit; 11238 } 11239 11240 xp = SD_GET_XBUF(bp); 11241 ASSERT(xp != NULL); 11242 11243 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11244 bsp = xp->xb_private; 11245 11246 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11247 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11248 11249 if (is_write) { 11250 /* 11251 * For a WRITE request we must free up the block range that 11252 * we have locked up. This holds regardless of whether this is 11253 * an aligned write request or a read-modify-write request. 11254 */ 11255 sd_range_unlock(un, bsp->mbs_wmp); 11256 bsp->mbs_wmp = NULL; 11257 } 11258 11259 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11260 /* 11261 * An aligned read or write command will have no shadow buf; 11262 * there is not much else to do with it. 11263 */ 11264 goto done; 11265 } 11266 11267 orig_bp = bsp->mbs_orig_bp; 11268 ASSERT(orig_bp != NULL); 11269 orig_xp = SD_GET_XBUF(orig_bp); 11270 ASSERT(orig_xp != NULL); 11271 ASSERT(!mutex_owned(SD_MUTEX(un))); 11272 11273 if (!is_write && has_wmap) { 11274 /* 11275 * A READ with a wmap means this is the READ phase of a 11276 * read-modify-write. If an error occurred on the READ then 11277 * we do not proceed with the WRITE phase or copy any data. 11278 * Just release the write maps and return with an error. 11279 */ 11280 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11281 orig_bp->b_resid = orig_bp->b_bcount; 11282 bioerror(orig_bp, bp->b_error); 11283 sd_range_unlock(un, bsp->mbs_wmp); 11284 goto freebuf_done; 11285 } 11286 } 11287 11288 /* 11289 * Here is where we set up to copy the data from the shadow buf 11290 * into the space associated with the original buf. 11291 * 11292 * To deal with the conversion between block sizes, these 11293 * computations treat the data as an array of bytes, with the 11294 * first byte (byte 0) corresponding to the first byte in the 11295 * first block on the disk. 11296 */ 11297 11298 /* 11299 * shadow_start and shadow_end give the location and extent of 11300 * the data returned with the shadow IO request. 11301 */ 11302 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11303 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11304 11305 /* 11306 * copy_offset gives the offset (in bytes) from the start of the first 11307 * block of the READ request to the beginning of the data.
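For illustration (hypothetical sizes): with * un_sys_blocksize of 512, un_tgt_blocksize of 2048, and a request * starting at system block 3, first_byte is 1536, the target start * block is 0, and copy_offset is therefore 1536.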
We retrieve 11308 * this value from the mbs_copy_offset field of the layer-private 11309 * data, where it was saved by sd_mapblocksize_iostart(). copy_length 11310 * gives the amount of data to be copied (in bytes). 11311 */ 11312 copy_offset = bsp->mbs_copy_offset; 11313 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11314 copy_length = orig_bp->b_bcount; 11315 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11316 11317 /* 11318 * Set up the resid and error fields of orig_bp as appropriate. 11319 */ 11320 if (shadow_end >= request_end) { 11321 /* We got all the requested data; set resid to zero */ 11322 orig_bp->b_resid = 0; 11323 } else { 11324 /* 11325 * We failed to get enough data to fully satisfy the original 11326 * request. Just copy back whatever data we got and set 11327 * up the residual and error code as required. 11328 * 11329 * 'shortfall' is the amount by which the data received with the 11330 * shadow buf has "fallen short" of the requested amount. 11331 */ 11332 shortfall = (size_t)(request_end - shadow_end); 11333 11334 if (shortfall > orig_bp->b_bcount) { 11335 /* 11336 * We did not get enough data to even partially 11337 * fulfill the original request. The residual is 11338 * equal to the amount requested. 11339 */ 11340 orig_bp->b_resid = orig_bp->b_bcount; 11341 } else { 11342 /* 11343 * We did not get all the data that we requested 11344 * from the device, but we will try to return what 11345 * portion we did get. 11346 */ 11347 orig_bp->b_resid = shortfall; 11348 } 11349 ASSERT(copy_length >= orig_bp->b_resid); 11350 copy_length -= orig_bp->b_resid; 11351 } 11352 11353 /* Propagate the error code from the shadow buf to the original buf */ 11354 bioerror(orig_bp, bp->b_error); 11355 11356 if (is_write) { 11357 goto freebuf_done; /* No data copying for a WRITE */ 11358 } 11359 11360 if (has_wmap) { 11361 /* 11362 * This is a READ command from the READ phase of a 11363 * read-modify-write request. We have to copy the data given 11364 * by the user OVER the data returned by the READ command, 11365 * then convert the command from a READ to a WRITE and send 11366 * it back to the target. 11367 */ 11368 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11369 copy_length); 11370 11371 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11372 11373 /* 11374 * Dispatch the WRITE command to the taskq thread, which 11375 * will in turn send the command to the target. When the 11376 * WRITE command completes, we (sd_mapblocksize_iodone()) 11377 * will get called again as part of the iodone chain 11378 * processing for it. Note that we will still be dealing 11379 * with the shadow buf at that point. 11380 */ 11381 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11382 KM_NOSLEEP) != 0) { 11383 /* 11384 * Dispatch was successful so we are done. Return 11385 * without going any higher up the iodone chain. Do 11386 * not free up any layer-private data until after the 11387 * WRITE completes. 11388 */ 11389 return; 11390 } 11391 11392 /* 11393 * Dispatch of the WRITE command failed; set up the error 11394 * condition and send this IO back up the iodone chain. 11395 */ 11396 bioerror(orig_bp, EIO); 11397 orig_bp->b_resid = orig_bp->b_bcount; 11398 11399 } else { 11400 /* 11401 * This is a regular READ request (ie, not a RMW). Copy the 11402 * data from the shadow buf into the original buf.
The 11403 * copy_offset compensates for any "misalignment" between the 11404 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11405 * original buf (with its un->un_sys_blocksize blocks). 11406 */ 11407 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11408 copy_length); 11409 } 11410 11411 freebuf_done: 11412 11413 /* 11414 * At this point we still have both the shadow buf AND the original 11415 * buf to deal with, as well as the layer-private data area in each. 11416 * Local variables are as follows: 11417 * 11418 * bp -- points to shadow buf 11419 * xp -- points to xbuf of shadow buf 11420 * bsp -- points to layer-private data area of shadow buf 11421 * orig_bp -- points to original buf 11422 * 11423 * First free the shadow buf and its associated xbuf, then free the 11424 * layer-private data area from the shadow buf. There is no need to 11425 * restore xb_private in the shadow xbuf. 11426 */ 11427 sd_shadow_buf_free(bp); 11428 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11429 11430 /* 11431 * Now update the local variables to point to the original buf, xbuf, 11432 * and layer-private area. 11433 */ 11434 bp = orig_bp; 11435 xp = SD_GET_XBUF(bp); 11436 ASSERT(xp != NULL); 11437 ASSERT(xp == orig_xp); 11438 bsp = xp->xb_private; 11439 ASSERT(bsp != NULL); 11440 11441 done: 11442 /* 11443 * Restore xb_private to whatever it was set to by the next higher 11444 * layer in the chain, then free the layer-private data area. 11445 */ 11446 xp->xb_private = bsp->mbs_oprivate; 11447 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11448 11449 exit: 11450 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11451 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11452 11453 SD_NEXT_IODONE(index, un, bp); 11454 } 11455 11456 11457 /* 11458 * Function: sd_checksum_iostart 11459 * 11460 * Description: A stub function for a layer that's currently not used. 11461 * For now just a placeholder. 11462 * 11463 * Context: Kernel thread context 11464 */ 11465 11466 static void 11467 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11468 { 11469 ASSERT(un != NULL); 11470 ASSERT(bp != NULL); 11471 ASSERT(!mutex_owned(SD_MUTEX(un))); 11472 SD_NEXT_IOSTART(index, un, bp); 11473 } 11474 11475 11476 /* 11477 * Function: sd_checksum_iodone 11478 * 11479 * Description: A stub function for a layer that's currently not used. 11480 * For now just a placeholder. 11481 * 11482 * Context: May be called under interrupt context 11483 */ 11484 11485 static void 11486 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11487 { 11488 ASSERT(un != NULL); 11489 ASSERT(bp != NULL); 11490 ASSERT(!mutex_owned(SD_MUTEX(un))); 11491 SD_NEXT_IODONE(index, un, bp); 11492 } 11493 11494 11495 /* 11496 * Function: sd_checksum_uscsi_iostart 11497 * 11498 * Description: A stub function for a layer that's currently not used. 11499 * For now just a placeholder. 11500 * 11501 * Context: Kernel thread context 11502 */ 11503 11504 static void 11505 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11506 { 11507 ASSERT(un != NULL); 11508 ASSERT(bp != NULL); 11509 ASSERT(!mutex_owned(SD_MUTEX(un))); 11510 SD_NEXT_IOSTART(index, un, bp); 11511 } 11512 11513 11514 /* 11515 * Function: sd_checksum_uscsi_iodone 11516 * 11517 * Description: A stub function for a layer that's currently not used. 11518 * For now just a placeholder. 
11519 * 11520 * Context: May be called under interrupt context 11521 */ 11522 11523 static void 11524 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11525 { 11526 ASSERT(un != NULL); 11527 ASSERT(bp != NULL); 11528 ASSERT(!mutex_owned(SD_MUTEX(un))); 11529 SD_NEXT_IODONE(index, un, bp); 11530 } 11531 11532 11533 /* 11534 * Function: sd_pm_iostart 11535 * 11536 * Description: iostart-side routine for power management. 11537 * 11538 * Context: Kernel thread context 11539 */ 11540 11541 static void 11542 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11543 { 11544 ASSERT(un != NULL); 11545 ASSERT(bp != NULL); 11546 ASSERT(!mutex_owned(SD_MUTEX(un))); 11547 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11548 11549 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11550 11551 if (sd_pm_entry(un) != DDI_SUCCESS) { 11552 /* 11553 * Set up to return the failed buf back up the 'iodone' 11554 * side of the calling chain. 11555 */ 11556 bioerror(bp, EIO); 11557 bp->b_resid = bp->b_bcount; 11558 11559 SD_BEGIN_IODONE(index, un, bp); 11560 11561 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11562 return; 11563 } 11564 11565 SD_NEXT_IOSTART(index, un, bp); 11566 11567 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11568 } 11569 11570 11571 /* 11572 * Function: sd_pm_iodone 11573 * 11574 * Description: iodone-side routine for power management. 11575 * 11576 * Context: may be called from interrupt context 11577 */ 11578 11579 static void 11580 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11581 { 11582 ASSERT(un != NULL); 11583 ASSERT(bp != NULL); 11584 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11585 11586 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11587 11588 /* 11589 * After attach the following flag is only read, so don't 11590 * take the penalty of acquiring a mutex for it. 11591 */ 11592 if (un->un_f_pm_is_enabled == TRUE) { 11593 sd_pm_exit(un); 11594 } 11595 11596 SD_NEXT_IODONE(index, un, bp); 11597 11598 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11599 } 11600 11601 11602 /* 11603 * Function: sd_core_iostart 11604 * 11605 * Description: Primary driver function for enqueuing buf(9S) structs from 11606 * the system and initiating IO to the target device 11607 * 11608 * Context: Kernel thread context. Can sleep. 11609 * 11610 * Assumptions: - The given xp->xb_blkno is absolute 11611 * (ie, relative to the start of the device). 11612 * - The IO is to be done using the native blocksize of 11613 * the device, as specified in un->un_tgt_blocksize. 11614 */ 11615 /* ARGSUSED */ 11616 static void 11617 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11618 { 11619 struct sd_xbuf *xp; 11620 11621 ASSERT(un != NULL); 11622 ASSERT(bp != NULL); 11623 ASSERT(!mutex_owned(SD_MUTEX(un))); 11624 ASSERT(bp->b_resid == 0); 11625 11626 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11627 11628 xp = SD_GET_XBUF(bp); 11629 ASSERT(xp != NULL); 11630 11631 mutex_enter(SD_MUTEX(un)); 11632 11633 /* 11634 * If we are currently in the failfast state, fail any new IO 11635 * that has B_FAILFAST set, then return. 11636 */ 11637 if ((bp->b_flags & B_FAILFAST) && 11638 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11639 mutex_exit(SD_MUTEX(un)); 11640 bioerror(bp, EIO); 11641 bp->b_resid = bp->b_bcount; 11642 SD_BEGIN_IODONE(index, un, bp); 11643 return; 11644 } 11645 11646 if (SD_IS_DIRECT_PRIORITY(xp)) { 11647 /* 11648 * Priority command -- transport it immediately.
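Bypassing the * wait queue keeps error-recovery commands from getting stuck * behind queued normal IO.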
11649 * 11650 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11651 * because all direct priority commands should be associated 11652 * with error recovery actions which we don't want to retry. 11653 */ 11654 sd_start_cmds(un, bp); 11655 } else { 11656 /* 11657 * Normal command -- add it to the wait queue, then start 11658 * transporting commands from the wait queue. 11659 */ 11660 sd_add_buf_to_waitq(un, bp); 11661 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11662 sd_start_cmds(un, NULL); 11663 } 11664 11665 mutex_exit(SD_MUTEX(un)); 11666 11667 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11668 } 11669 11670 11671 /* 11672 * Function: sd_init_cdb_limits 11673 * 11674 * Description: This is to handle scsi_pkt initialization differences 11675 * between the driver platforms. 11676 * 11677 * Legacy behaviors: 11678 * 11679 * If the block number or the sector count exceeds the 11680 * capabilities of a Group 0 command, shift over to a 11681 * Group 1 command. We don't blindly use Group 1 11682 * commands because a) some drives (CDC Wren IVs) get a 11683 * bit confused, and b) there is probably a fair amount 11684 * of speed difference for a target to receive and decode 11685 * a 10 byte command instead of a 6 byte command. 11686 * 11687 * The xfer time difference of 6 vs 10 byte CDBs is 11688 * still significant so this code is still worthwhile. 11689 * 10 byte CDBs are very inefficient with the fas HBA driver 11690 * and older disks. Each CDB byte took 1 usec with some 11691 * popular disks. 11692 * 11693 * Context: Must be called at attach time 11694 */ 11695 11696 static void 11697 sd_init_cdb_limits(struct sd_lun *un) 11698 { 11699 int hba_cdb_limit; 11700 11701 /* 11702 * Use CDB_GROUP1 commands for most devices except for 11703 * parallel SCSI fixed drives in which case we get better 11704 * performance using CDB_GROUP0 commands (where applicable). 11705 */ 11706 un->un_mincdb = SD_CDB_GROUP1; 11707 #if !defined(__fibre) 11708 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11709 !un->un_f_has_removable_media) { 11710 un->un_mincdb = SD_CDB_GROUP0; 11711 } 11712 #endif 11713 11714 /* 11715 * Try to read the max-cdb-length supported by the HBA. Note that * CDB sizes do not grow monotonically with the group number: Group 0 * CDBs are 6 bytes, Group 1 are 10 bytes, Group 5 are 12 bytes, and * Group 4 are 16 bytes, which is why CDB_GROUP5 is tested before * CDB_GROUP4 below. 11716 */ 11717 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11718 if (0 >= un->un_max_hba_cdb) { 11719 un->un_max_hba_cdb = CDB_GROUP4; 11720 hba_cdb_limit = SD_CDB_GROUP4; 11721 } else if (0 < un->un_max_hba_cdb && 11722 un->un_max_hba_cdb < CDB_GROUP1) { 11723 hba_cdb_limit = SD_CDB_GROUP0; 11724 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11725 un->un_max_hba_cdb < CDB_GROUP5) { 11726 hba_cdb_limit = SD_CDB_GROUP1; 11727 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11728 un->un_max_hba_cdb < CDB_GROUP4) { 11729 hba_cdb_limit = SD_CDB_GROUP5; 11730 } else { 11731 hba_cdb_limit = SD_CDB_GROUP4; 11732 } 11733 11734 /* 11735 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11736 * commands for fixed disks unless we are building for a 32 bit 11737 * kernel. 11738 */ 11739 #ifdef _LP64 11740 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11741 min(hba_cdb_limit, SD_CDB_GROUP4); 11742 #else 11743 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11744 min(hba_cdb_limit, SD_CDB_GROUP1); 11745 #endif 11746 11747 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11748 ? sizeof (struct scsi_arq_status) : 1); 11749 un->un_cmd_timeout = (ushort_t)sd_io_time; 11750 un->un_uscsi_timeout = ((ISCD(un)) ?
2 : 1) * un->un_cmd_timeout; 11751 } 11752 11753 11754 /* 11755 * Function: sd_initpkt_for_buf 11756 * 11757 * Description: Allocate and initialize for transport a scsi_pkt struct, 11758 * based upon the info specified in the given buf struct. 11759 * 11760 * Assumes the xb_blkno in the request is absolute (ie, 11761 * relative to the start of the device, NOT the partition!). 11762 * Also assumes that the request is using the native block 11763 * size of the device (as returned by the READ CAPACITY 11764 * command). 11765 * 11766 * Return Code: SD_PKT_ALLOC_SUCCESS 11767 * SD_PKT_ALLOC_FAILURE 11768 * SD_PKT_ALLOC_FAILURE_NO_DMA 11769 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11770 * 11771 * Context: Kernel thread and may be called from software interrupt context 11772 * as part of a sdrunout callback. This function may not block or 11773 * call routines that block 11774 */ 11775 11776 static int 11777 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11778 { 11779 struct sd_xbuf *xp; 11780 struct scsi_pkt *pktp = NULL; 11781 struct sd_lun *un; 11782 size_t blockcount; 11783 daddr_t startblock; 11784 int rval; 11785 int cmd_flags; 11786 11787 ASSERT(bp != NULL); 11788 ASSERT(pktpp != NULL); 11789 xp = SD_GET_XBUF(bp); 11790 ASSERT(xp != NULL); 11791 un = SD_GET_UN(bp); 11792 ASSERT(un != NULL); 11793 ASSERT(mutex_owned(SD_MUTEX(un))); 11794 ASSERT(bp->b_resid == 0); 11795 11796 SD_TRACE(SD_LOG_IO_CORE, un, 11797 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11798 11799 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11800 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11801 /* 11802 * Already have a scsi_pkt -- just need DMA resources. 11803 * We must recompute the CDB in case the mapping returns 11804 * a nonzero pkt_resid. 11805 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11806 * that is being retried, the unmap/remap of the DMA resources 11807 * will result in the entire transfer starting over again 11808 * from the very first block. 11809 */ 11810 ASSERT(xp->xb_pktp != NULL); 11811 pktp = xp->xb_pktp; 11812 } else { 11813 pktp = NULL; 11814 } 11815 #endif /* __i386 || __amd64 */ 11816 11817 startblock = xp->xb_blkno; /* Absolute block num. */ 11818 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11819 11820 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11821 11822 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11823 11824 #else 11825 11826 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11827 11828 #endif 11829 11830 /* 11831 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11832 * call scsi_init_pkt, and build the CDB. 11833 */ 11834 rval = sd_setup_rw_pkt(un, &pktp, bp, 11835 cmd_flags, sdrunout, (caddr_t)un, 11836 startblock, blockcount); 11837 11838 if (rval == 0) { 11839 /* 11840 * Success. 11841 * 11842 * If partial DMA is being used and is required for this transfer, 11843 * set it up here.
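* With PKT_DMA_PARTIAL, scsi_init_pkt(9F) may bind only part of * the buffer; pkt_resid then holds the byte count not covered by * the current DMA window, and the remainder is carried forward via * sd_setup_next_rw_pkt() on subsequent transfers.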
11844 */ 11845 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11846 (pktp->pkt_resid != 0)) { 11847 11848 /* 11849 * Save the pkt_resid for the 11850 * next xfer 11851 */ 11852 xp->xb_dma_resid = pktp->pkt_resid; 11853 11854 /* rezero resid */ 11855 pktp->pkt_resid = 0; 11856 11857 } else { 11858 xp->xb_dma_resid = 0; 11859 } 11860 11861 pktp->pkt_flags = un->un_tagflags; 11862 pktp->pkt_time = un->un_cmd_timeout; 11863 pktp->pkt_comp = sdintr; 11864 11865 pktp->pkt_private = bp; 11866 *pktpp = pktp; 11867 11868 SD_TRACE(SD_LOG_IO_CORE, un, 11869 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11870 11871 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11872 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11873 #endif 11874 11875 return (SD_PKT_ALLOC_SUCCESS); 11876 11877 } 11878 11879 /* 11880 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11881 * from sd_setup_rw_pkt. 11882 */ 11883 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11884 11885 if (rval == SD_PKT_ALLOC_FAILURE) { 11886 *pktpp = NULL; 11887 /* 11888 * Set the driver state to RWAIT to indicate the driver 11889 * is waiting on resource allocations. The driver will not 11890 * suspend, pm_suspend, or detach while the state is RWAIT. 11891 */ 11892 New_state(un, SD_STATE_RWAIT); 11893 11894 SD_ERROR(SD_LOG_IO_CORE, un, 11895 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11896 11897 if ((bp->b_flags & B_ERROR) != 0) { 11898 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11899 } 11900 return (SD_PKT_ALLOC_FAILURE); 11901 } else { 11902 /* 11903 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11904 * 11905 * This should never happen. Maybe someone messed with the 11906 * kernel's minphys? 11907 */ 11908 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11909 "Request rejected: too large for CDB: " 11910 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11911 SD_ERROR(SD_LOG_IO_CORE, un, 11912 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11913 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11914 11915 } 11916 } 11917 11918 11919 /* 11920 * Function: sd_destroypkt_for_buf 11921 * 11922 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11923 * 11924 * Context: Kernel thread or interrupt context 11925 */ 11926 11927 static void 11928 sd_destroypkt_for_buf(struct buf *bp) 11929 { 11930 ASSERT(bp != NULL); 11931 ASSERT(SD_GET_UN(bp) != NULL); 11932 11933 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11934 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11935 11936 ASSERT(SD_GET_PKTP(bp) != NULL); 11937 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11938 11939 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11940 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11941 } 11942 11943 /* 11944 * Function: sd_setup_rw_pkt 11945 * 11946 * Description: Determines appropriate CDB group for the requested LBA 11947 * and transfer length, calls scsi_init_pkt, and builds 11948 * the CDB. Do not use for partial DMA transfers except 11949 * for the initial transfer since the CDB size must 11950 * remain constant. 11951 * 11952 * Context: Kernel thread and may be called from software interrupt 11953 * context as part of a sdrunout callback.
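(sdrunout() is the * resource callback passed to scsi_init_pkt(9F); it is invoked when * previously unavailable resources may have become available.)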
This function may not 11954 * block or call routines that block 11955 */ 11956 11957 11958 int 11959 sd_setup_rw_pkt(struct sd_lun *un, 11960 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11961 int (*callback)(caddr_t), caddr_t callback_arg, 11962 diskaddr_t lba, uint32_t blockcount) 11963 { 11964 struct scsi_pkt *return_pktp; 11965 union scsi_cdb *cdbp; 11966 struct sd_cdbinfo *cp = NULL; 11967 int i; 11968 11969 /* 11970 * See which size CDB to use, based upon the request. 11971 */ 11972 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11973 11974 /* 11975 * Check lba and block count against sd_cdbtab limits. 11976 * In the partial DMA case, we have to use the same size 11977 * CDB for all the transfers. Check lba + blockcount 11978 * against the max LBA so we know that segment of the 11979 * transfer can use the CDB we select. 11980 */ 11981 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11982 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11983 11984 /* 11985 * The command will fit into the CDB type 11986 * specified by sd_cdbtab[i]. 11987 */ 11988 cp = sd_cdbtab + i; 11989 11990 /* 11991 * Call scsi_init_pkt so we can fill in the 11992 * CDB. 11993 */ 11994 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11995 bp, cp->sc_grpcode, un->un_status_len, 0, 11996 flags, callback, callback_arg); 11997 11998 if (return_pktp != NULL) { 11999 12000 /* 12001 * Return new value of pkt 12002 */ 12003 *pktpp = return_pktp; 12004 12005 /* 12006 * To be safe, zero the CDB ensuring there is 12007 * no leftover data from a previous command. 12008 */ 12009 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12010 12011 /* 12012 * Handle partial DMA mapping 12013 */ 12014 if (return_pktp->pkt_resid != 0) { 12015 12016 /* 12017 * Not going to xfer as many blocks as 12018 * originally expected 12019 */ 12020 blockcount -= 12021 SD_BYTES2TGTBLOCKS(un, 12022 return_pktp->pkt_resid); 12023 } 12024 12025 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12026 12027 /* 12028 * Set command byte based on the CDB 12029 * type we matched. 12030 */ 12031 cdbp->scc_cmd = cp->sc_grpmask | 12032 ((bp->b_flags & B_READ) ? 12033 SCMD_READ : SCMD_WRITE); 12034 12035 SD_FILL_SCSI1_LUN(un, return_pktp); 12036 12037 /* 12038 * Fill in LBA and length 12039 */ 12040 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12041 (cp->sc_grpcode == CDB_GROUP4) || 12042 (cp->sc_grpcode == CDB_GROUP0) || 12043 (cp->sc_grpcode == CDB_GROUP5)); 12044 12045 if (cp->sc_grpcode == CDB_GROUP1) { 12046 FORMG1ADDR(cdbp, lba); 12047 FORMG1COUNT(cdbp, blockcount); 12048 return (0); 12049 } else if (cp->sc_grpcode == CDB_GROUP4) { 12050 FORMG4LONGADDR(cdbp, lba); 12051 FORMG4COUNT(cdbp, blockcount); 12052 return (0); 12053 } else if (cp->sc_grpcode == CDB_GROUP0) { 12054 FORMG0ADDR(cdbp, lba); 12055 FORMG0COUNT(cdbp, blockcount); 12056 return (0); 12057 } else if (cp->sc_grpcode == CDB_GROUP5) { 12058 FORMG5ADDR(cdbp, lba); 12059 FORMG5COUNT(cdbp, blockcount); 12060 return (0); 12061 } 12062 12063 /* 12064 * It should be impossible to not match one 12065 * of the CDB types above, so we should never 12066 * reach this point. Set the CDB command byte 12067 * to test-unit-ready to avoid writing 12068 * to somewhere we don't intend. 12069 */ 12070 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12071 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12072 } else { 12073 /* 12074 * Couldn't get scsi_pkt 12075 */ 12076 return (SD_PKT_ALLOC_FAILURE); 12077 } 12078 } 12079 } 12080 12081 /* 12082 * None of the available CDB types were suitable.
This really 12083 * should never happen: on a 64 bit system we support 12084 * READ16/WRITE16 which will hold an entire 64 bit disk address 12085 * and on a 32 bit system we will refuse to bind to a device 12086 * larger than 2TB so addresses will never be larger than 32 bits. 12087 */ 12088 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12089 } 12090 12091 /* 12092 * Function: sd_setup_next_rw_pkt 12093 * 12094 * Description: Setup packet for partial DMA transfers, except for the 12095 * initial transfer. sd_setup_rw_pkt should be used for 12096 * the initial transfer. 12097 * 12098 * Context: Kernel thread and may be called from interrupt context. 12099 */ 12100 12101 int 12102 sd_setup_next_rw_pkt(struct sd_lun *un, 12103 struct scsi_pkt *pktp, struct buf *bp, 12104 diskaddr_t lba, uint32_t blockcount) 12105 { 12106 uchar_t com; 12107 union scsi_cdb *cdbp; 12108 uchar_t cdb_group_id; 12109 12110 ASSERT(pktp != NULL); 12111 ASSERT(pktp->pkt_cdbp != NULL); 12112 12113 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12114 com = cdbp->scc_cmd; 12115 cdb_group_id = CDB_GROUPID(com); 12116 12117 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12118 (cdb_group_id == CDB_GROUPID_1) || 12119 (cdb_group_id == CDB_GROUPID_4) || 12120 (cdb_group_id == CDB_GROUPID_5)); 12121 12122 /* 12123 * Move pkt to the next portion of the xfer. 12124 * func is NULL_FUNC so we do not have to release 12125 * the disk mutex here. 12126 */ 12127 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12128 NULL_FUNC, NULL) == pktp) { 12129 /* Success. Handle partial DMA */ 12130 if (pktp->pkt_resid != 0) { 12131 blockcount -= 12132 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12133 } 12134 12135 cdbp->scc_cmd = com; 12136 SD_FILL_SCSI1_LUN(un, pktp); 12137 if (cdb_group_id == CDB_GROUPID_1) { 12138 FORMG1ADDR(cdbp, lba); 12139 FORMG1COUNT(cdbp, blockcount); 12140 return (0); 12141 } else if (cdb_group_id == CDB_GROUPID_4) { 12142 FORMG4LONGADDR(cdbp, lba); 12143 FORMG4COUNT(cdbp, blockcount); 12144 return (0); 12145 } else if (cdb_group_id == CDB_GROUPID_0) { 12146 FORMG0ADDR(cdbp, lba); 12147 FORMG0COUNT(cdbp, blockcount); 12148 return (0); 12149 } else if (cdb_group_id == CDB_GROUPID_5) { 12150 FORMG5ADDR(cdbp, lba); 12151 FORMG5COUNT(cdbp, blockcount); 12152 return (0); 12153 } 12154 12155 /* Unreachable */ 12156 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12157 } 12158 12159 /* 12160 * Error setting up next portion of cmd transfer. 12161 * Something is definitely very wrong and this 12162 * should not happen. 12163 */ 12164 return (SD_PKT_ALLOC_FAILURE); 12165 } 12166 12167 /* 12168 * Function: sd_initpkt_for_uscsi 12169 * 12170 * Description: Allocate and initialize for transport a scsi_pkt struct, 12171 * based upon the info specified in the given uscsi_cmd struct. 12172 * 12173 * Return Code: SD_PKT_ALLOC_SUCCESS 12174 * SD_PKT_ALLOC_FAILURE 12175 * SD_PKT_ALLOC_FAILURE_NO_DMA 12176 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12177 * 12178 * Context: Kernel thread and may be called from software interrupt context 12179 * as part of a sdrunout callback. 
This function may not block or 12180 * call routines that block 12181 */ 12182 12183 static int 12184 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12185 { 12186 struct uscsi_cmd *uscmd; 12187 struct sd_xbuf *xp; 12188 struct scsi_pkt *pktp; 12189 struct sd_lun *un; 12190 uint32_t flags = 0; 12191 12192 ASSERT(bp != NULL); 12193 ASSERT(pktpp != NULL); 12194 xp = SD_GET_XBUF(bp); 12195 ASSERT(xp != NULL); 12196 un = SD_GET_UN(bp); 12197 ASSERT(un != NULL); 12198 ASSERT(mutex_owned(SD_MUTEX(un))); 12199 12200 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12201 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12202 ASSERT(uscmd != NULL); 12203 12204 SD_TRACE(SD_LOG_IO_CORE, un, 12205 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12206 12207 /* 12208 * Allocate the scsi_pkt for the command. 12209 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12210 * during scsi_init_pkt time and will continue to use the 12211 * same path as long as the same scsi_pkt is used without 12212 * intervening scsi_dmafree(). Since the uscsi code does 12213 * not call scsi_dmafree() before retrying a failed command, it 12214 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 12215 * set, so that scsi_vhci can use another available path for 12216 * the retry. Besides, uscsi commands do not allow DMA breakup, 12217 * so there is no need to set the PKT_DMA_PARTIAL flag. 12218 */ 12219 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12220 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12221 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12222 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12223 - sizeof (struct scsi_extended_sense)), 0, 12224 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12225 sdrunout, (caddr_t)un); 12226 } else { 12227 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12228 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12229 sizeof (struct scsi_arq_status), 0, 12230 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12231 sdrunout, (caddr_t)un); 12232 } 12233 12234 if (pktp == NULL) { 12235 *pktpp = NULL; 12236 /* 12237 * Set the driver state to RWAIT to indicate the driver 12238 * is waiting on resource allocations. The driver will not 12239 * suspend, pm_suspend, or detach while the state is RWAIT. 12240 */ 12241 New_state(un, SD_STATE_RWAIT); 12242 12243 SD_ERROR(SD_LOG_IO_CORE, un, 12244 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12245 12246 if ((bp->b_flags & B_ERROR) != 0) { 12247 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12248 } 12249 return (SD_PKT_ALLOC_FAILURE); 12250 } 12251 12252 /* 12253 * We do not do DMA breakup for USCSI commands, so return failure 12254 * here if all the needed DMA resources were not allocated. 12255 */ 12256 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12257 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12258 scsi_destroy_pkt(pktp); 12259 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12260 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12261 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12262 } 12263 12264 /* Init the cdb from the given uscsi struct */ 12265 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12266 uscmd->uscsi_cdb[0], 0, 0, 0); 12267 12268 SD_FILL_SCSI1_LUN(un, pktp); 12269 12270 /* 12271 * Set up the optional USCSI flags. See the uscsi (7I) man page 12272 * for listing of the supported flags.
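* In particular, USCSI_DIAGNOSE maps to FLAG_DIAGNOSE, which * suppresses the retries the driver would normally perform (see the * SD_PATH_DIRECT_PRIORITY handling in sd_send_scsi_cmd()).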
12273 */ 12274 12275 if (uscmd->uscsi_flags & USCSI_SILENT) { 12276 flags |= FLAG_SILENT; 12277 } 12278 12279 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12280 flags |= FLAG_DIAGNOSE; 12281 } 12282 12283 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12284 flags |= FLAG_ISOLATE; 12285 } 12286 12287 if (un->un_f_is_fibre == FALSE) { 12288 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12289 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12290 } 12291 } 12292 12293 /* 12294 * Set the pkt flags here so we save time later. 12295 * Note: These flags are NOT in the uscsi man page!!! 12296 */ 12297 if (uscmd->uscsi_flags & USCSI_HEAD) { 12298 flags |= FLAG_HEAD; 12299 } 12300 12301 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12302 flags |= FLAG_NOINTR; 12303 } 12304 12305 /* 12306 * For tagged queueing, things get a bit complicated. 12307 * Check first for head of queue and last for ordered queue. 12308 * If neither head nor ordered, use the default driver tag flags. 12309 */ 12310 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12311 if (uscmd->uscsi_flags & USCSI_HTAG) { 12312 flags |= FLAG_HTAG; 12313 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12314 flags |= FLAG_OTAG; 12315 } else { 12316 flags |= un->un_tagflags & FLAG_TAGMASK; 12317 } 12318 } 12319 12320 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12321 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12322 } 12323 12324 pktp->pkt_flags = flags; 12325 12326 /* Copy the caller's CDB into the pkt... */ 12327 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12328 12329 if (uscmd->uscsi_timeout == 0) { 12330 pktp->pkt_time = un->un_uscsi_timeout; 12331 } else { 12332 pktp->pkt_time = uscmd->uscsi_timeout; 12333 } 12334 12335 /* need it later to identify USCSI request in sdintr */ 12336 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12337 12338 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12339 12340 pktp->pkt_private = bp; 12341 pktp->pkt_comp = sdintr; 12342 *pktpp = pktp; 12343 12344 SD_TRACE(SD_LOG_IO_CORE, un, 12345 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12346 12347 return (SD_PKT_ALLOC_SUCCESS); 12348 } 12349 12350 12351 /* 12352 * Function: sd_destroypkt_for_uscsi 12353 * 12354 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12355 * IOs. Also saves relevant info into the associated uscsi_cmd 12356 * struct. 12357 * 12358 * Context: May be called under interrupt context 12359 */ 12360 12361 static void 12362 sd_destroypkt_for_uscsi(struct buf *bp) 12363 { 12364 struct uscsi_cmd *uscmd; 12365 struct sd_xbuf *xp; 12366 struct scsi_pkt *pktp; 12367 struct sd_lun *un; 12368 12369 ASSERT(bp != NULL); 12370 xp = SD_GET_XBUF(bp); 12371 ASSERT(xp != NULL); 12372 un = SD_GET_UN(bp); 12373 ASSERT(un != NULL); 12374 ASSERT(!mutex_owned(SD_MUTEX(un))); 12375 pktp = SD_GET_PKTP(bp); 12376 ASSERT(pktp != NULL); 12377 12378 SD_TRACE(SD_LOG_IO_CORE, un, 12379 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12380 12381 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12382 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12383 ASSERT(uscmd != NULL); 12384 12385 /* Save the status and the residual into the uscsi_cmd struct */ 12386 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12387 uscmd->uscsi_resid = bp->b_resid; 12388 12389 /* 12390 * If enabled, copy any saved sense data into the area specified 12391 * by the uscsi command.
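* The amount copied mirrors the allocation in * sd_initpkt_for_uscsi(): MAX_SENSE_LENGTH when extended sense was * requested (uscsi_rqlen > SENSE_LENGTH), SENSE_LENGTH otherwise.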
12392 */ 12393 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12394 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12395 /* 12396 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12397 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12398 */ 12399 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12400 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12401 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12402 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12403 MAX_SENSE_LENGTH); 12404 } else { 12405 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12406 SENSE_LENGTH); 12407 } 12408 } 12409 12410 /* We are done with the scsi_pkt; free it now */ 12411 ASSERT(SD_GET_PKTP(bp) != NULL); 12412 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12413 12414 SD_TRACE(SD_LOG_IO_CORE, un, 12415 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12416 } 12417 12418 12419 /* 12420 * Function: sd_bioclone_alloc 12421 * 12422 * Description: Allocate a buf(9S) and init it as per the given buf 12423 * and the various arguments. The associated sd_xbuf 12424 * struct is (nearly) duplicated. The struct buf *bp 12425 * argument is saved in new_xp->xb_private. 12426 * 12427 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12428 * datalen - size of data area for the shadow bp 12429 * blkno - starting LBA 12430 * func - function pointer for b_iodone in the shadow buf. (May 12431 * be NULL if none.) 12432 * 12433 * Return Code: Pointer to the allocated buf(9S) struct 12434 * 12435 * Context: Can sleep. 12436 */ 12437 12438 static struct buf * 12439 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12440 daddr_t blkno, int (*func)(struct buf *)) 12441 { 12442 struct sd_lun *un; 12443 struct sd_xbuf *xp; 12444 struct sd_xbuf *new_xp; 12445 struct buf *new_bp; 12446 12447 ASSERT(bp != NULL); 12448 xp = SD_GET_XBUF(bp); 12449 ASSERT(xp != NULL); 12450 un = SD_GET_UN(bp); 12451 ASSERT(un != NULL); 12452 ASSERT(!mutex_owned(SD_MUTEX(un))); 12453 12454 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12455 NULL, KM_SLEEP); 12456 12457 new_bp->b_lblkno = blkno; 12458 12459 /* 12460 * Allocate an xbuf for the shadow bp and copy the contents of the 12461 * original xbuf into it. 12462 */ 12463 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12464 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12465 12466 /* 12467 * The given bp is automatically saved in the xb_private member 12468 * of the new xbuf. Callers are allowed to depend on this. 12469 */ 12470 new_xp->xb_private = bp; 12471 12472 new_bp->b_private = new_xp; 12473 12474 return (new_bp); 12475 } 12476 12477 /* 12478 * Function: sd_shadow_buf_alloc 12479 * 12480 * Description: Allocate a buf(9S) and init it as per the given buf 12481 * and the various arguments. The associated sd_xbuf 12482 * struct is (nearly) duplicated. The struct buf *bp 12483 * argument is saved in new_xp->xb_private. 12484 * 12485 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12486 * datalen - size of data area for the shadow bp 12487 * bflags - B_READ or B_WRITE (pseudo flag) 12488 * blkno - starting LBA 12489 * func - function pointer for b_iodone in the shadow buf. (May 12490 * be NULL if none.) 12491 * 12492 * Return Code: Pointer to the allocated buf(9S) struct 12493 * 12494 * Context: Can sleep.
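* * Note: on x86 the shadow buffer is assembled from getrbuf() and * kmem_zalloc(), while other platforms use scsi_alloc_consistent_buf(); * sd_shadow_buf_free() must mirror whichever path performed the * allocation.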
12495 */ 12496 12497 static struct buf * 12498 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12499 daddr_t blkno, int (*func)(struct buf *)) 12500 { 12501 struct sd_lun *un; 12502 struct sd_xbuf *xp; 12503 struct sd_xbuf *new_xp; 12504 struct buf *new_bp; 12505 12506 ASSERT(bp != NULL); 12507 xp = SD_GET_XBUF(bp); 12508 ASSERT(xp != NULL); 12509 un = SD_GET_UN(bp); 12510 ASSERT(un != NULL); 12511 ASSERT(!mutex_owned(SD_MUTEX(un))); 12512 12513 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12514 bp_mapin(bp); 12515 } 12516 12517 bflags &= (B_READ | B_WRITE); 12518 #if defined(__i386) || defined(__amd64) 12519 new_bp = getrbuf(KM_SLEEP); 12520 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12521 new_bp->b_bcount = datalen; 12522 new_bp->b_flags = bflags | 12523 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12524 #else 12525 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12526 datalen, bflags, SLEEP_FUNC, NULL); 12527 #endif 12528 new_bp->av_forw = NULL; 12529 new_bp->av_back = NULL; 12530 new_bp->b_dev = bp->b_dev; 12531 new_bp->b_blkno = blkno; 12532 new_bp->b_iodone = func; 12533 new_bp->b_edev = bp->b_edev; 12534 new_bp->b_resid = 0; 12535 12536 /* We need to preserve the B_FAILFAST flag */ 12537 if (bp->b_flags & B_FAILFAST) { 12538 new_bp->b_flags |= B_FAILFAST; 12539 } 12540 12541 /* 12542 * Allocate an xbuf for the shadow bp and copy the contents of the 12543 * original xbuf into it. 12544 */ 12545 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12546 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12547 12548 /* Need later to copy data between the shadow buf & original buf! */ 12549 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12550 12551 /* 12552 * The given bp is automatically saved in the xb_private member 12553 * of the new xbuf. Callers are allowed to depend on this. 12554 */ 12555 new_xp->xb_private = bp; 12556 12557 new_bp->b_private = new_xp; 12558 12559 return (new_bp); 12560 } 12561 12562 /* 12563 * Function: sd_bioclone_free 12564 * 12565 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12566 * when a request overran the end of the partition. 12567 * 12568 * Context: May be called under interrupt context 12569 */ 12570 12571 static void 12572 sd_bioclone_free(struct buf *bp) 12573 { 12574 struct sd_xbuf *xp; 12575 12576 ASSERT(bp != NULL); 12577 xp = SD_GET_XBUF(bp); 12578 ASSERT(xp != NULL); 12579 12580 /* 12581 * Call bp_mapout() before freeing the buf, in case a lower 12582 * layer or HBA had done a bp_mapin(). We must do this here, 12583 * as we are the "originator" of the shadow buf. 12584 */ 12585 bp_mapout(bp); 12586 12587 /* 12588 * Null out b_iodone before freeing the bp, to ensure that the driver 12589 * never gets confused by a stale value in this field. (Just a little 12590 * extra defensiveness here.) 12591 */ 12592 bp->b_iodone = NULL; 12593 12594 freerbuf(bp); 12595 12596 kmem_free(xp, sizeof (struct sd_xbuf)); 12597 } 12598 12599 /* 12600 * Function: sd_shadow_buf_free 12601 * 12602 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12603 * 12604 * Context: May be called under interrupt context 12605 */ 12606 12607 static void 12608 sd_shadow_buf_free(struct buf *bp) 12609 { 12610 struct sd_xbuf *xp; 12611 12612 ASSERT(bp != NULL); 12613 xp = SD_GET_XBUF(bp); 12614 ASSERT(xp != NULL); 12615 12616 #if defined(__sparc) 12617 /* 12618 * Call bp_mapout() before freeing the buf, in case a lower 12619 * layer or HBA had done a bp_mapin().
We must do this here 12620 * as we are the "originator" of the shadow buf. 12621 */ 12622 bp_mapout(bp); 12623 #endif 12624 12625 /* 12626 * Null out b_iodone before freeing the bp, to ensure that the driver 12627 * never gets confused by a stale value in this field. (Just a little 12628 * extra defensiveness here.) 12629 */ 12630 bp->b_iodone = NULL; 12631 12632 #if defined(__i386) || defined(__amd64) 12633 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12634 freerbuf(bp); 12635 #else 12636 scsi_free_consistent_buf(bp); 12637 #endif 12638 12639 kmem_free(xp, sizeof (struct sd_xbuf)); 12640 } 12641 12642 12643 /* 12644 * Function: sd_print_transport_rejected_message 12645 * 12646 * Description: This implements the ludicrously complex rules for printing 12647 * a "transport rejected" message. This is to address the 12648 * specific problem of having a flood of this error message 12649 * produced when a failover occurs. 12650 * 12651 * Context: Any. 12652 */ 12653 12654 static void 12655 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12656 int code) 12657 { 12658 ASSERT(un != NULL); 12659 ASSERT(mutex_owned(SD_MUTEX(un))); 12660 ASSERT(xp != NULL); 12661 12662 /* 12663 * Print the "transport rejected" message under the following 12664 * conditions: 12665 * 12666 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12667 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12668 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12669 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12670 * scsi_transport(9F) (which indicates that the target might have 12671 * gone off-line). This uses the un->un_tran_fatal_count 12672 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12673 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12674 * from scsi_transport(). 12675 * 12676 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12677 * the preceding cases in order for the message to be printed. 12678 */ 12679 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12680 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12681 (code != TRAN_FATAL_ERROR) || 12682 (un->un_tran_fatal_count == 1)) { 12683 switch (code) { 12684 case TRAN_BADPKT: 12685 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12686 "transport rejected bad packet\n"); 12687 break; 12688 case TRAN_FATAL_ERROR: 12689 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12690 "transport rejected fatal error\n"); 12691 break; 12692 default: 12693 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12694 "transport rejected (%d)\n", code); 12695 break; 12696 } 12697 } 12698 } 12699 } 12700 12701 12702 /* 12703 * Function: sd_add_buf_to_waitq 12704 * 12705 * Description: Add the given buf(9S) struct to the wait queue for the 12706 * instance. If sorting is enabled, then the buf is added 12707 * to the queue via an elevator sort algorithm (a la 12708 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12709 * If sorting is not enabled, then the buf is just added 12710 * to the end of the wait queue. 12711 * 12712 * Return Code: void 12713 * 12714 * Context: Does not sleep/block, therefore technically can be called 12715 * from any context. However if sorting is enabled then the 12716 * execution time is indeterminate, and may take a long time if 12717 * the wait queue grows large.
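 *
 * Worked example (illustrative): suppose the waitq holds bufs at
 * blocks 10, 30, 50, 5, 8. The first ascending run (10, 30, 50)
 * holds requests still ahead of the current sweep; the second run
 * (5, 8) holds requests that arrived after the sweep passed their
 * position. A new buf at block 40 sorts into the first run, giving
 * 10, 30, 40, 50, 5, 8; a new buf at block 7 sorts into the second
 * run, giving 10, 30, 50, 5, 7, 8.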
12718 */ 12719 12720 static void 12721 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12722 { 12723 struct buf *ap; 12724 12725 ASSERT(bp != NULL); 12726 ASSERT(un != NULL); 12727 ASSERT(mutex_owned(SD_MUTEX(un))); 12728 12729 /* If the queue is empty, add the buf as the only entry & return. */ 12730 if (un->un_waitq_headp == NULL) { 12731 ASSERT(un->un_waitq_tailp == NULL); 12732 un->un_waitq_headp = un->un_waitq_tailp = bp; 12733 bp->av_forw = NULL; 12734 return; 12735 } 12736 12737 ASSERT(un->un_waitq_tailp != NULL); 12738 12739 /* 12740 * If sorting is disabled, just add the buf to the tail end of 12741 * the wait queue and return. 12742 */ 12743 if (un->un_f_disksort_disabled) { 12744 un->un_waitq_tailp->av_forw = bp; 12745 un->un_waitq_tailp = bp; 12746 bp->av_forw = NULL; 12747 return; 12748 } 12749 12750 /* 12751 * Sort thru the list of requests currently on the wait queue 12752 * and add the new buf request at the appropriate position. 12753 * 12754 * The un->un_waitq_headp is an activity chain pointer on which 12755 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12756 * first queue holds those requests which are positioned after 12757 * the current SD_GET_BLKNO() (in the first request); the second holds 12758 * requests which came in after their SD_GET_BLKNO() number was passed. 12759 * Thus we implement a one way scan, retracting after reaching 12760 * the end of the drive to the first request on the second 12761 * queue, at which time it becomes the first queue. 12762 * A one-way scan is natural because of the way UNIX read-ahead 12763 * blocks are allocated. 12764 * 12765 * If we lie after the first request, then we must locate the 12766 * second request list and add ourselves to it. 12767 */ 12768 ap = un->un_waitq_headp; 12769 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12770 while (ap->av_forw != NULL) { 12771 /* 12772 * Look for an "inversion" in the (normally 12773 * ascending) block numbers. This indicates 12774 * the start of the second request list. 12775 */ 12776 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12777 /* 12778 * Search the second request list for the 12779 * first request at a larger block number. 12780 * We go before that; however if there is 12781 * no such request, we go at the end. 12782 */ 12783 do { 12784 if (SD_GET_BLKNO(bp) < 12785 SD_GET_BLKNO(ap->av_forw)) { 12786 goto insert; 12787 } 12788 ap = ap->av_forw; 12789 } while (ap->av_forw != NULL); 12790 goto insert; /* after last */ 12791 } 12792 ap = ap->av_forw; 12793 } 12794 12795 /* 12796 * No inversions... we will go after the last, and 12797 * be the first request in the second request list. 12798 */ 12799 goto insert; 12800 } 12801 12802 /* 12803 * Request is at/after the current request... 12804 * sort in the first request list. 12805 */ 12806 while (ap->av_forw != NULL) { 12807 /* 12808 * We want to go after the current request (1) if 12809 * there is an inversion after it (i.e. it is the end 12810 * of the first request list), or (2) if the next 12811 * request is a larger block no. than our request. 12812 */ 12813 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12814 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12815 goto insert; 12816 } 12817 ap = ap->av_forw; 12818 } 12819 12820 /* 12821 * Neither a second list nor a larger request, therefore 12822 * we go at the end of the first list (which is the same 12823 * as the end of the whole shebang).
12824 */ 12825 insert: 12826 bp->av_forw = ap->av_forw; 12827 ap->av_forw = bp; 12828 12829 /* 12830 * If we inserted onto the tail end of the waitq, make sure the 12831 * tail pointer is updated. 12832 */ 12833 if (ap == un->un_waitq_tailp) { 12834 un->un_waitq_tailp = bp; 12835 } 12836 } 12837 12838 12839 /* 12840 * Function: sd_start_cmds 12841 * 12842 * Description: Remove and transport cmds from the driver queues. 12843 * 12844 * Arguments: un - pointer to the unit (soft state) struct for the target. 12845 * 12846 * immed_bp - ptr to a buf to be transported immediately. Only 12847 * the immed_bp is transported; bufs on the waitq are not 12848 * processed and the un_retry_bp is not checked. If immed_bp is 12849 * NULL, then normal queue processing is performed. 12850 * 12851 * Context: May be called from kernel thread context, interrupt context, 12852 * or runout callback context. This function may not block or 12853 * call routines that block. 12854 */ 12855 12856 static void 12857 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12858 { 12859 struct sd_xbuf *xp; 12860 struct buf *bp; 12861 void (*statp)(kstat_io_t *); 12862 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12863 void (*saved_statp)(kstat_io_t *); 12864 #endif 12865 int rval; 12866 12867 ASSERT(un != NULL); 12868 ASSERT(mutex_owned(SD_MUTEX(un))); 12869 ASSERT(un->un_ncmds_in_transport >= 0); 12870 ASSERT(un->un_throttle >= 0); 12871 12872 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12873 12874 do { 12875 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12876 saved_statp = NULL; 12877 #endif 12878 12879 /* 12880 * If we are syncing or dumping, fail the command to 12881 * avoid recursively calling back into scsi_transport(). 12882 * The dump I/O itself uses a separate code path so this 12883 * only prevents non-dump I/O from being sent while dumping. 12884 * File system sync takes place before dumping begins. 12885 * During panic, filesystem I/O is allowed provided 12886 * un_in_callback is <= 1. This is to prevent recursion 12887 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12888 * sd_start_cmds and so on. See panic.c for more information 12889 * about the states the system can be in during panic. 12890 */ 12891 if ((un->un_state == SD_STATE_DUMPING) || 12892 (ddi_in_panic() && (un->un_in_callback > 1))) { 12893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12894 "sd_start_cmds: panicking\n"); 12895 goto exit; 12896 } 12897 12898 if ((bp = immed_bp) != NULL) { 12899 /* 12900 * We have a bp that must be transported immediately. 12901 * It's OK to transport the immed_bp here without doing 12902 * the throttle limit check because the immed_bp is 12903 * always used in a retry/recovery case. This means 12904 * that we know we are not at the throttle limit by 12905 * virtue of the fact that to get here we must have 12906 * already gotten a command back via sdintr(). This also 12907 * relies on (1) the command on un_retry_bp preventing 12908 * further commands from the waitq from being issued; 12909 * and (2) the code in sd_retry_command checking the 12910 * throttle limit before issuing a delayed or immediate 12911 * retry. This holds even if the throttle limit is 12912 * currently ratcheted down from its maximum value. 
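 *
 * Illustrative scenario: if un_throttle has been ratcheted down
 * to 1 and that single slot was held by the command that just
 * completed in sdintr(), re-issuing it here as immed_bp still
 * keeps the count of outstanding commands within un_throttle,
 * even though no explicit limit check is made on this path.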
12913 */ 12914 statp = kstat_runq_enter; 12915 if (bp == un->un_retry_bp) { 12916 ASSERT((un->un_retry_statp == NULL) || 12917 (un->un_retry_statp == kstat_waitq_enter) || 12918 (un->un_retry_statp == 12919 kstat_runq_back_to_waitq)); 12920 /* 12921 * If the waitq kstat was incremented when 12922 * sd_set_retry_bp() queued this bp for a retry, 12923 * then we must set up statp so that the waitq 12924 * count will get decremented correctly below. 12925 * Also we must clear un->un_retry_statp to 12926 * ensure that we do not act on a stale value 12927 * in this field. 12928 */ 12929 if ((un->un_retry_statp == kstat_waitq_enter) || 12930 (un->un_retry_statp == 12931 kstat_runq_back_to_waitq)) { 12932 statp = kstat_waitq_to_runq; 12933 } 12934 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12935 saved_statp = un->un_retry_statp; 12936 #endif 12937 un->un_retry_statp = NULL; 12938 12939 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12940 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12941 "un_throttle:%d un_ncmds_in_transport:%d\n", 12942 un, un->un_retry_bp, un->un_throttle, 12943 un->un_ncmds_in_transport); 12944 } else { 12945 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12946 "processing priority bp:0x%p\n", bp); 12947 } 12948 12949 } else if ((bp = un->un_waitq_headp) != NULL) { 12950 /* 12951 * A command on the waitq is ready to go, but do not 12952 * send it if: 12953 * 12954 * (1) the throttle limit has been reached, or 12955 * (2) a retry is pending, or 12956 * (3) a START_STOP_UNIT callback is pending, or 12957 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12958 * command is pending. 12959 * 12960 * For all of these conditions, IO processing will 12961 * restart after the condition is cleared. 12962 */ 12963 if (un->un_ncmds_in_transport >= un->un_throttle) { 12964 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12965 "sd_start_cmds: exiting, " 12966 "throttle limit reached!\n"); 12967 goto exit; 12968 } 12969 if (un->un_retry_bp != NULL) { 12970 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12971 "sd_start_cmds: exiting, retry pending!\n"); 12972 goto exit; 12973 } 12974 if (un->un_startstop_timeid != NULL) { 12975 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12976 "sd_start_cmds: exiting, " 12977 "START_STOP pending!\n"); 12978 goto exit; 12979 } 12980 if (un->un_direct_priority_timeid != NULL) { 12981 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12982 "sd_start_cmds: exiting, " 12983 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 12984 goto exit; 12985 } 12986 12987 /* Dequeue the command */ 12988 un->un_waitq_headp = bp->av_forw; 12989 if (un->un_waitq_headp == NULL) { 12990 un->un_waitq_tailp = NULL; 12991 } 12992 bp->av_forw = NULL; 12993 statp = kstat_waitq_to_runq; 12994 SD_TRACE(SD_LOG_IO_CORE, un, 12995 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 12996 12997 } else { 12998 /* No work to do so bail out now */ 12999 SD_TRACE(SD_LOG_IO_CORE, un, 13000 "sd_start_cmds: no more work, exiting!\n"); 13001 goto exit; 13002 } 13003 13004 /* 13005 * Reset the state to normal. This is the mechanism by which 13006 * the state transitions from either SD_STATE_RWAIT or 13007 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13008 * If state is SD_STATE_PM_CHANGING then this command is 13009 * part of the device power control and the state must 13010 * not be put back to normal. Doing so would 13011 * allow new commands to proceed when they shouldn't, 13012 * as the device may be going off.
13013 */ 13014 if ((un->un_state != SD_STATE_SUSPENDED) && 13015 (un->un_state != SD_STATE_PM_CHANGING)) { 13016 New_state(un, SD_STATE_NORMAL); 13017 } 13018 13019 xp = SD_GET_XBUF(bp); 13020 ASSERT(xp != NULL); 13021 13022 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13023 /* 13024 * Allocate the scsi_pkt if we need one, or attach DMA 13025 * resources if we have a scsi_pkt that needs them. The 13026 * latter should only occur for commands that are being 13027 * retried. 13028 */ 13029 if ((xp->xb_pktp == NULL) || 13030 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13031 #else 13032 if (xp->xb_pktp == NULL) { 13033 #endif 13034 /* 13035 * There is no scsi_pkt allocated for this buf. Call 13036 * the initpkt function to allocate & init one. 13037 * 13038 * The scsi_init_pkt runout callback functionality is 13039 * implemented as follows: 13040 * 13041 * 1) The initpkt function always calls 13042 * scsi_init_pkt(9F) with sdrunout specified as the 13043 * callback routine. 13044 * 2) A successful packet allocation is initialized and 13045 * the I/O is transported. 13046 * 3) The I/O associated with an allocation resource 13047 * failure is left on its queue to be retried via 13048 * runout or the next I/O. 13049 * 4) The I/O associated with a DMA error is removed 13050 * from the queue and failed with EIO. Processing of 13051 * the transport queues is also halted to be 13052 * restarted via runout or the next I/O. 13053 * 5) The I/O associated with a CDB size or packet 13054 * size error is removed from the queue and failed 13055 * with EIO. Processing of the transport queues is 13056 * continued. 13057 * 13058 * Note: there is no interface for canceling a runout 13059 * callback. To prevent the driver from detaching or 13060 * suspending while a runout is pending the driver 13061 * state is set to SD_STATE_RWAIT. 13062 * 13063 * Note: using the scsi_init_pkt callback facility can 13064 * result in an I/O request persisting at the head of 13065 * the list which cannot be satisfied even after 13066 * multiple retries. In the future the driver may 13067 * implement some kind of maximum runout count before 13068 * failing an I/O. 13069 * 13070 * Note: the use of funcp below may seem superfluous, 13071 * but it helps warlock figure out the correct 13072 * initpkt function calls (see [s]sd.wlcmd). 13073 */ 13074 struct scsi_pkt *pktp; 13075 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13076 13077 ASSERT(bp != un->un_rqs_bp); 13078 13079 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13080 switch ((*funcp)(bp, &pktp)) { 13081 case SD_PKT_ALLOC_SUCCESS: 13082 xp->xb_pktp = pktp; 13083 SD_TRACE(SD_LOG_IO_CORE, un, 13084 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13085 pktp); 13086 goto got_pkt; 13087 13088 case SD_PKT_ALLOC_FAILURE: 13089 /* 13090 * Temporary (hopefully) resource depletion. 13091 * Since retries and RQS commands always have a 13092 * scsi_pkt allocated, these cases should never 13093 * get here. So the only cases this needs to 13094 * handle are a bp from the waitq (which we put 13095 * back onto the waitq for sdrunout), or a bp 13096 * sent as an immed_bp (which we just fail). 13097 */ 13098 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13099 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13100 13101 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13102 13103 if (bp == immed_bp) { 13104 /* 13105 * If SD_XB_DMA_FREED is clear, then 13106 * this is a failure to allocate a 13107 * scsi_pkt, and we must fail the 13108 * command.
13109 */ 13110 if ((xp->xb_pkt_flags & 13111 SD_XB_DMA_FREED) == 0) { 13112 break; 13113 } 13114 13115 /* 13116 * If this immediate command is NOT our 13117 * un_retry_bp, then we must fail it. 13118 */ 13119 if (bp != un->un_retry_bp) { 13120 break; 13121 } 13122 13123 /* 13124 * We get here if this cmd is our 13125 * un_retry_bp that was DMAFREED, but 13126 * scsi_init_pkt() failed to reallocate 13127 * DMA resources when we attempted to 13128 * retry it. This can happen when an 13129 * mpxio failover is in progress, but 13130 * we don't want to just fail the 13131 * command in this case. 13132 * 13133 * Use timeout(9F) to restart it after 13134 * a 100ms delay. We don't want to 13135 * let sdrunout() restart it, because 13136 * sdrunout() is just supposed to start 13137 * commands that are sitting on the 13138 * wait queue. The un_retry_bp stays 13139 * set until the command completes, but 13140 * sdrunout can be called many times 13141 * before that happens. Since sdrunout 13142 * cannot tell if the un_retry_bp is 13143 * already in the transport, it could 13144 * end up calling scsi_transport() for 13145 * the un_retry_bp multiple times. 13146 * 13147 * Also: don't schedule the callback 13148 * if some other callback is already 13149 * pending. 13150 */ 13151 if (un->un_retry_statp == NULL) { 13152 /* 13153 * restore the kstat pointer to 13154 * keep kstat counts coherent 13155 * when we do retry the command. 13156 */ 13157 un->un_retry_statp = 13158 saved_statp; 13159 } 13160 13161 if ((un->un_startstop_timeid == NULL) && 13162 (un->un_retry_timeid == NULL) && 13163 (un->un_direct_priority_timeid == 13164 NULL)) { 13165 13166 un->un_retry_timeid = 13167 timeout( 13168 sd_start_retry_command, 13169 un, SD_RESTART_TIMEOUT); 13170 } 13171 goto exit; 13172 } 13173 13174 #else 13175 if (bp == immed_bp) { 13176 break; /* Just fail the command */ 13177 } 13178 #endif 13179 13180 /* Add the buf back to the head of the waitq */ 13181 bp->av_forw = un->un_waitq_headp; 13182 un->un_waitq_headp = bp; 13183 if (un->un_waitq_tailp == NULL) { 13184 un->un_waitq_tailp = bp; 13185 } 13186 goto exit; 13187 13188 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13189 /* 13190 * HBA DMA resource failure. Fail the command 13191 * and continue processing of the queues. 13192 */ 13193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13194 "sd_start_cmds: " 13195 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13196 break; 13197 13198 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13199 /* 13200 * Note:x86: Partial DMA mapping not supported 13201 * for USCSI commands, and all the needed DMA 13202 * resources were not allocated. 13203 */ 13204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13205 "sd_start_cmds: " 13206 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13207 break; 13208 13209 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13210 /* 13211 * Note:x86: Request cannot fit into CDB based 13212 * on lba and len. 13213 */ 13214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13215 "sd_start_cmds: " 13216 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13217 break; 13218 13219 default: 13220 /* Should NEVER get here! */ 13221 panic("scsi_initpkt error"); 13222 /*NOTREACHED*/ 13223 } 13224 13225 /* 13226 * Fatal error in allocating a scsi_pkt for this buf. 13227 * Update kstats & return the buf with an error code. 13228 * We must use sd_return_failed_command_no_restart() to 13229 * avoid a recursive call back into sd_start_cmds(). 13230 * However this also means that we must keep processing 13231 * the waitq here in order to avoid stalling. 
13232 */ 13233 if (statp == kstat_waitq_to_runq) { 13234 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13235 } 13236 sd_return_failed_command_no_restart(un, bp, EIO); 13237 if (bp == immed_bp) { 13238 /* immed_bp is gone by now, so clear this */ 13239 immed_bp = NULL; 13240 } 13241 continue; 13242 } 13243 got_pkt: 13244 if (bp == immed_bp) { 13245 /* goto the head of the class.... */ 13246 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13247 } 13248 13249 un->un_ncmds_in_transport++; 13250 SD_UPDATE_KSTATS(un, statp, bp); 13251 13252 /* 13253 * Call scsi_transport() to send the command to the target. 13254 * According to SCSA architecture, we must drop the mutex here 13255 * before calling scsi_transport() in order to avoid deadlock. 13256 * Note that the scsi_pkt's completion routine can be executed 13257 * (from interrupt context) even before the call to 13258 * scsi_transport() returns. 13259 */ 13260 SD_TRACE(SD_LOG_IO_CORE, un, 13261 "sd_start_cmds: calling scsi_transport()\n"); 13262 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13263 13264 mutex_exit(SD_MUTEX(un)); 13265 rval = scsi_transport(xp->xb_pktp); 13266 mutex_enter(SD_MUTEX(un)); 13267 13268 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13269 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13270 13271 switch (rval) { 13272 case TRAN_ACCEPT: 13273 /* Clear this with every pkt accepted by the HBA */ 13274 un->un_tran_fatal_count = 0; 13275 break; /* Success; try the next cmd (if any) */ 13276 13277 case TRAN_BUSY: 13278 un->un_ncmds_in_transport--; 13279 ASSERT(un->un_ncmds_in_transport >= 0); 13280 13281 /* 13282 * Don't retry request sense, the sense data 13283 * is lost when another request is sent. 13284 * Free up the rqs buf and retry 13285 * the original failed cmd. Update kstat. 13286 */ 13287 if (bp == un->un_rqs_bp) { 13288 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13289 bp = sd_mark_rqs_idle(un, xp); 13290 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13291 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13292 kstat_waitq_enter); 13293 goto exit; 13294 } 13295 13296 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13297 /* 13298 * Free the DMA resources for the scsi_pkt. This will 13299 * allow mpxio to select another path the next time 13300 * we call scsi_transport() with this scsi_pkt. 13301 * See sdintr() for the rationalization behind this. 13302 */ 13303 if ((un->un_f_is_fibre == TRUE) && 13304 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13305 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13306 scsi_dmafree(xp->xb_pktp); 13307 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13308 } 13309 #endif 13310 13311 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13312 /* 13313 * Commands that are SD_PATH_DIRECT_PRIORITY 13314 * are for error recovery situations. These do 13315 * not use the normal command waitq, so if they 13316 * get a TRAN_BUSY we cannot put them back onto 13317 * the waitq for later retry. One possible 13318 * problem is that there could already be some 13319 * other command on un_retry_bp that is waiting 13320 * for this one to complete, so we would be 13321 * deadlocked if we put this command back onto 13322 * the waitq for later retry (since un_retry_bp 13323 * must complete before the driver gets back to 13324 * commands on the waitq). 13325 * 13326 * To avoid deadlock we must schedule a callback 13327 * that will restart this command after a set 13328 * interval. 
This should keep retrying for as 13329 * long as the underlying transport keeps 13330 * returning TRAN_BUSY (just like for other 13331 * commands). Use the same timeout interval as 13332 * for the ordinary TRAN_BUSY retry. 13333 */ 13334 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13335 "sd_start_cmds: scsi_transport() returned " 13336 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13337 13338 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13339 un->un_direct_priority_timeid = 13340 timeout(sd_start_direct_priority_command, 13341 bp, SD_BSY_TIMEOUT / 500); 13342 13343 goto exit; 13344 } 13345 13346 /* 13347 * For TRAN_BUSY, we want to reduce the throttle value, 13348 * unless we are retrying a command. 13349 */ 13350 if (bp != un->un_retry_bp) { 13351 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13352 } 13353 13354 /* 13355 * Set up the bp to be tried again 10 ms later. 13356 * Note:x86: Is there a timeout value in the sd_lun 13357 * for this condition? 13358 */ 13359 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13360 kstat_runq_back_to_waitq); 13361 goto exit; 13362 13363 case TRAN_FATAL_ERROR: 13364 un->un_tran_fatal_count++; 13365 /* FALLTHRU */ 13366 13367 case TRAN_BADPKT: 13368 default: 13369 un->un_ncmds_in_transport--; 13370 ASSERT(un->un_ncmds_in_transport >= 0); 13371 13372 /* 13373 * If this is our REQUEST SENSE command with a 13374 * transport error, we must get back the pointers 13375 * to the original buf, and mark the REQUEST 13376 * SENSE command as "available". 13377 */ 13378 if (bp == un->un_rqs_bp) { 13379 bp = sd_mark_rqs_idle(un, xp); 13380 xp = SD_GET_XBUF(bp); 13381 } else { 13382 /* 13383 * Legacy behavior: do not update transport 13384 * error count for request sense commands. 13385 */ 13386 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13387 } 13388 13389 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13390 sd_print_transport_rejected_message(un, xp, rval); 13391 13392 /* 13393 * We must use sd_return_failed_command_no_restart() to 13394 * avoid a recursive call back into sd_start_cmds(). 13395 * However this also means that we must keep processing 13396 * the waitq here in order to avoid stalling. 13397 */ 13398 sd_return_failed_command_no_restart(un, bp, EIO); 13399 13400 /* 13401 * Notify any threads waiting in sd_ddi_suspend() that 13402 * a command completion has occurred. 13403 */ 13404 if (un->un_state == SD_STATE_SUSPENDED) { 13405 cv_broadcast(&un->un_disk_busy_cv); 13406 } 13407 13408 if (bp == immed_bp) { 13409 /* immed_bp is gone by now, so clear this */ 13410 immed_bp = NULL; 13411 } 13412 break; 13413 } 13414 13415 } while (immed_bp == NULL); 13416 13417 exit: 13418 ASSERT(mutex_owned(SD_MUTEX(un))); 13419 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13420 } 13421 13422 13423 /* 13424 * Function: sd_return_command 13425 * 13426 * Description: Returns a command to its originator (with or without an 13427 * error). Also starts commands waiting to be transported 13428 * to the target. 
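 *
 *		Sketch of the completion flow (illustrative): the buf is
 *		handed back up the iodone side of the chain, with the
 *		scsi_pkt destroyed along the way:
 *
 *		sd_return_command(un, bp)
 *		    -> (*sd_destroypkt_map[xp->xb_chain_iodone])(bp)
 *		    -> SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp)
 *		       ...ultimately biodone() for the originator...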
13429 * 13430 * Context: May be called from interrupt, kernel, or timeout context 13431 */ 13432 13433 static void 13434 sd_return_command(struct sd_lun *un, struct buf *bp) 13435 { 13436 struct sd_xbuf *xp; 13437 struct scsi_pkt *pktp; 13438 13439 ASSERT(bp != NULL); 13440 ASSERT(un != NULL); 13441 ASSERT(mutex_owned(SD_MUTEX(un))); 13442 ASSERT(bp != un->un_rqs_bp); 13443 xp = SD_GET_XBUF(bp); 13444 ASSERT(xp != NULL); 13445 13446 pktp = SD_GET_PKTP(bp); 13447 13448 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13449 13450 /* 13451 * Note: check for the "sdrestart failed" case. 13452 */ 13453 if ((un->un_partial_dma_supported == 1) && 13454 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13455 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13456 (xp->xb_pktp->pkt_resid == 0)) { 13457 13458 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13459 /* 13460 * Successfully set up next portion of cmd 13461 * transfer, try sending it 13462 */ 13463 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13464 NULL, NULL, 0, (clock_t)0, NULL); 13465 sd_start_cmds(un, NULL); 13466 return; /* Note:x86: need a return here? */ 13467 } 13468 } 13469 13470 /* 13471 * If this is the failfast bp, clear it from un_failfast_bp. This 13472 * can happen if upon being re-tried the failfast bp either 13473 * succeeded or encountered another error (possibly even a different 13474 * error than the one that precipitated the failfast state, but in 13475 * that case it would have had to exhaust retries as well). Regardless, 13476 * this should not occur whenever the instance is in the active 13477 * failfast state. 13478 */ 13479 if (bp == un->un_failfast_bp) { 13480 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13481 un->un_failfast_bp = NULL; 13482 } 13483 13484 /* 13485 * Clear the failfast state upon successful completion of ANY cmd. 13486 */ 13487 if (bp->b_error == 0) { 13488 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13489 } 13490 13491 /* 13492 * This is used if the command was retried one or more times. Show that 13493 * we are done with it, and allow processing of the waitq to resume. 13494 */ 13495 if (bp == un->un_retry_bp) { 13496 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13497 "sd_return_command: un:0x%p: " 13498 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13499 un->un_retry_bp = NULL; 13500 un->un_retry_statp = NULL; 13501 } 13502 13503 SD_UPDATE_RDWR_STATS(un, bp); 13504 SD_UPDATE_PARTITION_STATS(un, bp); 13505 13506 switch (un->un_state) { 13507 case SD_STATE_SUSPENDED: 13508 /* 13509 * Notify any threads waiting in sd_ddi_suspend() that 13510 * a command completion has occurred. 13511 */ 13512 cv_broadcast(&un->un_disk_busy_cv); 13513 break; 13514 default: 13515 sd_start_cmds(un, NULL); 13516 break; 13517 } 13518 13519 /* Return this command up the iodone chain to its originator. */ 13520 mutex_exit(SD_MUTEX(un)); 13521 13522 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13523 xp->xb_pktp = NULL; 13524 13525 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13526 13527 ASSERT(!mutex_owned(SD_MUTEX(un))); 13528 mutex_enter(SD_MUTEX(un)); 13529 13530 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13531 } 13532 13533 13534 /* 13535 * Function: sd_return_failed_command 13536 * 13537 * Description: Command completion when an error occurred. 
13538 * 13539 * Context: May be called from interrupt context 13540 */ 13541 13542 static void 13543 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13544 { 13545 ASSERT(bp != NULL); 13546 ASSERT(un != NULL); 13547 ASSERT(mutex_owned(SD_MUTEX(un))); 13548 13549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13550 "sd_return_failed_command: entry\n"); 13551 13552 /* 13553 * b_resid could already be nonzero due to a partial data 13554 * transfer, so do not change it here. 13555 */ 13556 SD_BIOERROR(bp, errcode); 13557 13558 sd_return_command(un, bp); 13559 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13560 "sd_return_failed_command: exit\n"); 13561 } 13562 13563 13564 /* 13565 * Function: sd_return_failed_command_no_restart 13566 * 13567 * Description: Same as sd_return_failed_command, but ensures that no 13568 * call back into sd_start_cmds will be issued. 13569 * 13570 * Context: May be called from interrupt context 13571 */ 13572 13573 static void 13574 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13575 int errcode) 13576 { 13577 struct sd_xbuf *xp; 13578 13579 ASSERT(bp != NULL); 13580 ASSERT(un != NULL); 13581 ASSERT(mutex_owned(SD_MUTEX(un))); 13582 xp = SD_GET_XBUF(bp); 13583 ASSERT(xp != NULL); 13584 ASSERT(errcode != 0); 13585 13586 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13587 "sd_return_failed_command_no_restart: entry\n"); 13588 13589 /* 13590 * b_resid could already be nonzero due to a partial data 13591 * transfer, so do not change it here. 13592 */ 13593 SD_BIOERROR(bp, errcode); 13594 13595 /* 13596 * If this is the failfast bp, clear it. This can happen if the 13597 * failfast bp encountered a fatal error when we attempted to 13598 * re-try it (such as a scsi_transport(9F) failure). However 13599 * we should NOT be in an active failfast state if the failfast 13600 * bp is not NULL. 13601 */ 13602 if (bp == un->un_failfast_bp) { 13603 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13604 un->un_failfast_bp = NULL; 13605 } 13606 13607 if (bp == un->un_retry_bp) { 13608 /* 13609 * This command was retried one or more times. Show that we are 13610 * done with it, and allow processing of the waitq to resume. 13611 */ 13612 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13613 "sd_return_failed_command_no_restart: " 13614 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13615 un->un_retry_bp = NULL; 13616 un->un_retry_statp = NULL; 13617 } 13618 13619 SD_UPDATE_RDWR_STATS(un, bp); 13620 SD_UPDATE_PARTITION_STATS(un, bp); 13621 13622 mutex_exit(SD_MUTEX(un)); 13623 13624 if (xp->xb_pktp != NULL) { 13625 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13626 xp->xb_pktp = NULL; 13627 } 13628 13629 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13630 13631 mutex_enter(SD_MUTEX(un)); 13632 13633 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13634 "sd_return_failed_command_no_restart: exit\n"); 13635 } 13636 13637 13638 /* 13639 * Function: sd_retry_command 13640 * 13641 * Description: Queue up a command for retry, or (optionally) fail it 13642 * if retry counts are exhausted. 13643 * 13644 * Arguments: un - Pointer to the sd_lun struct for the target. 13645 * 13646 * bp - Pointer to the buf for the command to be retried. 13647 * 13648 * retry_check_flag - Flag to see which (if any) of the retry 13649 * counts should be decremented/checked. If the indicated 13650 * retry count is exhausted, then the command will not be 13651 * retried; it will be failed instead.
This should use a 13652 * value equal to one of the following: 13653 * 13654 * SD_RETRIES_NOCHECK 13655 * SD_RETRIES_STANDARD 13656 * SD_RETRIES_VICTIM 13657 * 13658 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13659 * if the check should be made to see if FLAG_ISOLATE is set 13660 * in the pkt. If FLAG_ISOLATE is set, then the command is 13661 * not retried, it is simply failed. 13662 * 13663 * user_funcp - Ptr to function to call before dispatching the 13664 * command. May be NULL if no action needs to be performed. 13665 * (Primarily intended for printing messages.) 13666 * 13667 * user_arg - Optional argument to be passed along to 13668 * the user_funcp call. 13669 * 13670 * failure_code - errno return code to set in the bp if the 13671 * command is going to be failed. 13672 * 13673 * retry_delay - Retry delay interval in (clock_t) units. May 13674 * be zero which indicates that the command should be retried 13675 * immediately (ie, without an intervening delay). 13676 * 13677 * statp - Ptr to kstat function to be updated if the command 13678 * is queued for a delayed retry. May be NULL if no kstat 13679 * update is desired. 13680 * 13681 * Context: May be called from interrupt context. 13682 */ 13683 13684 static void 13685 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13686 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13687 code), void *user_arg, int failure_code, clock_t retry_delay, 13688 void (*statp)(kstat_io_t *)) 13689 { 13690 struct sd_xbuf *xp; 13691 struct scsi_pkt *pktp; 13692 13693 ASSERT(un != NULL); 13694 ASSERT(mutex_owned(SD_MUTEX(un))); 13695 ASSERT(bp != NULL); 13696 xp = SD_GET_XBUF(bp); 13697 ASSERT(xp != NULL); 13698 pktp = SD_GET_PKTP(bp); 13699 ASSERT(pktp != NULL); 13700 13701 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13702 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13703 13704 /* 13705 * If we are syncing or dumping, fail the command to avoid 13706 * recursively calling back into scsi_transport(). 13707 */ 13708 if (ddi_in_panic()) { 13709 goto fail_command_no_log; 13710 } 13711 13712 /* 13713 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13714 * log an error and fail the command. 13715 */ 13716 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13717 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13718 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13719 sd_dump_memory(un, SD_LOG_IO, "CDB", 13720 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13721 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13722 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13723 goto fail_command; 13724 } 13725 13726 /* 13727 * If we are suspended, then put the command onto the head of the 13728 * wait queue, since we don't want to start more commands, and 13729 * clear the un_retry_bp. When we are next resumed, we will 13730 * handle the command on the wait queue.
13731 */ 13732 switch (un->un_state) { 13733 case SD_STATE_SUSPENDED: 13734 case SD_STATE_DUMPING: 13735 bp->av_forw = un->un_waitq_headp; 13736 un->un_waitq_headp = bp; 13737 if (un->un_waitq_tailp == NULL) { 13738 un->un_waitq_tailp = bp; 13739 } 13740 if (bp == un->un_retry_bp) { 13741 un->un_retry_bp = NULL; 13742 un->un_retry_statp = NULL; 13743 } 13744 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13745 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13746 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13747 return; 13748 default: 13749 break; 13750 } 13751 13752 /* 13753 * If the caller wants us to check FLAG_ISOLATE, then see if that 13754 * is set; if it is then we do not want to retry the command. 13755 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13756 */ 13757 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13758 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13759 goto fail_command; 13760 } 13761 } 13762 13763 13764 /* 13765 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13766 * command timeout or a selection timeout has occurred. This means 13767 * that we were unable to establish any kind of communication with 13768 * the target, and subsequent retries and/or commands are likely 13769 * to encounter similar results and take a long time to complete. 13770 * 13771 * If this is a failfast error condition, we need to update the 13772 * failfast state, even if this bp does not have B_FAILFAST set. 13773 */ 13774 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13775 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13776 ASSERT(un->un_failfast_bp == NULL); 13777 /* 13778 * If we are already in the active failfast state, and 13779 * another failfast error condition has been detected, 13780 * then fail this command if it has B_FAILFAST set. 13781 * If B_FAILFAST is clear, then maintain the legacy 13782 * behavior of retrying heroically, even though this will 13783 * take a lot more time to fail the command. 13784 */ 13785 if (bp->b_flags & B_FAILFAST) { 13786 goto fail_command; 13787 } 13788 } else { 13789 /* 13790 * We're not in the active failfast state, but we 13791 * have a failfast error condition, so we must begin 13792 * the transition to the next state. We do this regardless 13793 * of whether or not this bp has B_FAILFAST set. 13794 */ 13795 if (un->un_failfast_bp == NULL) { 13796 /* 13797 * This is the first bp to meet a failfast 13798 * condition so save it on un_failfast_bp & 13799 * do normal retry processing. Do not enter 13800 * active failfast state yet. This marks 13801 * entry into the "failfast pending" state. 13802 */ 13803 un->un_failfast_bp = bp; 13804 13805 } else if (un->un_failfast_bp == bp) { 13806 /* 13807 * This is the second time *this* bp has 13808 * encountered a failfast error condition, 13809 * so enter active failfast state & flush 13810 * queues as appropriate. 13811 */ 13812 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13813 un->un_failfast_bp = NULL; 13814 sd_failfast_flushq(un); 13815 13816 /* 13817 * Fail this bp now if B_FAILFAST set; 13818 * otherwise continue with retries. (It would 13819 * be pretty ironic if this bp succeeded on a 13820 * subsequent retry after we just flushed all 13821 * the queues).
13822 */ 13823 if (bp->b_flags & B_FAILFAST) { 13824 goto fail_command; 13825 } 13826 13827 #if !defined(lint) && !defined(__lint) 13828 } else { 13829 /* 13830 * If neither of the preceding conditionals 13831 * was true, it means that there is some 13832 * *other* bp that has met an initial failfast 13833 * condition and is currently either being 13834 * retried or is waiting to be retried. In 13835 * that case we should perform normal retry 13836 * processing on *this* bp, since there is a 13837 * chance that the current failfast condition 13838 * is transient and recoverable. If that does 13839 * not turn out to be the case, then retries 13840 * will be cleared when the wait queue is 13841 * flushed anyway. 13842 */ 13843 #endif 13844 } 13845 } 13846 } else { 13847 /* 13848 * SD_RETRIES_FAILFAST is clear, which indicates that we 13849 * likely were able to at least establish some level of 13850 * communication with the target and subsequent commands 13851 * and/or retries are likely to get through to the target. 13852 * In this case we want to be aggressive about clearing 13853 * the failfast state. Note that this does not affect 13854 * the "failfast pending" condition. 13855 */ 13856 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13857 } 13858 13859 13860 /* 13861 * Check the specified retry count to see if we can still do 13862 * any retries with this pkt before we should fail it. 13863 */ 13864 switch (retry_check_flag & SD_RETRIES_MASK) { 13865 case SD_RETRIES_VICTIM: 13866 /* 13867 * Check the victim retry count. If exhausted, then fall 13868 * thru & check against the standard retry count. 13869 */ 13870 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13871 /* Increment count & proceed with the retry */ 13872 xp->xb_victim_retry_count++; 13873 break; 13874 } 13875 /* Victim retries exhausted, fall back to std. retries... */ 13876 /* FALLTHRU */ 13877 13878 case SD_RETRIES_STANDARD: 13879 if (xp->xb_retry_count >= un->un_retry_count) { 13880 /* Retries exhausted, fail the command */ 13881 SD_TRACE(SD_LOG_IO_CORE, un, 13882 "sd_retry_command: retries exhausted!\n"); 13883 /* 13884 * update b_resid for failed SCMD_READ & SCMD_WRITE 13885 * commands with nonzero pkt_resid. 13886 */ 13887 if ((pktp->pkt_reason == CMD_CMPLT) && 13888 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13889 (pktp->pkt_resid != 0)) { 13890 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13891 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13892 SD_UPDATE_B_RESID(bp, pktp); 13893 } 13894 } 13895 goto fail_command; 13896 } 13897 xp->xb_retry_count++; 13898 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13899 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13900 break; 13901 13902 case SD_RETRIES_UA: 13903 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13904 /* Retries exhausted, fail the command */ 13905 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13906 "Unit Attention retries exhausted.
" 13907 "Check the target.\n"); 13908 goto fail_command; 13909 } 13910 xp->xb_ua_retry_count++; 13911 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13912 "sd_retry_command: retry count:%d\n", 13913 xp->xb_ua_retry_count); 13914 break; 13915 13916 case SD_RETRIES_BUSY: 13917 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13918 /* Retries exhausted, fail the command */ 13919 SD_TRACE(SD_LOG_IO_CORE, un, 13920 "sd_retry_command: retries exhausted!\n"); 13921 goto fail_command; 13922 } 13923 xp->xb_retry_count++; 13924 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13925 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13926 break; 13927 13928 case SD_RETRIES_NOCHECK: 13929 default: 13930 /* No retry count to check. Just proceed with the retry */ 13931 break; 13932 } 13933 13934 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13935 13936 /* 13937 * If we were given a zero timeout, we must attempt to retry the 13938 * command immediately (ie, without a delay). 13939 */ 13940 if (retry_delay == 0) { 13941 /* 13942 * Check some limiting conditions to see if we can actually 13943 * do the immediate retry. If we cannot, then we must 13944 * fall back to queueing up a delayed retry. 13945 */ 13946 if (un->un_ncmds_in_transport >= un->un_throttle) { 13947 /* 13948 * We are at the throttle limit for the target, 13949 * fall back to delayed retry. 13950 */ 13951 retry_delay = SD_BSY_TIMEOUT; 13952 statp = kstat_waitq_enter; 13953 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13954 "sd_retry_command: immed. retry hit " 13955 "throttle!\n"); 13956 } else { 13957 /* 13958 * We're clear to proceed with the immediate retry. 13959 * First call the user-provided function (if any) 13960 */ 13961 if (user_funcp != NULL) { 13962 (*user_funcp)(un, bp, user_arg, 13963 SD_IMMEDIATE_RETRY_ISSUED); 13964 #ifdef __lock_lint 13965 sd_print_incomplete_msg(un, bp, user_arg, 13966 SD_IMMEDIATE_RETRY_ISSUED); 13967 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13968 SD_IMMEDIATE_RETRY_ISSUED); 13969 sd_print_sense_failed_msg(un, bp, user_arg, 13970 SD_IMMEDIATE_RETRY_ISSUED); 13971 #endif 13972 } 13973 13974 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13975 "sd_retry_command: issuing immediate retry\n"); 13976 13977 /* 13978 * Call sd_start_cmds() to transport the command to 13979 * the target. 13980 */ 13981 sd_start_cmds(un, bp); 13982 13983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13984 "sd_retry_command exit\n"); 13985 return; 13986 } 13987 } 13988 13989 /* 13990 * Set up to retry the command after a delay. 13991 * First call the user-provided function (if any) 13992 */ 13993 if (user_funcp != NULL) { 13994 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13995 } 13996 13997 sd_set_retry_bp(un, bp, retry_delay, statp); 13998 13999 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14000 return; 14001 14002 fail_command: 14003 14004 if (user_funcp != NULL) { 14005 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14006 } 14007 14008 fail_command_no_log: 14009 14010 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14011 "sd_retry_command: returning failed command\n"); 14012 14013 sd_return_failed_command(un, bp, failure_code); 14014 14015 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14016 } 14017 14018 14019 /* 14020 * Function: sd_set_retry_bp 14021 * 14022 * Description: Set up the given bp for retry. 
14023 * 14024 * Arguments: un - ptr to associated softstate 14025 * bp - ptr to buf(9S) for the command 14026 * retry_delay - time interval before issuing retry (may be 0) 14027 * statp - optional pointer to kstat function 14028 * 14029 * Context: May be called under interrupt context 14030 */ 14031 14032 static void 14033 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14034 void (*statp)(kstat_io_t *)) 14035 { 14036 ASSERT(un != NULL); 14037 ASSERT(mutex_owned(SD_MUTEX(un))); 14038 ASSERT(bp != NULL); 14039 14040 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14041 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14042 14043 /* 14044 * Indicate that the command is being retried. This will not allow any 14045 * other commands on the wait queue to be transported to the target 14046 * until this command has been completed (success or failure). The 14047 * "retry command" is not transported to the target until the given 14048 * time delay expires, unless the user specified a 0 retry_delay. 14049 * 14050 * Note: the timeout(9F) callback routine is what actually calls 14051 * sd_start_cmds() to transport the command, with the exception of a 14052 * zero retry_delay. The only current implementor of a zero retry delay 14053 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14054 */ 14055 if (un->un_retry_bp == NULL) { 14056 ASSERT(un->un_retry_statp == NULL); 14057 un->un_retry_bp = bp; 14058 14059 /* 14060 * If the user has not specified a delay the command should 14061 * be queued and no timeout should be scheduled. 14062 */ 14063 if (retry_delay == 0) { 14064 /* 14065 * Save the kstat pointer that will be used in the 14066 * call to SD_UPDATE_KSTATS() below, so that 14067 * sd_start_cmds() can correctly decrement the waitq 14068 * count when it is time to transport this command. 14069 */ 14070 un->un_retry_statp = statp; 14071 goto done; 14072 } 14073 } 14074 14075 if (un->un_retry_bp == bp) { 14076 /* 14077 * Save the kstat pointer that will be used in the call to 14078 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14079 * correctly decrement the waitq count when it is time to 14080 * transport this command. 14081 */ 14082 un->un_retry_statp = statp; 14083 14084 /* 14085 * Schedule a timeout if: 14086 * 1) The user has specified a delay. 14087 * 2) There is not a START_STOP_UNIT callback pending. 14088 * 14089 * If no delay has been specified, then it is up to the caller 14090 * to ensure that IO processing continues without stalling. 14091 * Effectively, this means that the caller will issue the 14092 * required call to sd_start_cmds(). The START_STOP_UNIT 14093 * callback does this after the START STOP UNIT command has 14094 * completed. In either of these cases we should not schedule 14095 * a timeout callback here. Also don't schedule the timeout if 14096 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14097 */ 14098 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14099 (un->un_direct_priority_timeid == NULL)) { 14100 un->un_retry_timeid = 14101 timeout(sd_start_retry_command, un, retry_delay); 14102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14103 "sd_set_retry_bp: setting timeout: un: 0x%p" 14104 " bp:0x%p un_retry_timeid:0x%p\n", 14105 un, bp, un->un_retry_timeid); 14106 } 14107 } else { 14108 /* 14109 * We only get in here if there is already another command 14110 * waiting to be retried. 
In this case, we just put the 14111 * given command onto the wait queue, so it can be transported 14112 * after the current retry command has completed. 14113 * 14114 * Also we have to make sure that if the command at the head 14115 * of the wait queue is the un_failfast_bp, that we do not 14116 * put ahead of it any other commands that are to be retried. 14117 */ 14118 if ((un->un_failfast_bp != NULL) && 14119 (un->un_failfast_bp == un->un_waitq_headp)) { 14120 /* 14121 * Enqueue this command AFTER the first command on 14122 * the wait queue (which is also un_failfast_bp). 14123 */ 14124 bp->av_forw = un->un_waitq_headp->av_forw; 14125 un->un_waitq_headp->av_forw = bp; 14126 if (un->un_waitq_headp == un->un_waitq_tailp) { 14127 un->un_waitq_tailp = bp; 14128 } 14129 } else { 14130 /* Enqueue this command at the head of the waitq. */ 14131 bp->av_forw = un->un_waitq_headp; 14132 un->un_waitq_headp = bp; 14133 if (un->un_waitq_tailp == NULL) { 14134 un->un_waitq_tailp = bp; 14135 } 14136 } 14137 14138 if (statp == NULL) { 14139 statp = kstat_waitq_enter; 14140 } 14141 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14142 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14143 } 14144 14145 done: 14146 if (statp != NULL) { 14147 SD_UPDATE_KSTATS(un, statp, bp); 14148 } 14149 14150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14151 "sd_set_retry_bp: exit un:0x%p\n", un); 14152 } 14153 14154 14155 /* 14156 * Function: sd_start_retry_command 14157 * 14158 * Description: Start the command that has been waiting on the target's 14159 * retry queue. Called from timeout(9F) context after the 14160 * retry delay interval has expired. 14161 * 14162 * Arguments: arg - pointer to associated softstate for the device. 14163 * 14164 * Context: timeout(9F) thread context. May not sleep. 14165 */ 14166 14167 static void 14168 sd_start_retry_command(void *arg) 14169 { 14170 struct sd_lun *un = arg; 14171 14172 ASSERT(un != NULL); 14173 ASSERT(!mutex_owned(SD_MUTEX(un))); 14174 14175 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14176 "sd_start_retry_command: entry\n"); 14177 14178 mutex_enter(SD_MUTEX(un)); 14179 14180 un->un_retry_timeid = NULL; 14181 14182 if (un->un_retry_bp != NULL) { 14183 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14184 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14185 un, un->un_retry_bp); 14186 sd_start_cmds(un, un->un_retry_bp); 14187 } 14188 14189 mutex_exit(SD_MUTEX(un)); 14190 14191 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14192 "sd_start_retry_command: exit\n"); 14193 } 14194 14195 14196 /* 14197 * Function: sd_start_direct_priority_command 14198 * 14199 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14200 * received TRAN_BUSY when we called scsi_transport() to send it 14201 * to the underlying HBA. This function is called from timeout(9F) 14202 * context after the delay interval has expired. 14203 * 14204 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14205 * 14206 * Context: timeout(9F) thread context. May not sleep. 
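 *
 *		The callback is armed from the TRAN_BUSY handling in
 *		sd_start_cmds(), e.g.:
 *
 *		un->un_direct_priority_timeid =
 *		    timeout(sd_start_direct_priority_command,
 *		    bp, SD_BSY_TIMEOUT / 500);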
14207 */ 14208 14209 static void 14210 sd_start_direct_priority_command(void *arg) 14211 { 14212 struct buf *priority_bp = arg; 14213 struct sd_lun *un; 14214 14215 ASSERT(priority_bp != NULL); 14216 un = SD_GET_UN(priority_bp); 14217 ASSERT(un != NULL); 14218 ASSERT(!mutex_owned(SD_MUTEX(un))); 14219 14220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14221 "sd_start_direct_priority_command: entry\n"); 14222 14223 mutex_enter(SD_MUTEX(un)); 14224 un->un_direct_priority_timeid = NULL; 14225 sd_start_cmds(un, priority_bp); 14226 mutex_exit(SD_MUTEX(un)); 14227 14228 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14229 "sd_start_direct_priority_command: exit\n"); 14230 } 14231 14232 14233 /* 14234 * Function: sd_send_request_sense_command 14235 * 14236 * Description: Sends a REQUEST SENSE command to the target 14237 * 14238 * Context: May be called from interrupt context. 14239 */ 14240 14241 static void 14242 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14243 struct scsi_pkt *pktp) 14244 { 14245 ASSERT(bp != NULL); 14246 ASSERT(un != NULL); 14247 ASSERT(mutex_owned(SD_MUTEX(un))); 14248 14249 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14250 "entry: buf:0x%p\n", bp); 14251 14252 /* 14253 * If we are syncing or dumping, then fail the command to avoid a 14254 * recursive callback into scsi_transport(). Also fail the command 14255 * if we are suspended (legacy behavior). 14256 */ 14257 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14258 (un->un_state == SD_STATE_DUMPING)) { 14259 sd_return_failed_command(un, bp, EIO); 14260 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14261 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14262 return; 14263 } 14264 14265 /* 14266 * Retry the failed command and don't issue the request sense if: 14267 * 1) the sense buf is busy 14268 * 2) we have 1 or more outstanding commands on the target 14269 * (the sense data will be cleared or invalidated anyway) 14270 * 14271 * Note: There could be an issue with not checking a retry limit here; 14272 * the problem is determining which retry limit to check. 14273 */ 14274 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14275 /* Don't retry if the command is flagged as non-retryable */ 14276 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14277 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14278 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14279 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14280 "sd_send_request_sense_command: " 14281 "at full throttle, retrying exit\n"); 14282 } else { 14283 sd_return_failed_command(un, bp, EIO); 14284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14285 "sd_send_request_sense_command: " 14286 "at full throttle, non-retryable exit\n"); 14287 } 14288 return; 14289 } 14290 14291 sd_mark_rqs_busy(un, bp); 14292 sd_start_cmds(un, un->un_rqs_bp); 14293 14294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14295 "sd_send_request_sense_command: exit\n"); 14296 } 14297 14298 14299 /* 14300 * Function: sd_mark_rqs_busy 14301 * 14302 * Description: Indicate that the request sense bp for this instance is 14303 * in use.
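 *
 *		Sketch of the RQS ownership protocol (illustrative), as
 *		used by sd_send_request_sense_command() and sdintr():
 *
 *		sd_mark_rqs_busy(un, bp);	claim un_rqs_bp; the failing
 *						bp is flagged FLAG_SENSING
 *		sd_start_cmds(un, un->un_rqs_bp);   transport REQUEST SENSE
 *		...sense data arrives via sdintr()...
 *		bp = sd_mark_rqs_idle(un, sense_xp); recover the original bp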
14304 * 14305 * Context: May be called under interrupt context 14306 */ 14307 14308 static void 14309 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14310 { 14311 struct sd_xbuf *sense_xp; 14312 14313 ASSERT(un != NULL); 14314 ASSERT(bp != NULL); 14315 ASSERT(mutex_owned(SD_MUTEX(un))); 14316 ASSERT(un->un_sense_isbusy == 0); 14317 14318 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14319 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14320 14321 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14322 ASSERT(sense_xp != NULL); 14323 14324 SD_INFO(SD_LOG_IO, un, 14325 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14326 14327 ASSERT(sense_xp->xb_pktp != NULL); 14328 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14329 == (FLAG_SENSING | FLAG_HEAD)); 14330 14331 un->un_sense_isbusy = 1; 14332 un->un_rqs_bp->b_resid = 0; 14333 sense_xp->xb_pktp->pkt_resid = 0; 14334 sense_xp->xb_pktp->pkt_reason = 0; 14335 14336 /* So we can get back the bp at interrupt time! */ 14337 sense_xp->xb_sense_bp = bp; 14338 14339 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14340 14341 /* 14342 * Mark this buf as awaiting sense data. (This is already set in 14343 * the pkt_flags for the RQS packet.) 14344 */ 14345 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14346 14347 sense_xp->xb_retry_count = 0; 14348 sense_xp->xb_victim_retry_count = 0; 14349 sense_xp->xb_ua_retry_count = 0; 14350 sense_xp->xb_nr_retry_count = 0; 14351 sense_xp->xb_dma_resid = 0; 14352 14353 /* Clean up the fields for auto-request sense */ 14354 sense_xp->xb_sense_status = 0; 14355 sense_xp->xb_sense_state = 0; 14356 sense_xp->xb_sense_resid = 0; 14357 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14358 14359 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14360 } 14361 14362 14363 /* 14364 * Function: sd_mark_rqs_idle 14365 * 14366 * Description: SD_MUTEX must be held continuously through this routine 14367 * to prevent reuse of the rqs struct before the caller can 14368 * complete its processing. 14369 * 14370 * Return Code: Pointer to the original buf that was awaiting sense data 14371 * 14372 * Context: May be called under interrupt context 14373 */ 14374 14375 static struct buf * 14376 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14377 { 14378 struct buf *bp; 14379 ASSERT(un != NULL); 14380 ASSERT(sense_xp != NULL); 14381 ASSERT(mutex_owned(SD_MUTEX(un))); 14382 ASSERT(un->un_sense_isbusy != 0); 14383 14384 un->un_sense_isbusy = 0; 14385 bp = sense_xp->xb_sense_bp; 14386 sense_xp->xb_sense_bp = NULL; 14387 14388 /* This pkt is no longer interested in getting sense data */ 14389 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14390 14391 return (bp); 14392 } 14393 14394 14395 14396 /* 14397 * Function: sd_alloc_rqs 14398 * 14399 * Description: Set up the unit to receive auto request sense data 14400 * 14401 * Return Code: DDI_SUCCESS or DDI_FAILURE 14402 * 14403 * Context: Called under attach(9E) context 14404 */ 14405 14406 static int 14407 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14408 { 14409 struct sd_xbuf *xp; 14410 14411 ASSERT(un != NULL); 14412 ASSERT(!mutex_owned(SD_MUTEX(un))); 14413 ASSERT(un->un_rqs_bp == NULL); 14414 ASSERT(un->un_rqs_pktp == NULL); 14415 14416 /* 14417 * First allocate the required buf and scsi_pkt structs, then set up 14418 * the CDB in the scsi_pkt for a REQUEST SENSE command.
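 *
 * For reference, the scsi_setup_cdb() call below builds a 6-byte
 * group 0 CDB; assuming the standard REQUEST SENSE (opcode 0x03)
 * layout, the result is { SCMD_REQUEST_SENSE, 0, 0, 0,
 * MAX_SENSE_LENGTH, 0 }, with the allocation length in byte 4.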
14419 */ 14420 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14421 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14422 if (un->un_rqs_bp == NULL) { 14423 return (DDI_FAILURE); 14424 } 14425 14426 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14427 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14428 14429 if (un->un_rqs_pktp == NULL) { 14430 sd_free_rqs(un); 14431 return (DDI_FAILURE); 14432 } 14433 14434 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14435 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14436 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14437 14438 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14439 14440 /* Set up the other needed members in the ARQ scsi_pkt. */ 14441 un->un_rqs_pktp->pkt_comp = sdintr; 14442 un->un_rqs_pktp->pkt_time = sd_io_time; 14443 un->un_rqs_pktp->pkt_flags |= 14444 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14445 14446 /* 14447 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14448 * provide any initpkt, destroypkt routines as we take care of 14449 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14450 */ 14451 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14452 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14453 xp->xb_pktp = un->un_rqs_pktp; 14454 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14455 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14456 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14457 14458 /* 14459 * Save the pointer to the request sense private bp so it can 14460 * be retrieved in sdintr. 14461 */ 14462 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14463 ASSERT(un->un_rqs_bp->b_private == xp); 14464 14465 /* 14466 * See if the HBA supports auto-request sense for the specified 14467 * target/lun. If it does, then try to enable it (if not already 14468 * enabled). 14469 * 14470 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14471 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14472 * return success. However, in both of these cases ARQ is always 14473 * enabled and scsi_ifgetcap will always return true. The best approach 14474 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14475 * 14476 * In a third case, the HBA (adp) always returns enabled on 14477 * scsi_ifgetcap even when ARQ is not enabled; there the best approach 14478 * is to issue a scsi_ifsetcap() followed by a scsi_ifgetcap(). 14479 * Note: this case circumvents an Adaptec bug. (x86 only) 14480 */ 14481 14482 if (un->un_f_is_fibre == TRUE) { 14483 un->un_f_arq_enabled = TRUE; 14484 } else { 14485 #if defined(__i386) || defined(__amd64) 14486 /* 14487 * Circumvent the Adaptec bug, remove this code when 14488 * the bug is fixed 14489 */ 14490 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14491 #endif 14492 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14493 case 0: 14494 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14495 "sd_alloc_rqs: HBA supports ARQ\n"); 14496 /* 14497 * ARQ is supported by this HBA but currently is not 14498 * enabled. Attempt to enable it and if successful then 14499 * mark this instance as ARQ enabled.
14500 */ 14501 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14502 == 1) { 14503 /* Successfully enabled ARQ in the HBA */ 14504 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14505 "sd_alloc_rqs: ARQ enabled\n"); 14506 un->un_f_arq_enabled = TRUE; 14507 } else { 14508 /* Could not enable ARQ in the HBA */ 14509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14510 "sd_alloc_rqs: failed ARQ enable\n"); 14511 un->un_f_arq_enabled = FALSE; 14512 } 14513 break; 14514 case 1: 14515 /* 14516 * ARQ is supported by this HBA and is already enabled. 14517 * Just mark ARQ as enabled for this instance. 14518 */ 14519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14520 "sd_alloc_rqs: ARQ already enabled\n"); 14521 un->un_f_arq_enabled = TRUE; 14522 break; 14523 default: 14524 /* 14525 * ARQ is not supported by this HBA; disable it for this 14526 * instance. 14527 */ 14528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14529 "sd_alloc_rqs: HBA does not support ARQ\n"); 14530 un->un_f_arq_enabled = FALSE; 14531 break; 14532 } 14533 } 14534 14535 return (DDI_SUCCESS); 14536 } 14537 14538 14539 /* 14540 * Function: sd_free_rqs 14541 * 14542 * Description: Cleanup for the per-instance RQS command. 14543 * 14544 * Context: Kernel thread context 14545 */ 14546 14547 static void 14548 sd_free_rqs(struct sd_lun *un) 14549 { 14550 ASSERT(un != NULL); 14551 14552 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14553 14554 /* 14555 * If consistent memory is bound to a scsi_pkt, the pkt 14556 * has to be destroyed *before* freeing the consistent memory. 14557 * Don't change the sequence of these operations. 14558 * scsi_destroy_pkt() might access memory that has already been 14559 * freed by scsi_free_consistent_buf(), which isn't allowed. 14560 */ 14561 if (un->un_rqs_pktp != NULL) { 14562 scsi_destroy_pkt(un->un_rqs_pktp); 14563 un->un_rqs_pktp = NULL; 14564 } 14565 14566 if (un->un_rqs_bp != NULL) { 14567 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14568 if (xp != NULL) { 14569 kmem_free(xp, sizeof (struct sd_xbuf)); 14570 } 14571 scsi_free_consistent_buf(un->un_rqs_bp); 14572 un->un_rqs_bp = NULL; 14573 } 14574 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14575 } 14576 14577 14578 14579 /* 14580 * Function: sd_reduce_throttle 14581 * 14582 * Description: Reduces the maximum # of outstanding commands on a 14583 * target to the current number of outstanding commands. 14584 * Queues a timeout(9F) callback to restore the limit 14585 * after a specified interval has elapsed. 14586 * Typically used when we get a TRAN_BUSY return code 14587 * back from scsi_transport().
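 *
 * A worked example with illustrative numbers (not taken from the
 * source): if un_throttle is 32 and 5 commands are in transport when
 * scsi_transport() returns TRAN_BUSY, un_busy_throttle saves 32 and
 * un_throttle drops to 5; when the timeout fires,
 * sd_restore_throttle() reinstates the saved value in one step. For
 * SD_THROTTLE_QFULL, un_busy_throttle is cleared instead, so the
 * restore path ratchets the throttle back up incrementally.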
14588 * 14589 * Arguments: un - ptr to the sd_lun softstate struct 14590 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14591 * 14592 * Context: May be called from interrupt context 14593 */ 14594 14595 static void 14596 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14597 { 14598 ASSERT(un != NULL); 14599 ASSERT(mutex_owned(SD_MUTEX(un))); 14600 ASSERT(un->un_ncmds_in_transport >= 0); 14601 14602 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14603 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14604 un, un->un_throttle, un->un_ncmds_in_transport); 14605 14606 if (un->un_throttle > 1) { 14607 if (un->un_f_use_adaptive_throttle == TRUE) { 14608 switch (throttle_type) { 14609 case SD_THROTTLE_TRAN_BUSY: 14610 if (un->un_busy_throttle == 0) { 14611 un->un_busy_throttle = un->un_throttle; 14612 } 14613 break; 14614 case SD_THROTTLE_QFULL: 14615 un->un_busy_throttle = 0; 14616 break; 14617 default: 14618 ASSERT(FALSE); 14619 } 14620 14621 if (un->un_ncmds_in_transport > 0) { 14622 un->un_throttle = un->un_ncmds_in_transport; 14623 } 14624 14625 } else { 14626 if (un->un_ncmds_in_transport == 0) { 14627 un->un_throttle = 1; 14628 } else { 14629 un->un_throttle = un->un_ncmds_in_transport; 14630 } 14631 } 14632 } 14633 14634 /* Reschedule the timeout if none is currently active */ 14635 if (un->un_reset_throttle_timeid == NULL) { 14636 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14637 un, SD_THROTTLE_RESET_INTERVAL); 14638 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14639 "sd_reduce_throttle: timeout scheduled!\n"); 14640 } 14641 14642 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14643 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14644 } 14645 14646 14647 14648 /* 14649 * Function: sd_restore_throttle 14650 * 14651 * Description: Callback function for timeout(9F). Resets the current 14652 * value of un->un_throttle to its default. 14653 * 14654 * Arguments: arg - pointer to associated softstate for the device. 14655 * 14656 * Context: May be called from interrupt context 14657 */ 14658 14659 static void 14660 sd_restore_throttle(void *arg) 14661 { 14662 struct sd_lun *un = arg; 14663 14664 ASSERT(un != NULL); 14665 ASSERT(!mutex_owned(SD_MUTEX(un))); 14666 14667 mutex_enter(SD_MUTEX(un)); 14668 14669 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14670 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14671 14672 un->un_reset_throttle_timeid = NULL; 14673 14674 if (un->un_f_use_adaptive_throttle == TRUE) { 14675 /* 14676 * If un_busy_throttle is nonzero, then it contains the 14677 * value un_throttle had when we got a TRAN_BUSY back 14678 * from scsi_transport(). We want to revert to this 14679 * value. 14680 * 14681 * In the QFULL case, the throttle limit will incrementally 14682 * increase until it reaches max throttle. 14683 */ 14684 if (un->un_busy_throttle > 0) { 14685 un->un_throttle = un->un_busy_throttle; 14686 un->un_busy_throttle = 0; 14687 } else { 14688 /* 14689 * Increase the throttle by 10% to open the gate 14690 * slowly; schedule another restore if the saved 14691 * throttle has not yet been reached. 14692 */ 14693 short throttle; 14694 if (sd_qfull_throttle_enable) { 14695 throttle = un->un_throttle + 14696 max((un->un_throttle / 10), 1); 14697 un->un_throttle = 14698 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 14700 if (un->un_throttle < un->un_saved_throttle) { 14701 un->un_reset_throttle_timeid = 14702 timeout(sd_restore_throttle, 14703 un, 14704 SD_QFULL_THROTTLE_RESET_INTERVAL); 14705 } 14706 } 14707 } 14708 14709 /* 14710 * If un_throttle has fallen below the low-water mark, we 14711 * restore the maximum value here (and allow it to ratchet 14712 * down again if necessary). 14713 */ 14714 if (un->un_throttle < un->un_min_throttle) { 14715 un->un_throttle = un->un_saved_throttle; 14716 } 14717 } else { 14718 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14719 "restoring limit from 0x%x to 0x%x\n", 14720 un->un_throttle, un->un_saved_throttle); 14721 un->un_throttle = un->un_saved_throttle; 14722 } 14723 14724 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14725 "sd_restore_throttle: calling sd_start_cmds!\n"); 14726 14727 sd_start_cmds(un, NULL); 14728 14729 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14730 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14731 un, un->un_throttle); 14732 14733 mutex_exit(SD_MUTEX(un)); 14734 14735 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14736 } 14737 14738 /* 14739 * Function: sdrunout 14740 * 14741 * Description: Callback routine for scsi_init_pkt when a resource allocation 14742 * fails. 14743 * 14744 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14745 * soft state instance. 14746 * 14747 * Return Code: The scsi_init_pkt routine allows for the callback function to 14748 * return a 0 indicating the callback should be rescheduled or a 1 14749 * indicating not to reschedule. This routine always returns 1 14750 * because the driver always provides a callback function to 14751 * scsi_init_pkt. This results in a callback always being scheduled 14752 * (via the scsi_init_pkt callback implementation) if a resource 14753 * failure occurs. 14754 * 14755 * Context: This callback function may not block or call routines that block 14756 * 14757 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14758 * request persisting at the head of the list which cannot be 14759 * satisfied even after multiple retries. In the future the driver 14760 * may implement some type of maximum runout count before failing 14761 * an I/O. 14762 */ 14763 14764 static int 14765 sdrunout(caddr_t arg) 14766 { 14767 struct sd_lun *un = (struct sd_lun *)arg; 14768 14769 ASSERT(un != NULL); 14770 ASSERT(!mutex_owned(SD_MUTEX(un))); 14771 14772 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14773 14774 mutex_enter(SD_MUTEX(un)); 14775 sd_start_cmds(un, NULL); 14776 mutex_exit(SD_MUTEX(un)); 14777 /* 14778 * This callback routine always returns 1 (i.e. do not reschedule) 14779 * because we always specify sdrunout as the callback handler for 14780 * scsi_init_pkt inside the call to sd_start_cmds. 14781 */ 14782 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14783 return (1); 14784 } 14785 14786 14787 /* 14788 * Function: sdintr 14789 * 14790 * Description: Completion callback routine for scsi_pkt(9S) structs 14791 * sent to the HBA driver via scsi_transport(9F).
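 *
 * The body below dispatches, roughly in order: fault injection (when
 * compiled in), CMD_DEV_GONE failures, auto-request-sense data
 * (STATE_ARQ_DONE), the driver's own REQUEST SENSE pkt
 * (FLAG_SENSING), the hot path for CMD_CMPLT with STATUS_GOOD, and
 * finally the pkt_reason/pkt_status error handlers.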
14792 * 14793 * Context: Interrupt context 14794 */ 14795 14796 static void 14797 sdintr(struct scsi_pkt *pktp) 14798 { 14799 struct buf *bp; 14800 struct sd_xbuf *xp; 14801 struct sd_lun *un; 14802 size_t actual_len; 14803 14804 ASSERT(pktp != NULL); 14805 bp = (struct buf *)pktp->pkt_private; 14806 ASSERT(bp != NULL); 14807 xp = SD_GET_XBUF(bp); 14808 ASSERT(xp != NULL); 14809 ASSERT(xp->xb_pktp != NULL); 14810 un = SD_GET_UN(bp); 14811 ASSERT(un != NULL); 14812 ASSERT(!mutex_owned(SD_MUTEX(un))); 14813 14814 #ifdef SD_FAULT_INJECTION 14815 14816 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14817 /* SD FaultInjection */ 14818 sd_faultinjection(pktp); 14819 14820 #endif /* SD_FAULT_INJECTION */ 14821 14822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14823 " xp:0x%p, un:0x%p\n", bp, xp, un); 14824 14825 mutex_enter(SD_MUTEX(un)); 14826 14827 /* Reduce the count of the #commands currently in transport */ 14828 un->un_ncmds_in_transport--; 14829 ASSERT(un->un_ncmds_in_transport >= 0); 14830 14831 /* Increment counter to indicate that the callback routine is active */ 14832 un->un_in_callback++; 14833 14834 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14835 14836 #ifdef SDDEBUG 14837 if (bp == un->un_retry_bp) { 14838 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14839 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14840 un, un->un_retry_bp, un->un_ncmds_in_transport); 14841 } 14842 #endif 14843 14844 /* 14845 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14846 * state if needed. 14847 */ 14848 if (pktp->pkt_reason == CMD_DEV_GONE) { 14849 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14850 "Command failed to complete...Device is gone\n"); 14851 if (un->un_mediastate != DKIO_DEV_GONE) { 14852 un->un_mediastate = DKIO_DEV_GONE; 14853 cv_broadcast(&un->un_state_cv); 14854 } 14855 sd_return_failed_command(un, bp, EIO); 14856 goto exit; 14857 } 14858 14859 if (pktp->pkt_state & STATE_XARQ_DONE) { 14860 SD_TRACE(SD_LOG_COMMON, un, 14861 "sdintr: extra sense data received. pkt=%p\n", pktp); 14862 } 14863 14864 /* 14865 * First see if the pkt has auto-request sense data with it.... 14866 * Look at the packet state first so we don't take a performance 14867 * hit looking at the arq enabled flag unless absolutely necessary. 14868 */ 14869 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14870 (un->un_f_arq_enabled == TRUE)) { 14871 /* 14872 * The HBA did an auto request sense for this command so check 14873 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14874 * driver command that should not be retried. 14875 */ 14876 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14877 /* 14878 * Save the relevant sense info into the xp for the 14879 * original cmd. 
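 * For example, assuming the usual values from the SCSI headers
 * (MAX_SENSE_LENGTH of 255, SENSE_LENGTH of 20): a normal ARQ
 * returning a full fixed-format block has sts_rqpkt_resid == 0, so
 * actual_len = 20 - 0 = 20; an extended ARQ (STATE_XARQ_DONE) that
 * transfers 32 bytes leaves a resid of 223, so
 * actual_len = 255 - 223 = 32.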
14880 */ 14881 struct scsi_arq_status *asp; 14882 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14883 xp->xb_sense_status = 14884 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14885 xp->xb_sense_state = asp->sts_rqpkt_state; 14886 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14887 if (pktp->pkt_state & STATE_XARQ_DONE) { 14888 actual_len = MAX_SENSE_LENGTH - 14889 xp->xb_sense_resid; 14890 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14891 MAX_SENSE_LENGTH); 14892 } else { 14893 if (xp->xb_sense_resid > SENSE_LENGTH) { 14894 actual_len = MAX_SENSE_LENGTH - 14895 xp->xb_sense_resid; 14896 } else { 14897 actual_len = SENSE_LENGTH - 14898 xp->xb_sense_resid; 14899 } 14900 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14901 xp->xb_sense_resid = 14902 (int)(((struct uscsi_cmd *) 14903 (xp->xb_pktinfo))-> 14904 uscsi_rqlen) - actual_len; 14905 } 14906 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14907 SENSE_LENGTH); 14908 } 14909 14910 /* fail the command */ 14911 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14912 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14913 sd_return_failed_command(un, bp, EIO); 14914 goto exit; 14915 } 14916 14917 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14918 /* 14919 * We want to either retry or fail this command, so free 14920 * the DMA resources here. If we retry the command then 14921 * the DMA resources will be reallocated in sd_start_cmds(). 14922 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14923 * causes the *entire* transfer to start over again from the 14924 * beginning of the request, even for PARTIAL chunks that 14925 * have already transferred successfully. 14926 */ 14927 if ((un->un_f_is_fibre == TRUE) && 14928 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14929 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14930 scsi_dmafree(pktp); 14931 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14932 } 14933 #endif 14934 14935 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14936 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14937 14938 sd_handle_auto_request_sense(un, bp, xp, pktp); 14939 goto exit; 14940 } 14941 14942 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14943 if (pktp->pkt_flags & FLAG_SENSING) { 14944 /* This pktp is from the unit's REQUEST_SENSE command */ 14945 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14946 "sdintr: sd_handle_request_sense\n"); 14947 sd_handle_request_sense(un, bp, xp, pktp); 14948 goto exit; 14949 } 14950 14951 /* 14952 * Check to see if the command successfully completed as requested; 14953 * this is the most common case (and also the hot performance path). 14954 * 14955 * Requirements for successful completion are: 14956 * pkt_reason is CMD_CMPLT and packet status is status good. 14957 * In addition: 14958 * - A residual of zero indicates successful completion no matter what 14959 * the command is. 14960 * - If the residual is not zero and the command is not a read or 14961 * write, then it's still defined as successful completion. In other 14962 * words, if the command is a read or write the residual must be 14963 * zero for successful completion. 14964 * - If the residual is not zero and the command is a read or 14965 * write, and it's a USCSICMD, then it's still defined as 14966 * successful completion. 14967 */ 14968 if ((pktp->pkt_reason == CMD_CMPLT) && 14969 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14970 14971 /* 14972 * Since this command is returned with a good status, we 14973 * can reset the count for Sonoma failover. 
14974 */ 14975 un->un_sonoma_failure_count = 0; 14976 14977 /* 14978 * Return all USCSI commands on good status 14979 */ 14980 if (pktp->pkt_resid == 0) { 14981 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14982 "sdintr: returning command for resid == 0\n"); 14983 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14984 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14985 SD_UPDATE_B_RESID(bp, pktp); 14986 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14987 "sdintr: returning command for resid != 0\n"); 14988 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14989 SD_UPDATE_B_RESID(bp, pktp); 14990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14991 "sdintr: returning uscsi command\n"); 14992 } else { 14993 goto not_successful; 14994 } 14995 sd_return_command(un, bp); 14996 14997 /* 14998 * Decrement counter to indicate that the callback routine 14999 * is done. 15000 */ 15001 un->un_in_callback--; 15002 ASSERT(un->un_in_callback >= 0); 15003 mutex_exit(SD_MUTEX(un)); 15004 15005 return; 15006 } 15007 15008 not_successful: 15009 15010 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15011 /* 15012 * The following is based upon knowledge of the underlying transport 15013 * and its use of DMA resources. This code should be removed when 15014 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15015 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15016 * and sd_start_cmds(). 15017 * 15018 * Free any DMA resources associated with this command if there 15019 * is a chance it could be retried or enqueued for later retry. 15020 * If we keep the DMA binding then mpxio cannot reissue the 15021 * command on another path whenever a path failure occurs. 15022 * 15023 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15024 * causes the *entire* transfer to start over again from the 15025 * beginning of the request, even for PARTIAL chunks that 15026 * have already transferred successfully. 15027 * 15028 * This is only done for non-uscsi commands (and also skipped for the 15029 * driver's internal RQS command). Also just do this for Fibre Channel 15030 * devices as these are the only ones that support mpxio. 15031 */ 15032 if ((un->un_f_is_fibre == TRUE) && 15033 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15034 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15035 scsi_dmafree(pktp); 15036 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15037 } 15038 #endif 15039 15040 /* 15041 * The command did not successfully complete as requested so check 15042 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15043 * driver command that should not be retried so just return. If 15044 * FLAG_DIAGNOSE is not set the error will be processed below. 15045 */ 15046 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15047 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15048 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15049 /* 15050 * Issue a request sense if a check condition caused the error 15051 * (we handle the auto request sense case above), otherwise 15052 * just fail the command. 15053 */ 15054 if ((pktp->pkt_reason == CMD_CMPLT) && 15055 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15056 sd_send_request_sense_command(un, bp, pktp); 15057 } else { 15058 sd_return_failed_command(un, bp, EIO); 15059 } 15060 goto exit; 15061 } 15062 15063 /* 15064 * The command did not successfully complete as requested so process 15065 * the error, retry, and/or attempt recovery. 
15066 */ 15067 switch (pktp->pkt_reason) { 15068 case CMD_CMPLT: 15069 switch (SD_GET_PKT_STATUS(pktp)) { 15070 case STATUS_GOOD: 15071 /* 15072 * The command completed successfully with a non-zero 15073 * residual 15074 */ 15075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15076 "sdintr: STATUS_GOOD \n"); 15077 sd_pkt_status_good(un, bp, xp, pktp); 15078 break; 15079 15080 case STATUS_CHECK: 15081 case STATUS_TERMINATED: 15082 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15083 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15084 sd_pkt_status_check_condition(un, bp, xp, pktp); 15085 break; 15086 15087 case STATUS_BUSY: 15088 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15089 "sdintr: STATUS_BUSY\n"); 15090 sd_pkt_status_busy(un, bp, xp, pktp); 15091 break; 15092 15093 case STATUS_RESERVATION_CONFLICT: 15094 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15095 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15096 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15097 break; 15098 15099 case STATUS_QFULL: 15100 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15101 "sdintr: STATUS_QFULL\n"); 15102 sd_pkt_status_qfull(un, bp, xp, pktp); 15103 break; 15104 15105 case STATUS_MET: 15106 case STATUS_INTERMEDIATE: 15107 case STATUS_SCSI2: 15108 case STATUS_INTERMEDIATE_MET: 15109 case STATUS_ACA_ACTIVE: 15110 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15111 "Unexpected SCSI status received: 0x%x\n", 15112 SD_GET_PKT_STATUS(pktp)); 15113 sd_return_failed_command(un, bp, EIO); 15114 break; 15115 15116 default: 15117 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15118 "Invalid SCSI status received: 0x%x\n", 15119 SD_GET_PKT_STATUS(pktp)); 15120 sd_return_failed_command(un, bp, EIO); 15121 break; 15122 15123 } 15124 break; 15125 15126 case CMD_INCOMPLETE: 15127 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15128 "sdintr: CMD_INCOMPLETE\n"); 15129 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15130 break; 15131 case CMD_TRAN_ERR: 15132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15133 "sdintr: CMD_TRAN_ERR\n"); 15134 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15135 break; 15136 case CMD_RESET: 15137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15138 "sdintr: CMD_RESET \n"); 15139 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15140 break; 15141 case CMD_ABORTED: 15142 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15143 "sdintr: CMD_ABORTED \n"); 15144 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15145 break; 15146 case CMD_TIMEOUT: 15147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15148 "sdintr: CMD_TIMEOUT\n"); 15149 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15150 break; 15151 case CMD_UNX_BUS_FREE: 15152 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15153 "sdintr: CMD_UNX_BUS_FREE \n"); 15154 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15155 break; 15156 case CMD_TAG_REJECT: 15157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15158 "sdintr: CMD_TAG_REJECT\n"); 15159 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15160 break; 15161 default: 15162 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15163 "sdintr: default\n"); 15164 sd_pkt_reason_default(un, bp, xp, pktp); 15165 break; 15166 } 15167 15168 exit: 15169 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15170 15171 /* Decrement counter to indicate that the callback routine is done. */ 15172 un->un_in_callback--; 15173 ASSERT(un->un_in_callback >= 0); 15174 15175 /* 15176 * At this point, the pkt has been dispatched, i.e., it is either 15177 * being retried or has been returned to its caller and should 15178 * not be referenced.
15179 */ 15180 15181 mutex_exit(SD_MUTEX(un)); 15182 } 15183 15184 15185 /* 15186 * Function: sd_print_incomplete_msg 15187 * 15188 * Description: Prints the error message for a CMD_INCOMPLETE error. 15189 * 15190 * Arguments: un - ptr to associated softstate for the device. 15191 * bp - ptr to the buf(9S) for the command. 15192 * arg - message string ptr 15193 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15194 * or SD_NO_RETRY_ISSUED. 15195 * 15196 * Context: May be called under interrupt context 15197 */ 15198 15199 static void 15200 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15201 { 15202 struct scsi_pkt *pktp; 15203 char *msgp; 15204 char *cmdp = arg; 15205 15206 ASSERT(un != NULL); 15207 ASSERT(mutex_owned(SD_MUTEX(un))); 15208 ASSERT(bp != NULL); 15209 ASSERT(arg != NULL); 15210 pktp = SD_GET_PKTP(bp); 15211 ASSERT(pktp != NULL); 15212 15213 switch (code) { 15214 case SD_DELAYED_RETRY_ISSUED: 15215 case SD_IMMEDIATE_RETRY_ISSUED: 15216 msgp = "retrying"; 15217 break; 15218 case SD_NO_RETRY_ISSUED: 15219 default: 15220 msgp = "giving up"; 15221 break; 15222 } 15223 15224 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15225 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15226 "incomplete %s- %s\n", cmdp, msgp); 15227 } 15228 } 15229 15230 15231 15232 /* 15233 * Function: sd_pkt_status_good 15234 * 15235 * Description: Processing for a STATUS_GOOD code in pkt_status. 15236 * 15237 * Context: May be called under interrupt context 15238 */ 15239 15240 static void 15241 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15242 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15243 { 15244 char *cmdp; 15245 15246 ASSERT(un != NULL); 15247 ASSERT(mutex_owned(SD_MUTEX(un))); 15248 ASSERT(bp != NULL); 15249 ASSERT(xp != NULL); 15250 ASSERT(pktp != NULL); 15251 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15252 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15253 ASSERT(pktp->pkt_resid != 0); 15254 15255 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15256 15257 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15258 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15259 case SCMD_READ: 15260 cmdp = "read"; 15261 break; 15262 case SCMD_WRITE: 15263 cmdp = "write"; 15264 break; 15265 default: 15266 SD_UPDATE_B_RESID(bp, pktp); 15267 sd_return_command(un, bp); 15268 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15269 return; 15270 } 15271 15272 /* 15273 * See if we can retry the read/write, preferably immediately. 15274 * If retries are exhausted, then sd_retry_command() will update 15275 * the b_resid count. 15276 */ 15277 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15278 cmdp, EIO, (clock_t)0, NULL); 15279 15280 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15281 } 15282 15283 15284 15285 15286 15287 /* 15288 * Function: sd_handle_request_sense 15289 * 15290 * Description: Processing for non-auto Request Sense command.
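 *
 * Two packets are involved: the per-instance RQS pkt
 * (un->un_rqs_bp/un->un_rqs_pktp) and the original command that
 * triggered the sense. They are linked via sense_xp->xb_sense_bp,
 * which sd_mark_rqs_busy() pointed back at the original bp, and that
 * link is how the original command is recovered below.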
15291 * 15292 * Arguments: un - ptr to associated softstate 15293 * sense_bp - ptr to buf(9S) for the RQS command 15294 * sense_xp - ptr to the sd_xbuf for the RQS command 15295 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15296 * 15297 * Context: May be called under interrupt context 15298 */ 15299 15300 static void 15301 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15302 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15303 { 15304 struct buf *cmd_bp; /* buf for the original command */ 15305 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15306 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15307 size_t actual_len; /* actual sense data length */ 15308 15309 ASSERT(un != NULL); 15310 ASSERT(mutex_owned(SD_MUTEX(un))); 15311 ASSERT(sense_bp != NULL); 15312 ASSERT(sense_xp != NULL); 15313 ASSERT(sense_pktp != NULL); 15314 15315 /* 15316 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15317 * RQS command and not the original command. 15318 */ 15319 ASSERT(sense_pktp == un->un_rqs_pktp); 15320 ASSERT(sense_bp == un->un_rqs_bp); 15321 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15322 (FLAG_SENSING | FLAG_HEAD)); 15323 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15324 FLAG_SENSING) == FLAG_SENSING); 15325 15326 /* These are the bp, xp, and pktp for the original command */ 15327 cmd_bp = sense_xp->xb_sense_bp; 15328 cmd_xp = SD_GET_XBUF(cmd_bp); 15329 cmd_pktp = SD_GET_PKTP(cmd_bp); 15330 15331 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15332 /* 15333 * The REQUEST SENSE command failed. Release the REQUEST 15334 * SENSE command for re-use, get back the bp for the original 15335 * command, and attempt to re-try the original command if 15336 * FLAG_DIAGNOSE is not set in the original packet. 15337 */ 15338 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15339 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15340 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15341 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15342 NULL, NULL, EIO, (clock_t)0, NULL); 15343 return; 15344 } 15345 } 15346 15347 /* 15348 * Save the relevant sense info into the xp for the original cmd. 15349 * 15350 * Note: if the request sense failed the state info will be zero 15351 * as set in sd_mark_rqs_busy() 15352 */ 15353 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15354 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15355 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15356 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15357 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15358 SENSE_LENGTH)) { 15359 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15360 MAX_SENSE_LENGTH); 15361 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15362 } else { 15363 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15364 SENSE_LENGTH); 15365 if (actual_len < SENSE_LENGTH) { 15366 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15367 } else { 15368 cmd_xp->xb_sense_resid = 0; 15369 } 15370 } 15371 15372 /* 15373 * Free up the RQS command.... 15374 * NOTE: 15375 * Must do this BEFORE calling sd_validate_sense_data! 15376 * sd_validate_sense_data may return the original command in 15377 * which case the pkt will be freed and the flags can no 15378 * longer be touched. 15379 * SD_MUTEX is held through this process until the command 15380 * is dispatched based upon the sense data, so there are 15381 * no race conditions. 
15382 */ 15383 (void) sd_mark_rqs_idle(un, sense_xp); 15384 15385 /* 15386 * For a retryable command see if we have valid sense data, if so then 15387 * turn it over to sd_decode_sense() to figure out the right course of 15388 * action. Just fail a non-retryable command. 15389 */ 15390 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15391 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15392 SD_SENSE_DATA_IS_VALID) { 15393 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15394 } 15395 } else { 15396 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15397 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15398 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15399 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15400 sd_return_failed_command(un, cmd_bp, EIO); 15401 } 15402 } 15403 15404 15405 15406 15407 /* 15408 * Function: sd_handle_auto_request_sense 15409 * 15410 * Description: Processing for auto-request sense information. 15411 * 15412 * Arguments: un - ptr to associated softstate 15413 * bp - ptr to buf(9S) for the command 15414 * xp - ptr to the sd_xbuf for the command 15415 * pktp - ptr to the scsi_pkt(9S) for the command 15416 * 15417 * Context: May be called under interrupt context 15418 */ 15419 15420 static void 15421 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15422 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15423 { 15424 struct scsi_arq_status *asp; 15425 size_t actual_len; 15426 15427 ASSERT(un != NULL); 15428 ASSERT(mutex_owned(SD_MUTEX(un))); 15429 ASSERT(bp != NULL); 15430 ASSERT(xp != NULL); 15431 ASSERT(pktp != NULL); 15432 ASSERT(pktp != un->un_rqs_pktp); 15433 ASSERT(bp != un->un_rqs_bp); 15434 15435 /* 15436 * For auto-request sense, we get a scsi_arq_status back from 15437 * the HBA, with the sense data in the sts_sensedata member. 15438 * The pkt_scbp of the packet points to this scsi_arq_status. 15439 */ 15440 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15441 15442 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15443 /* 15444 * The auto REQUEST SENSE failed; see if we can re-try 15445 * the original command. 15446 */ 15447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15448 "auto request sense failed (reason=%s)\n", 15449 scsi_rname(asp->sts_rqpkt_reason)); 15450 15451 sd_reset_target(un, pktp); 15452 15453 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15454 NULL, NULL, EIO, (clock_t)0, NULL); 15455 return; 15456 } 15457 15458 /* Save the relevant sense info into the xp for the original cmd. */ 15459 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15460 xp->xb_sense_state = asp->sts_rqpkt_state; 15461 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15462 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15463 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15464 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15465 MAX_SENSE_LENGTH); 15466 } else { 15467 if (xp->xb_sense_resid > SENSE_LENGTH) { 15468 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15469 } else { 15470 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15471 } 15472 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15473 xp->xb_sense_resid = (int)(((struct uscsi_cmd *) 15474 (xp->xb_pktinfo))->uscsi_rqlen) - actual_len; 15475 } 15476 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15477 } 15478 15479 /* 15480 * See if we have valid sense data, if so then turn it over to 15481 * sd_decode_sense() to figure out the right course of action. 
15482 */ 15483 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15484 SD_SENSE_DATA_IS_VALID) { 15485 sd_decode_sense(un, bp, xp, pktp); 15486 } 15487 } 15488 15489 15490 /* 15491 * Function: sd_print_sense_failed_msg 15492 * 15493 * Description: Print log message when RQS has failed. 15494 * 15495 * Arguments: un - ptr to associated softstate 15496 * bp - ptr to buf(9S) for the command 15497 * arg - generic message string ptr 15498 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15499 * or SD_NO_RETRY_ISSUED 15500 * 15501 * Context: May be called from interrupt context 15502 */ 15503 15504 static void 15505 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15506 int code) 15507 { 15508 char *msgp = arg; 15509 15510 ASSERT(un != NULL); 15511 ASSERT(mutex_owned(SD_MUTEX(un))); 15512 ASSERT(bp != NULL); 15513 15514 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15516 } 15517 } 15518 15519 15520 /* 15521 * Function: sd_validate_sense_data 15522 * 15523 * Description: Check the given sense data for validity. 15524 * If the sense data is not valid, the command will 15525 * be either failed or retried! 15526 * 15527 * Return Code: SD_SENSE_DATA_IS_INVALID 15528 * SD_SENSE_DATA_IS_VALID 15529 * 15530 * Context: May be called from interrupt context 15531 */ 15532 15533 static int 15534 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15535 size_t actual_len) 15536 { 15537 struct scsi_extended_sense *esp; 15538 struct scsi_pkt *pktp; 15539 char *msgp = NULL; 15540 15541 ASSERT(un != NULL); 15542 ASSERT(mutex_owned(SD_MUTEX(un))); 15543 ASSERT(bp != NULL); 15544 ASSERT(bp != un->un_rqs_bp); 15545 ASSERT(xp != NULL); 15546 15547 pktp = SD_GET_PKTP(bp); 15548 ASSERT(pktp != NULL); 15549 15550 /* 15551 * Check the status of the RQS command (auto or manual). 15552 */ 15553 switch (xp->xb_sense_status & STATUS_MASK) { 15554 case STATUS_GOOD: 15555 break; 15556 15557 case STATUS_RESERVATION_CONFLICT: 15558 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15559 return (SD_SENSE_DATA_IS_INVALID); 15560 15561 case STATUS_BUSY: 15562 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15563 "Busy Status on REQUEST SENSE\n"); 15564 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15565 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15566 return (SD_SENSE_DATA_IS_INVALID); 15567 15568 case STATUS_QFULL: 15569 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15570 "QFULL Status on REQUEST SENSE\n"); 15571 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15572 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15573 return (SD_SENSE_DATA_IS_INVALID); 15574 15575 case STATUS_CHECK: 15576 case STATUS_TERMINATED: 15577 msgp = "Check Condition on REQUEST SENSE\n"; 15578 goto sense_failed; 15579 15580 default: 15581 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15582 goto sense_failed; 15583 } 15584 15585 /* 15586 * See if we got the minimum required amount of sense data. 15587 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15588 * or less. 
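 * For example, a target that returns STATUS_GOOD for the REQUEST
 * SENSE but transfers no data (STATE_XFERRED_DATA clear or
 * actual_len == 0), or that returns fewer than SUN_MIN_SENSE_LENGTH
 * bytes, takes the sense_failed path below and the original command
 * is retried.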
15589 */ 15590 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15591 (actual_len == 0)) { 15592 msgp = "Request Sense couldn't get sense data\n"; 15593 goto sense_failed; 15594 } 15595 15596 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15597 msgp = "Not enough sense information\n"; 15598 goto sense_failed; 15599 } 15600 15601 /* 15602 * We require the extended sense data 15603 */ 15604 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15605 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15606 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15607 static char tmp[8]; 15608 static char buf[148]; 15609 char *p = (char *)(xp->xb_sense_data); 15610 int i; 15611 15612 mutex_enter(&sd_sense_mutex); 15613 (void) strcpy(buf, "undecodable sense information:"); 15614 for (i = 0; i < actual_len; i++) { 15615 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15616 (void) strcpy(&buf[strlen(buf)], tmp); 15617 } 15618 i = strlen(buf); 15619 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15620 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15621 mutex_exit(&sd_sense_mutex); 15622 } 15623 /* Note: Legacy behavior, fail the command with no retry */ 15624 sd_return_failed_command(un, bp, EIO); 15625 return (SD_SENSE_DATA_IS_INVALID); 15626 } 15627 15628 /* 15629 * Check that es_code is valid (es_class concatenated with es_code 15630 * make up the "response code" field). es_class will always be 7, so 15631 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15632 * format. 15633 */ 15634 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15635 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15636 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15637 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15638 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15639 goto sense_failed; 15640 } 15641 15642 return (SD_SENSE_DATA_IS_VALID); 15643 15644 sense_failed: 15645 /* 15646 * If the request sense failed (for whatever reason), attempt 15647 * to retry the original command. 15648 */ 15649 #if defined(__i386) || defined(__amd64) 15650 /* 15651 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15652 * sddef.h for the Sparc platform, while x86 uses one binary 15653 * for both SCSI and FC. 15654 * The delay value used here needs to be adjusted whenever 15655 * SD_RETRY_DELAY changes in sddef.h 15656 */ 15657 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15658 sd_print_sense_failed_msg, msgp, EIO, 15659 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15660 #else 15661 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15662 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15663 #endif 15664 15665 return (SD_SENSE_DATA_IS_INVALID); 15666 } 15667 15668 15669 15670 /* 15671 * Function: sd_decode_sense 15672 * 15673 * Description: Take recovery action(s) when SCSI Sense Data is received. 15674 * 15675 * Context: Interrupt context.
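 *
 * For example, a fixed-format sense block whose sense key byte
 * decodes (via scsi_sense_key()) to 0x6 (KEY_UNIT_ATTENTION) is
 * routed to sd_sense_key_unit_attention(); reserved or vendor-unique
 * keys fall through to sd_sense_key_default().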
15676 */ 15677 15678 static void 15679 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15680 struct scsi_pkt *pktp) 15681 { 15682 uint8_t sense_key; 15683 15684 ASSERT(un != NULL); 15685 ASSERT(mutex_owned(SD_MUTEX(un))); 15686 ASSERT(bp != NULL); 15687 ASSERT(bp != un->un_rqs_bp); 15688 ASSERT(xp != NULL); 15689 ASSERT(pktp != NULL); 15690 15691 sense_key = scsi_sense_key(xp->xb_sense_data); 15692 15693 switch (sense_key) { 15694 case KEY_NO_SENSE: 15695 sd_sense_key_no_sense(un, bp, xp, pktp); 15696 break; 15697 case KEY_RECOVERABLE_ERROR: 15698 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15699 bp, xp, pktp); 15700 break; 15701 case KEY_NOT_READY: 15702 sd_sense_key_not_ready(un, xp->xb_sense_data, 15703 bp, xp, pktp); 15704 break; 15705 case KEY_MEDIUM_ERROR: 15706 case KEY_HARDWARE_ERROR: 15707 sd_sense_key_medium_or_hardware_error(un, 15708 xp->xb_sense_data, bp, xp, pktp); 15709 break; 15710 case KEY_ILLEGAL_REQUEST: 15711 sd_sense_key_illegal_request(un, bp, xp, pktp); 15712 break; 15713 case KEY_UNIT_ATTENTION: 15714 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15715 bp, xp, pktp); 15716 break; 15717 case KEY_WRITE_PROTECT: 15718 case KEY_VOLUME_OVERFLOW: 15719 case KEY_MISCOMPARE: 15720 sd_sense_key_fail_command(un, bp, xp, pktp); 15721 break; 15722 case KEY_BLANK_CHECK: 15723 sd_sense_key_blank_check(un, bp, xp, pktp); 15724 break; 15725 case KEY_ABORTED_COMMAND: 15726 sd_sense_key_aborted_command(un, bp, xp, pktp); 15727 break; 15728 case KEY_VENDOR_UNIQUE: 15729 case KEY_COPY_ABORTED: 15730 case KEY_EQUAL: 15731 case KEY_RESERVED: 15732 default: 15733 sd_sense_key_default(un, xp->xb_sense_data, 15734 bp, xp, pktp); 15735 break; 15736 } 15737 } 15738 15739 15740 /* 15741 * Function: sd_dump_memory 15742 * 15743 * Description: Debug logging routine to print the contents of a user provided 15744 * buffer. The output of the buffer is broken up into 256 byte 15745 * segments due to a size constraint of the scsi_log 15746 * implementation. 15747 * 15748 * Arguments: un - ptr to softstate 15749 * comp - component mask 15750 * title - "title" string to precede data when printed 15751 * data - ptr to data block to be printed 15752 * len - size of data block to be printed 15753 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15754 * 15755 * Context: May be called from interrupt context 15756 */ 15757 15758 #define SD_DUMP_MEMORY_BUF_SIZE 256 15759 15760 static char *sd_dump_format_string[] = { 15761 " 0x%02x", 15762 " %c" 15763 }; 15764 15765 static void 15766 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15767 int len, int fmt) 15768 { 15769 int i, j; 15770 int avail_count; 15771 int start_offset; 15772 int end_offset; 15773 size_t entry_len; 15774 char *bufp; 15775 char *local_buf; 15776 char *format_string; 15777 15778 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15779 15780 /* 15781 * In the debug version of the driver, this function is called from a 15782 * number of places which are NOPs in the release driver. 15783 * The debug driver therefore has additional methods of filtering 15784 * debug output. 15785 */ 15786 #ifdef SDDEBUG 15787 /* 15788 * In the debug version of the driver we can reduce the amount of debug 15789 * messages by setting sd_error_level to something other than 15790 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15791 * sd_component_mask.
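 * For example, to see these dumps in a debug build: set
 * sd_error_level to SCSI_ERR_ALL, include SD_LOGMASK_DUMP_MEM (or
 * SD_LOGMASK_DIAG) in sd_level_mask, and make sure the caller's
 * component bit is set in sd_component_mask; if any of these filters
 * is not satisfied, the checks below return without logging.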
15792 */ 15793 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15794 (sd_error_level != SCSI_ERR_ALL)) { 15795 return; 15796 } 15797 if (((sd_component_mask & comp) == 0) || 15798 (sd_error_level != SCSI_ERR_ALL)) { 15799 return; 15800 } 15801 #else 15802 if (sd_error_level != SCSI_ERR_ALL) { 15803 return; 15804 } 15805 #endif 15806 15807 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15808 bufp = local_buf; 15809 /* 15810 * Available length is the length of local_buf[], minus the 15811 * length of the title string, minus one for the ":", minus 15812 * one for the newline, minus one for the NULL terminator. 15813 * This gives the #bytes available for holding the printed 15814 * values from the given data buffer. 15815 */ 15816 if (fmt == SD_LOG_HEX) { 15817 format_string = sd_dump_format_string[0]; 15818 } else /* SD_LOG_CHAR */ { 15819 format_string = sd_dump_format_string[1]; 15820 } 15821 /* 15822 * Available count is the number of elements from the given 15823 * data buffer that we can fit into the available length. 15824 * This is based upon the size of the format string used. 15825 * Make one entry and find its size. 15826 */ 15827 (void) sprintf(bufp, format_string, data[0]); 15828 entry_len = strlen(bufp); 15829 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15830 15831 j = 0; 15832 while (j < len) { 15833 bufp = local_buf; 15834 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15835 start_offset = j; 15836 15837 end_offset = start_offset + avail_count; 15838 15839 (void) sprintf(bufp, "%s:", title); 15840 bufp += strlen(bufp); 15841 for (i = start_offset; ((i < end_offset) && (j < len)); 15842 i++, j++) { 15843 (void) sprintf(bufp, format_string, data[i]); 15844 bufp += entry_len; 15845 } 15846 (void) sprintf(bufp, "\n"); 15847 15848 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15849 } 15850 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15851 } 15852 15853 /* 15854 * Function: sd_print_sense_msg 15855 * 15856 * Description: Log a message based upon the given sense data.
15857 * 15858 * Arguments: un - ptr to associated softstate 15859 * bp - ptr to buf(9S) for the command 15860 * arg - ptr to associated sd_sense_info struct 15861 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15862 * or SD_NO_RETRY_ISSUED 15863 * 15864 * Context: May be called from interrupt context 15865 */ 15866 15867 static void 15868 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15869 { 15870 struct sd_xbuf *xp; 15871 struct scsi_pkt *pktp; 15872 uint8_t *sensep; 15873 daddr_t request_blkno; 15874 diskaddr_t err_blkno; 15875 int severity; 15876 int pfa_flag; 15877 extern struct scsi_key_strings scsi_cmds[]; 15878 15879 ASSERT(un != NULL); 15880 ASSERT(mutex_owned(SD_MUTEX(un))); 15881 ASSERT(bp != NULL); 15882 xp = SD_GET_XBUF(bp); 15883 ASSERT(xp != NULL); 15884 pktp = SD_GET_PKTP(bp); 15885 ASSERT(pktp != NULL); 15886 ASSERT(arg != NULL); 15887 15888 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15889 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15890 15891 if ((code == SD_DELAYED_RETRY_ISSUED) || 15892 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15893 severity = SCSI_ERR_RETRYABLE; 15894 } 15895 15896 /* Use absolute block number for the request block number */ 15897 request_blkno = xp->xb_blkno; 15898 15899 /* 15900 * Now try to get the error block number from the sense data 15901 */ 15902 sensep = xp->xb_sense_data; 15903 15904 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15905 (uint64_t *)&err_blkno)) { 15906 /* 15907 * We retrieved the error block number from the information 15908 * portion of the sense data. 15909 * 15910 * For USCSI commands we are better off using the error 15911 * block no. as the requested block no. (This is the best 15912 * we can estimate.) 15913 */ 15914 if ((SD_IS_BUFIO(xp) == FALSE) && 15915 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15916 request_blkno = err_blkno; 15917 } 15918 } else { 15919 /* 15920 * Without the es_valid bit set (for fixed format) or an 15921 * information descriptor (for descriptor format) we cannot 15922 * be certain of the error blkno, so just use the 15923 * request_blkno. 15924 */ 15925 err_blkno = (diskaddr_t)request_blkno; 15926 } 15927 15928 /* 15929 * The following will log the buffer contents for the release driver 15930 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15931 * level is set to verbose.
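 * (Illustrative output only: a READ(10) CDB dumped with the
 * SD_LOG_HEX format " 0x%02x" appears as a single line beginning
 * "Failed CDB: 0x28 0x00 ...".)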
15932 */ 15933 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15934 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15935 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15936 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15937 15938 if (pfa_flag == FALSE) { 15939 /* This is normally only set for USCSI */ 15940 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15941 return; 15942 } 15943 15944 if ((SD_IS_BUFIO(xp) == TRUE) && 15945 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15946 (severity < sd_error_level))) { 15947 return; 15948 } 15949 } 15950 15951 /* 15952 * Check for Sonoma Failover and keep a count of how many failed I/O's 15953 */ 15954 if ((SD_IS_LSI(un)) && 15955 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15956 (scsi_sense_asc(sensep) == 0x94) && 15957 (scsi_sense_ascq(sensep) == 0x01)) { 15958 un->un_sonoma_failure_count++; 15959 if (un->un_sonoma_failure_count > 1) { 15960 return; 15961 } 15962 } 15963 15964 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15965 request_blkno, err_blkno, scsi_cmds, 15966 (struct scsi_extended_sense *)sensep, 15967 un->un_additional_codes, NULL); 15968 } 15969 15970 /* 15971 * Function: sd_sense_key_no_sense 15972 * 15973 * Description: Recovery action when sense data was not received. 15974 * 15975 * Context: May be called from interrupt context 15976 */ 15977 15978 static void 15979 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15980 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15981 { 15982 struct sd_sense_info si; 15983 15984 ASSERT(un != NULL); 15985 ASSERT(mutex_owned(SD_MUTEX(un))); 15986 ASSERT(bp != NULL); 15987 ASSERT(xp != NULL); 15988 ASSERT(pktp != NULL); 15989 15990 si.ssi_severity = SCSI_ERR_FATAL; 15991 si.ssi_pfa_flag = FALSE; 15992 15993 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15994 15995 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15996 &si, EIO, (clock_t)0, NULL); 15997 } 15998 15999 16000 /* 16001 * Function: sd_sense_key_recoverable_error 16002 * 16003 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16004 * 16005 * Context: May be called from interrupt context 16006 */ 16007 16008 static void 16009 sd_sense_key_recoverable_error(struct sd_lun *un, 16010 uint8_t *sense_datap, 16011 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16012 { 16013 struct sd_sense_info si; 16014 uint8_t asc = scsi_sense_asc(sense_datap); 16015 16016 ASSERT(un != NULL); 16017 ASSERT(mutex_owned(SD_MUTEX(un))); 16018 ASSERT(bp != NULL); 16019 ASSERT(xp != NULL); 16020 ASSERT(pktp != NULL); 16021 16022 /* 16023 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16024 */ 16025 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16026 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16027 si.ssi_severity = SCSI_ERR_INFO; 16028 si.ssi_pfa_flag = TRUE; 16029 } else { 16030 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16031 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16032 si.ssi_severity = SCSI_ERR_RECOVERED; 16033 si.ssi_pfa_flag = FALSE; 16034 } 16035 16036 if (pktp->pkt_resid == 0) { 16037 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16038 sd_return_command(un, bp); 16039 return; 16040 } 16041 16042 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16043 &si, EIO, (clock_t)0, NULL); 16044 } 16045 16046 16047 16048 16049 /* 16050 * Function: sd_sense_key_not_ready 16051 * 16052 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
16053 * 16054 * Context: May be called from interrupt context 16055 */ 16056 16057 static void 16058 sd_sense_key_not_ready(struct sd_lun *un, 16059 uint8_t *sense_datap, 16060 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16061 { 16062 struct sd_sense_info si; 16063 uint8_t asc = scsi_sense_asc(sense_datap); 16064 uint8_t ascq = scsi_sense_ascq(sense_datap); 16065 16066 ASSERT(un != NULL); 16067 ASSERT(mutex_owned(SD_MUTEX(un))); 16068 ASSERT(bp != NULL); 16069 ASSERT(xp != NULL); 16070 ASSERT(pktp != NULL); 16071 16072 si.ssi_severity = SCSI_ERR_FATAL; 16073 si.ssi_pfa_flag = FALSE; 16074 16075 /* 16076 * Update error stats after first NOT READY error. Disks may have 16077 * been powered down and may need to be restarted. For CDROMs, 16078 * report NOT READY errors only if media is present. 16079 */ 16080 if ((ISCD(un) && (asc == 0x3A)) || 16081 (xp->xb_nr_retry_count > 0)) { 16082 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16083 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16084 } 16085 16086 /* 16087 * Just fail if the "not ready" retry limit has been reached. 16088 */ 16089 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16090 /* Special check for error message printing for removables. */ 16091 if (un->un_f_has_removable_media && (asc == 0x04) && 16092 (ascq >= 0x04)) { 16093 si.ssi_severity = SCSI_ERR_ALL; 16094 } 16095 goto fail_command; 16096 } 16097 16098 /* 16099 * Check the ASC and ASCQ in the sense data as needed, to determine 16100 * what to do. 16101 */ 16102 switch (asc) { 16103 case 0x04: /* LOGICAL UNIT NOT READY */ 16104 /* 16105 * disk drives that don't spin up result in a very long delay 16106 * in format without warning messages. We will log a message 16107 * if the error level is set to verbose. 16108 */ 16109 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16110 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16111 "logical unit not ready, resetting disk\n"); 16112 } 16113 16114 /* 16115 * There are different requirements for CDROMs and disks for 16116 * the number of retries. If a CD-ROM is giving this, it is 16117 * probably reading TOC and is in the process of getting 16118 * ready, so we should keep on trying for a long time to make 16119 * sure that all types of media are taken into account (for 16120 * some media the drive takes a long time to read TOC). For 16121 * disks we do not want to retry this too many times as this 16122 * can cause a long hang in format when the drive refuses to 16123 * spin up (a very common failure). 16124 */ 16125 switch (ascq) { 16126 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16127 /* 16128 * Disk drives frequently refuse to spin up which 16129 * results in a very long hang in format without 16130 * warning messages. 16131 * 16132 * Note: This code preserves the legacy behavior of 16133 * comparing xb_nr_retry_count against zero for fibre 16134 * channel targets instead of comparing against the 16135 * un_reset_retry_count value. The reason for this 16136 * discrepancy has been so utterly lost beneath the 16137 * Sands of Time that even Indiana Jones could not 16138 * find it.
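 * The practical effect of the branches just below: on fibre channel
 * the disk is reset as soon as any not-ready retry has occurred
 * (xb_nr_retry_count > 0), while on parallel SCSI the reset waits
 * until xb_nr_retry_count exceeds un_reset_retry_count. In both
 * cases SD_LOGMASK_DIAG forces the reset path, and a pending
 * un_startstop_timeid suppresses it.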
16139 */ 16140 if (un->un_f_is_fibre == TRUE) { 16141 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16142 (xp->xb_nr_retry_count > 0)) && 16143 (un->un_startstop_timeid == NULL)) { 16144 scsi_log(SD_DEVINFO(un), sd_label, 16145 CE_WARN, "logical unit not ready, " 16146 "resetting disk\n"); 16147 sd_reset_target(un, pktp); 16148 } 16149 } else { 16150 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16151 (xp->xb_nr_retry_count > 16152 un->un_reset_retry_count)) && 16153 (un->un_startstop_timeid == NULL)) { 16154 scsi_log(SD_DEVINFO(un), sd_label, 16155 CE_WARN, "logical unit not ready, " 16156 "resetting disk\n"); 16157 sd_reset_target(un, pktp); 16158 } 16159 } 16160 break; 16161 16162 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16163 /* 16164 * If the target is in the process of becoming 16165 * ready, just proceed with the retry. This can 16166 * happen with CD-ROMs that take a long time to 16167 * read TOC after a power cycle or reset. 16168 */ 16169 goto do_retry; 16170 16171 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16172 break; 16173 16174 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16175 /* 16176 * Retries cannot help here so just fail right away. 16177 */ 16178 goto fail_command; 16179 16180 case 0x88: 16181 /* 16182 * Vendor-unique code for T3/T4: it indicates a 16183 * path problem in a multipathed config, but as far as 16184 * the target driver is concerned it equates to a fatal 16185 * error, so we should just fail the command right away 16186 * (without printing anything to the console). If this 16187 * is not a T3/T4, fall thru to the default recovery 16188 * action. 16189 * T3/T4 is FC-only, so there is no need to check is_fibre. 16190 */ 16191 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16192 sd_return_failed_command(un, bp, EIO); 16193 return; 16194 } 16195 /* FALLTHRU */ 16196 16197 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16198 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16199 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16200 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16201 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16202 default: /* Possible future codes in SCSI spec? */ 16203 /* 16204 * For removable-media devices, do not retry if 16205 * ASCQ > 2 as these result mostly from USCSI commands 16206 * on MMC devices issued to check status of an 16207 * operation initiated in immediate mode. Also for 16208 * ASCQ >= 4 do not print console messages as these 16209 * mainly represent a user-initiated operation 16210 * instead of a system failure. 16211 */ 16212 if (un->un_f_has_removable_media) { 16213 si.ssi_severity = SCSI_ERR_ALL; 16214 goto fail_command; 16215 } 16216 break; 16217 } 16218 16219 /* 16220 * As part of our recovery attempt for the NOT READY 16221 * condition, we issue a START STOP UNIT command. However 16222 * we want to wait for a short delay before attempting this 16223 * as there may still be more commands coming back from the 16224 * target with the check condition. To do this we use 16225 * timeout(9F) to call sd_start_stop_unit_callback() after 16226 * the delay interval expires. (sd_start_stop_unit_callback() 16227 * dispatches sd_start_stop_unit_task(), which will issue 16228 * the actual START STOP UNIT command.) The delay interval 16229 * is one-half of the delay that we will use to retry the 16230 * command that generated the NOT READY condition.
16231 * 16232 * Note that we could just dispatch sd_start_stop_unit_task() 16233 * from here and allow it to sleep for the delay interval, 16234 * but then we would be tying up the taskq thread 16235 * unnecessarily for the duration of the delay. 16236 * 16237 * Do not issue the START STOP UNIT if the current command 16238 * is already a START STOP UNIT. 16239 */ 16240 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16241 break; 16242 } 16243 16244 /* 16245 * Do not schedule the timeout if one is already pending. 16246 */ 16247 if (un->un_startstop_timeid != NULL) { 16248 SD_INFO(SD_LOG_ERROR, un, 16249 "sd_sense_key_not_ready: restart already issued to" 16250 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16251 ddi_get_instance(SD_DEVINFO(un))); 16252 break; 16253 } 16254 16255 /* 16256 * Schedule the START STOP UNIT command, then queue the command 16257 * for a retry. 16258 * 16259 * Note: A timeout is not scheduled for this retry because we 16260 * want the retry to be serial with the START_STOP_UNIT. The 16261 * retry will be started when the START_STOP_UNIT is completed 16262 * in sd_start_stop_unit_task. 16263 */ 16264 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16265 un, SD_BSY_TIMEOUT / 2); 16266 xp->xb_nr_retry_count++; 16267 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16268 return; 16269 16270 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16271 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16272 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16273 "unit does not respond to selection\n"); 16274 } 16275 break; 16276 16277 case 0x3A: /* MEDIUM NOT PRESENT */ 16278 if (sd_error_level >= SCSI_ERR_FATAL) { 16279 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16280 "Caddy not inserted in drive\n"); 16281 } 16282 16283 sr_ejected(un); 16284 un->un_mediastate = DKIO_EJECTED; 16285 /* The state has changed, inform the media watch routines */ 16286 cv_broadcast(&un->un_state_cv); 16287 /* Just fail if no media is present in the drive. */ 16288 goto fail_command; 16289 16290 default: 16291 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16292 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16293 "Unit not Ready. Additional sense code 0x%x\n", 16294 asc); 16295 } 16296 break; 16297 } 16298 16299 do_retry: 16300 16301 /* 16302 * Retry the command, as some targets may report NOT READY for 16303 * several seconds after being reset. 16304 */ 16305 xp->xb_nr_retry_count++; 16306 si.ssi_severity = SCSI_ERR_RETRYABLE; 16307 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16308 &si, EIO, SD_BSY_TIMEOUT, NULL); 16309 16310 return; 16311 16312 fail_command: 16313 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16314 sd_return_failed_command(un, bp, EIO); 16315 } 16316 16317 16318 16319 /* 16320 * Function: sd_sense_key_medium_or_hardware_error 16321 * 16322 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16323 * sense key.
16324 * 16325 * Context: May be called from interrupt context 16326 */ 16327 16328 static void 16329 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16330 uint8_t *sense_datap, 16331 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16332 { 16333 struct sd_sense_info si; 16334 uint8_t sense_key = scsi_sense_key(sense_datap); 16335 uint8_t asc = scsi_sense_asc(sense_datap); 16336 16337 ASSERT(un != NULL); 16338 ASSERT(mutex_owned(SD_MUTEX(un))); 16339 ASSERT(bp != NULL); 16340 ASSERT(xp != NULL); 16341 ASSERT(pktp != NULL); 16342 16343 si.ssi_severity = SCSI_ERR_FATAL; 16344 si.ssi_pfa_flag = FALSE; 16345 16346 if (sense_key == KEY_MEDIUM_ERROR) { 16347 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16348 } 16349 16350 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16351 16352 if ((un->un_reset_retry_count != 0) && 16353 (xp->xb_retry_count == un->un_reset_retry_count)) { 16354 mutex_exit(SD_MUTEX(un)); 16355 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16356 if (un->un_f_allow_bus_device_reset == TRUE) { 16357 16358 boolean_t try_resetting_target = B_TRUE; 16359 16360 /* 16361 * We need to be able to handle specific ASCs when we are 16362 * handling a KEY_HARDWARE_ERROR. In particular, 16363 * taking the default action of resetting the target may 16364 * not be the appropriate way to attempt recovery. 16365 * Resetting a target because of a single LUN failure 16366 * victimizes all LUNs on that target. 16367 * 16368 * This is true for LSI arrays: if an LSI 16369 * array controller returns an ASC of 0x84 (LUN Dead), we 16370 * should trust it. 16371 */ 16372 16373 if (sense_key == KEY_HARDWARE_ERROR) { 16374 switch (asc) { 16375 case 0x84: 16376 if (SD_IS_LSI(un)) { 16377 try_resetting_target = B_FALSE; 16378 } 16379 break; 16380 default: 16381 break; 16382 } 16383 } 16384 16385 if (try_resetting_target == B_TRUE) { 16386 int reset_retval = 0; 16387 if (un->un_f_lun_reset_enabled == TRUE) { 16388 SD_TRACE(SD_LOG_IO_CORE, un, 16389 "sd_sense_key_medium_or_hardware_" 16390 "error: issuing RESET_LUN\n"); 16391 reset_retval = 16392 scsi_reset(SD_ADDRESS(un), 16393 RESET_LUN); 16394 } 16395 if (reset_retval == 0) { 16396 SD_TRACE(SD_LOG_IO_CORE, un, 16397 "sd_sense_key_medium_or_hardware_" 16398 "error: issuing RESET_TARGET\n"); 16399 (void) scsi_reset(SD_ADDRESS(un), 16400 RESET_TARGET); 16401 } 16402 } 16403 } 16404 mutex_enter(SD_MUTEX(un)); 16405 } 16406 16407 /* 16408 * This really ought to be a fatal error, but we will retry anyway 16409 * as some drives report this as a spurious error. 16410 */ 16411 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16412 &si, EIO, (clock_t)0, NULL); 16413 } 16414 16415 16416 16417 /* 16418 * Function: sd_sense_key_illegal_request 16419 * 16420 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16421 * 16422 * Context: May be called from interrupt context 16423 */ 16424 16425 static void 16426 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16427 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16428 { 16429 struct sd_sense_info si; 16430 16431 ASSERT(un != NULL); 16432 ASSERT(mutex_owned(SD_MUTEX(un))); 16433 ASSERT(bp != NULL); 16434 ASSERT(xp != NULL); 16435 ASSERT(pktp != NULL); 16436 16437 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16438 16439 si.ssi_severity = SCSI_ERR_INFO; 16440 si.ssi_pfa_flag = FALSE; 16441 16442 /* Pointless to retry if the target thinks it's an illegal request */ 16443 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16444 sd_return_failed_command(un, bp, EIO); 16445 } 16446 16447 16448 16449 16450 /* 16451 * Function: sd_sense_key_unit_attention 16452 * 16453 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16454 * 16455 * Context: May be called from interrupt context 16456 */ 16457 16458 static void 16459 sd_sense_key_unit_attention(struct sd_lun *un, 16460 uint8_t *sense_datap, 16461 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16462 { 16463 /* 16464 * For UNIT ATTENTION we allow retries for one minute. Devices 16465 * like Sonoma can continue to return UNIT ATTENTION for close to 16466 * a minute under certain conditions. 16467 */ 16468 int retry_check_flag = SD_RETRIES_UA; 16469 boolean_t kstat_updated = B_FALSE; 16470 struct sd_sense_info si; 16471 uint8_t asc = scsi_sense_asc(sense_datap); 16472 16473 ASSERT(un != NULL); 16474 ASSERT(mutex_owned(SD_MUTEX(un))); 16475 ASSERT(bp != NULL); 16476 ASSERT(xp != NULL); 16477 ASSERT(pktp != NULL); 16478 16479 si.ssi_severity = SCSI_ERR_INFO; 16480 si.ssi_pfa_flag = FALSE; 16481 16482 16483 switch (asc) { 16484 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16485 if (sd_report_pfa != 0) { 16486 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16487 si.ssi_pfa_flag = TRUE; 16488 retry_check_flag = SD_RETRIES_STANDARD; 16489 goto do_retry; 16490 } 16491 16492 break; 16493 16494 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16495 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16496 un->un_resvd_status |= 16497 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16498 } 16499 #ifdef _LP64 16500 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16501 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16502 un, KM_NOSLEEP) == 0) { 16503 /* 16504 * If we can't dispatch the task we'll just 16505 * live without descriptor sense. We can 16506 * try again on the next "unit attention". 16507 */ 16508 SD_ERROR(SD_LOG_ERROR, un, 16509 "sd_sense_key_unit_attention: " 16510 "Could not dispatch " 16511 "sd_reenable_dsense_task\n"); 16512 } 16513 } 16514 #endif /* _LP64 */ 16515 /* FALLTHRU */ 16516 16517 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16518 if (!un->un_f_has_removable_media) { 16519 break; 16520 } 16521 16522 /* 16523 * When we get a unit attention from a removable-media device, 16524 * it may be in a state that will take a long time to recover 16525 * (e.g., from a reset). Since we are executing in interrupt 16526 * context here, we cannot wait around for the device to come 16527 * back. So hand this command off to sd_media_change_task() 16528 * for deferred processing under taskq thread context. (Note 16529 * that the command still may be failed if a problem is 16530 * encountered at a later time.)
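 *
 * The dispatch idiom used below, in outline (illustration only):
 *
 *	if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
 *	    KM_NOSLEEP) == 0) {
 *		(dispatch failed; fail the command here)
 *	}
 *
 * taskq_dispatch(9F) returns a nonzero task id on success and 0 when
 * no task entry can be allocated; KM_NOSLEEP is required because we
 * may be running in interrupt context.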
16531 */ 16532 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16533 KM_NOSLEEP) == 0) { 16534 /* 16535 * Cannot dispatch the request so fail the command. 16536 */ 16537 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16538 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16539 si.ssi_severity = SCSI_ERR_FATAL; 16540 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16541 sd_return_failed_command(un, bp, EIO); 16542 } 16543 16544 /* 16545 * If we failed to dispatch sd_media_change_task() above, the 16546 * kstat has already been updated. If the dispatch succeeded, 16547 * the kstat will be updated later by that task if it encounters 16548 * an error. Either way, set the kstat_updated flag here. 16549 */ 16550 kstat_updated = B_TRUE; 16551 16552 /* 16553 * Either the command has been successfully dispatched to a 16554 * task Q for retrying, or the dispatch failed. In either case 16555 * do NOT retry again by calling sd_retry_command. This sets up 16556 * two retries of the same command and when one completes and 16557 * frees the resources the other will access freed memory, 16558 * a bad thing. 16559 */ 16560 return; 16561 16562 default: 16563 break; 16564 } 16565 16566 /* 16567 * Update kstat if we haven't done that. 16568 */ 16569 if (!kstat_updated) { 16570 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16571 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16572 } 16573 16574 do_retry: 16575 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16576 EIO, SD_UA_RETRY_DELAY, NULL); 16577 } 16578 16579 16580 16581 /* 16582 * Function: sd_sense_key_fail_command 16583 * 16584 * Description: Used to fail a command when we don't like the sense key that 16585 * was returned. 16586 * 16587 * Context: May be called from interrupt context 16588 */ 16589 16590 static void 16591 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16592 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16593 { 16594 struct sd_sense_info si; 16595 16596 ASSERT(un != NULL); 16597 ASSERT(mutex_owned(SD_MUTEX(un))); 16598 ASSERT(bp != NULL); 16599 ASSERT(xp != NULL); 16600 ASSERT(pktp != NULL); 16601 16602 si.ssi_severity = SCSI_ERR_FATAL; 16603 si.ssi_pfa_flag = FALSE; 16604 16605 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16606 sd_return_failed_command(un, bp, EIO); 16607 } 16608 16609 16610 16611 /* 16612 * Function: sd_sense_key_blank_check 16613 * 16614 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16615 * Has no monetary connotation. 16616 * 16617 * Context: May be called from interrupt context 16618 */ 16619 16620 static void 16621 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16622 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16623 { 16624 struct sd_sense_info si; 16625 16626 ASSERT(un != NULL); 16627 ASSERT(mutex_owned(SD_MUTEX(un))); 16628 ASSERT(bp != NULL); 16629 ASSERT(xp != NULL); 16630 ASSERT(pktp != NULL); 16631 16632 /* 16633 * Blank check is not fatal for removable devices, therefore 16634 * it does not require a console message. 16635 */ 16636 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16637 SCSI_ERR_FATAL; 16638 si.ssi_pfa_flag = FALSE; 16639 16640 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16641 sd_return_failed_command(un, bp, EIO); 16642 } 16643 16644 16645 16646 16647 /* 16648 * Function: sd_sense_key_aborted_command 16649 * 16650 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16651 * 16652 * Context: May be called from interrupt context 16653 */ 16654 16655 static void 16656 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16657 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16658 { 16659 struct sd_sense_info si; 16660 16661 ASSERT(un != NULL); 16662 ASSERT(mutex_owned(SD_MUTEX(un))); 16663 ASSERT(bp != NULL); 16664 ASSERT(xp != NULL); 16665 ASSERT(pktp != NULL); 16666 16667 si.ssi_severity = SCSI_ERR_FATAL; 16668 si.ssi_pfa_flag = FALSE; 16669 16670 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16671 16672 /* 16673 * This really ought to be a fatal error, but we will retry anyway 16674 * as some drives report this as a spurious error. 16675 */ 16676 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16677 &si, EIO, drv_usectohz(100000), NULL); 16678 } 16679 16680 16681 16682 /* 16683 * Function: sd_sense_key_default 16684 * 16685 * Description: Default recovery action for several SCSI sense keys (basically 16686 * attempts a retry). 16687 * 16688 * Context: May be called from interrupt context 16689 */ 16690 16691 static void 16692 sd_sense_key_default(struct sd_lun *un, 16693 uint8_t *sense_datap, 16694 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16695 { 16696 struct sd_sense_info si; 16697 uint8_t sense_key = scsi_sense_key(sense_datap); 16698 16699 ASSERT(un != NULL); 16700 ASSERT(mutex_owned(SD_MUTEX(un))); 16701 ASSERT(bp != NULL); 16702 ASSERT(xp != NULL); 16703 ASSERT(pktp != NULL); 16704 16705 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16706 16707 /* 16708 * Undecoded sense key. Attempt retries and hope that will fix 16709 * the problem. Otherwise, we're dead. 16710 */ 16711 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16712 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16713 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16714 } 16715 16716 si.ssi_severity = SCSI_ERR_FATAL; 16717 si.ssi_pfa_flag = FALSE; 16718 16719 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16720 &si, EIO, (clock_t)0, NULL); 16721 } 16722 16723 16724 16725 /* 16726 * Function: sd_print_retry_msg 16727 * 16728 * Description: Print a message indicating the retry action being taken. 16729 * 16730 * Arguments: un - ptr to associated softstate 16731 * bp - ptr to buf(9S) for the command 16732 * arg - not used. 16733 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16734 * or SD_NO_RETRY_ISSUED 16735 * 16736 * Context: May be called from interrupt context 16737 */ 16738 /* ARGSUSED */ 16739 static void 16740 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16741 { 16742 struct sd_xbuf *xp; 16743 struct scsi_pkt *pktp; 16744 char *reasonp; 16745 char *msgp; 16746 16747 ASSERT(un != NULL); 16748 ASSERT(mutex_owned(SD_MUTEX(un))); 16749 ASSERT(bp != NULL); 16750 pktp = SD_GET_PKTP(bp); 16751 ASSERT(pktp != NULL); 16752 xp = SD_GET_XBUF(bp); 16753 ASSERT(xp != NULL); 16754 16755 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16756 mutex_enter(&un->un_pm_mutex); 16757 if ((un->un_state == SD_STATE_SUSPENDED) || 16758 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16759 (pktp->pkt_flags & FLAG_SILENT)) { 16760 mutex_exit(&un->un_pm_mutex); 16761 goto update_pkt_reason; 16762 } 16763 mutex_exit(&un->un_pm_mutex); 16764 16765 /* 16766 * Suppress messages if they are all the same pkt_reason; with 16767 * TQ, many (up to 256) are returned with the same pkt_reason. 16768 * If we are in panic, then suppress the retry messages. 
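 *
 * In outline, the suppression test applied below to the retry cases
 * (an illustrative restatement of the code that follows, not
 * additional logic):
 *
 *	if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
 *	    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
 *	    (sd_error_level != SCSI_ERR_ALL)))
 *		return;		(silently skip the message)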
16769 */ 16770 switch (flag) { 16771 case SD_NO_RETRY_ISSUED: 16772 msgp = "giving up"; 16773 break; 16774 case SD_IMMEDIATE_RETRY_ISSUED: 16775 case SD_DELAYED_RETRY_ISSUED: 16776 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16777 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16778 (sd_error_level != SCSI_ERR_ALL))) { 16779 return; 16780 } 16781 msgp = "retrying command"; 16782 break; 16783 default: 16784 goto update_pkt_reason; 16785 } 16786 16787 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16788 scsi_rname(pktp->pkt_reason)); 16789 16790 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16791 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16792 16793 update_pkt_reason: 16794 /* 16795 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16796 * This is to prevent multiple console messages for the same failure 16797 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16798 * when the command is retried successfully because there still may be 16799 * more commands coming back with the same value of pktp->pkt_reason. 16800 */ 16801 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16802 un->un_last_pkt_reason = pktp->pkt_reason; 16803 } 16804 } 16805 16806 16807 /* 16808 * Function: sd_print_cmd_incomplete_msg 16809 * 16810 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16811 * 16812 * Arguments: un - ptr to associated softstate 16813 * bp - ptr to buf(9S) for the command 16814 * arg - passed to sd_print_retry_msg() 16815 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16816 * or SD_NO_RETRY_ISSUED 16817 * 16818 * Context: May be called from interrupt context 16819 */ 16820 16821 static void 16822 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16823 int code) 16824 { 16825 dev_info_t *dip; 16826 16827 ASSERT(un != NULL); 16828 ASSERT(mutex_owned(SD_MUTEX(un))); 16829 ASSERT(bp != NULL); 16830 16831 switch (code) { 16832 case SD_NO_RETRY_ISSUED: 16833 /* Command was failed. Someone turned off this target? */ 16834 if (un->un_state != SD_STATE_OFFLINE) { 16835 /* 16836 * Suppress message if we are detaching and 16837 * device has been disconnected 16838 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16839 * private interface and not part of the DDI 16840 */ 16841 dip = un->un_sd->sd_dev; 16842 if (!(DEVI_IS_DETACHING(dip) && 16843 DEVI_IS_DEVICE_REMOVED(dip))) { 16844 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16845 "disk not responding to selection\n"); 16846 } 16847 New_state(un, SD_STATE_OFFLINE); 16848 } 16849 break; 16850 16851 case SD_DELAYED_RETRY_ISSUED: 16852 case SD_IMMEDIATE_RETRY_ISSUED: 16853 default: 16854 /* Command was successfully queued for retry */ 16855 sd_print_retry_msg(un, bp, arg, code); 16856 break; 16857 } 16858 } 16859 16860 16861 /* 16862 * Function: sd_pkt_reason_cmd_incomplete 16863 * 16864 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
16865 * 16866 * Context: May be called from interrupt context 16867 */ 16868 16869 static void 16870 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16871 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16872 { 16873 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16874 16875 ASSERT(un != NULL); 16876 ASSERT(mutex_owned(SD_MUTEX(un))); 16877 ASSERT(bp != NULL); 16878 ASSERT(xp != NULL); 16879 ASSERT(pktp != NULL); 16880 16881 /* Do not do a reset if selection did not complete */ 16882 /* Note: Should this not just check the bit? */ 16883 if (pktp->pkt_state != STATE_GOT_BUS) { 16884 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16885 sd_reset_target(un, pktp); 16886 } 16887 16888 /* 16889 * If the target was not successfully selected, then set 16890 * SD_RETRIES_FAILFAST to indicate that we lost communication 16891 * with the target, and further retries and/or commands are 16892 * likely to take a long time. 16893 */ 16894 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16895 flag |= SD_RETRIES_FAILFAST; 16896 } 16897 16898 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16899 16900 sd_retry_command(un, bp, flag, 16901 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16902 } 16903 16904 16905 16906 /* 16907 * Function: sd_pkt_reason_cmd_tran_err 16908 * 16909 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16910 * 16911 * Context: May be called from interrupt context 16912 */ 16913 16914 static void 16915 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16916 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16917 { 16918 ASSERT(un != NULL); 16919 ASSERT(mutex_owned(SD_MUTEX(un))); 16920 ASSERT(bp != NULL); 16921 ASSERT(xp != NULL); 16922 ASSERT(pktp != NULL); 16923 16924 /* 16925 * Do not reset if we got a parity error, or if 16926 * selection did not complete. 16927 */ 16928 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16929 /* Note: Should this not just check the bit for pkt_state? */ 16930 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16931 (pktp->pkt_state != STATE_GOT_BUS)) { 16932 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16933 sd_reset_target(un, pktp); 16934 } 16935 16936 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16937 16938 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16939 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16940 } 16941 16942 16943 16944 /* 16945 * Function: sd_pkt_reason_cmd_reset 16946 * 16947 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16948 * 16949 * Context: May be called from interrupt context 16950 */ 16951 16952 static void 16953 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16954 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16955 { 16956 ASSERT(un != NULL); 16957 ASSERT(mutex_owned(SD_MUTEX(un))); 16958 ASSERT(bp != NULL); 16959 ASSERT(xp != NULL); 16960 ASSERT(pktp != NULL); 16961 16962 /* The target may still be running the command, so try to reset. */ 16963 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16964 sd_reset_target(un, pktp); 16965 16966 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16967 16968 /* 16969 * If pkt_reason is CMD_RESET chances are that this pkt got 16970 * reset because another target on this bus caused it. The target 16971 * that caused it should get CMD_TIMEOUT with pkt_statistics 16972 * of STAT_TIMEOUT/STAT_DEV_RESET. 
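 *
 * For example (illustration only), the initiator whose command
 * actually caused the reset would typically observe something like:
 *
 *	if ((pktp->pkt_reason == CMD_TIMEOUT) &&
 *	    (pktp->pkt_statistics & (STAT_TIMEOUT | STAT_DEV_RESET)))
 *		(this target timed out and was reset)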
16973 */ 16974 16975 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16976 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16977 } 16978 16979 16980 16981 16982 /* 16983 * Function: sd_pkt_reason_cmd_aborted 16984 * 16985 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16986 * 16987 * Context: May be called from interrupt context 16988 */ 16989 16990 static void 16991 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16992 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16993 { 16994 ASSERT(un != NULL); 16995 ASSERT(mutex_owned(SD_MUTEX(un))); 16996 ASSERT(bp != NULL); 16997 ASSERT(xp != NULL); 16998 ASSERT(pktp != NULL); 16999 17000 /* The target may still be running the command, so try to reset. */ 17001 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17002 sd_reset_target(un, pktp); 17003 17004 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17005 17006 /* 17007 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17008 * aborted because another target on this bus caused it. The target 17009 * that caused it should get CMD_TIMEOUT with pkt_statistics 17010 * of STAT_TIMEOUT/STAT_DEV_RESET. 17011 */ 17012 17013 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17014 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17015 } 17016 17017 17018 17019 /* 17020 * Function: sd_pkt_reason_cmd_timeout 17021 * 17022 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17023 * 17024 * Context: May be called from interrupt context 17025 */ 17026 17027 static void 17028 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17029 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17030 { 17031 ASSERT(un != NULL); 17032 ASSERT(mutex_owned(SD_MUTEX(un))); 17033 ASSERT(bp != NULL); 17034 ASSERT(xp != NULL); 17035 ASSERT(pktp != NULL); 17036 17037 17038 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17039 sd_reset_target(un, pktp); 17040 17041 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17042 17043 /* 17044 * A command timeout indicates that we could not establish 17045 * communication with the target, so set SD_RETRIES_FAILFAST 17046 * as further retries/commands are likely to take a long time. 17047 */ 17048 sd_retry_command(un, bp, 17049 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17050 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17051 } 17052 17053 17054 17055 /* 17056 * Function: sd_pkt_reason_cmd_unx_bus_free 17057 * 17058 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 17059 * 17060 * Context: May be called from interrupt context 17061 */ 17062 17063 static void 17064 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17065 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17066 { 17067 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17068 17069 ASSERT(un != NULL); 17070 ASSERT(mutex_owned(SD_MUTEX(un))); 17071 ASSERT(bp != NULL); 17072 ASSERT(xp != NULL); 17073 ASSERT(pktp != NULL); 17074 17075 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17076 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17077 17078 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17079 sd_print_retry_msg : NULL; 17080 17081 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17082 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17083 } 17084 17085 17086 /* 17087 * Function: sd_pkt_reason_cmd_tag_reject 17088 * 17089 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
17090 * 17091 * Context: May be called from interrupt context 17092 */ 17093 17094 static void 17095 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17096 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17097 { 17098 ASSERT(un != NULL); 17099 ASSERT(mutex_owned(SD_MUTEX(un))); 17100 ASSERT(bp != NULL); 17101 ASSERT(xp != NULL); 17102 ASSERT(pktp != NULL); 17103 17104 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17105 pktp->pkt_flags = 0; 17106 un->un_tagflags = 0; 17107 if (un->un_f_opt_queueing == TRUE) { 17108 un->un_throttle = min(un->un_throttle, 3); 17109 } else { 17110 un->un_throttle = 1; 17111 } 17112 mutex_exit(SD_MUTEX(un)); 17113 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17114 mutex_enter(SD_MUTEX(un)); 17115 17116 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17117 17118 /* Legacy behavior not to check retry counts here. */ 17119 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17120 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17121 } 17122 17123 17124 /* 17125 * Function: sd_pkt_reason_default 17126 * 17127 * Description: Default recovery actions for SCSA pkt_reason values that 17128 * do not have more explicit recovery actions. 17129 * 17130 * Context: May be called from interrupt context 17131 */ 17132 17133 static void 17134 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17135 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17136 { 17137 ASSERT(un != NULL); 17138 ASSERT(mutex_owned(SD_MUTEX(un))); 17139 ASSERT(bp != NULL); 17140 ASSERT(xp != NULL); 17141 ASSERT(pktp != NULL); 17142 17143 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17144 sd_reset_target(un, pktp); 17145 17146 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17147 17148 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17149 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17150 } 17151 17152 17153 17154 /* 17155 * Function: sd_pkt_status_check_condition 17156 * 17157 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17158 * 17159 * Context: May be called from interrupt context 17160 */ 17161 17162 static void 17163 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17164 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17165 { 17166 ASSERT(un != NULL); 17167 ASSERT(mutex_owned(SD_MUTEX(un))); 17168 ASSERT(bp != NULL); 17169 ASSERT(xp != NULL); 17170 ASSERT(pktp != NULL); 17171 17172 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17173 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17174 17175 /* 17176 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17177 * command will be retried after the request sense). Otherwise, retry 17178 * the command. Note: we are issuing the request sense even though the 17179 * retry limit may have been reached for the failed command. 
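 *
 * For background (illustration only): without ARQ (automatic request
 * sense) the sense data must be fetched explicitly with a REQUEST
 * SENSE command, a 6-byte CDB (opcode 0x03, SCMD_REQUEST_SENSE)
 * whose byte 4 carries the allocation length. With ARQ the HBA has
 * already retrieved the sense data along with the check condition,
 * so the original command can simply be retried.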
17180 */ 17181 if (un->un_f_arq_enabled == FALSE) { 17182 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17183 "no ARQ, sending request sense command\n"); 17184 sd_send_request_sense_command(un, bp, pktp); 17185 } else { 17186 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17187 "ARQ,retrying request sense command\n"); 17188 #if defined(__i386) || defined(__amd64) 17189 /* 17190 * The SD_RETRY_DELAY value needs to be adjusted here 17191 * when SD_RETRY_DELAY changes in sddef.h 17192 */ 17193 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17194 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 17195 NULL); 17196 #else 17197 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17198 EIO, SD_RETRY_DELAY, NULL); 17199 #endif 17200 } 17201 17202 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17203 } 17204 17205 17206 /* 17207 * Function: sd_pkt_status_busy 17208 * 17209 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17210 * 17211 * Context: May be called from interrupt context 17212 */ 17213 17214 static void 17215 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17216 struct scsi_pkt *pktp) 17217 { 17218 ASSERT(un != NULL); 17219 ASSERT(mutex_owned(SD_MUTEX(un))); 17220 ASSERT(bp != NULL); 17221 ASSERT(xp != NULL); 17222 ASSERT(pktp != NULL); 17223 17224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17225 "sd_pkt_status_busy: entry\n"); 17226 17227 /* If retries are exhausted, just fail the command. */ 17228 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17229 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17230 "device busy too long\n"); 17231 sd_return_failed_command(un, bp, EIO); 17232 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17233 "sd_pkt_status_busy: exit\n"); 17234 return; 17235 } 17236 xp->xb_retry_count++; 17237 17238 /* 17239 * Try to reset the target. However, we do not want to perform 17240 * more than one reset if the device continues to fail. The reset 17241 * will be performed when the retry count reaches the reset 17242 * threshold. This threshold should be set such that at least 17243 * one retry is issued before the reset is performed. 17244 */ 17245 if (xp->xb_retry_count == 17246 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17247 int rval = 0; 17248 mutex_exit(SD_MUTEX(un)); 17249 if (un->un_f_allow_bus_device_reset == TRUE) { 17250 /* 17251 * First try to reset the LUN; if we cannot then 17252 * try to reset the target. 17253 */ 17254 if (un->un_f_lun_reset_enabled == TRUE) { 17255 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17256 "sd_pkt_status_busy: RESET_LUN\n"); 17257 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17258 } 17259 if (rval == 0) { 17260 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17261 "sd_pkt_status_busy: RESET_TARGET\n"); 17262 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17263 } 17264 } 17265 if (rval == 0) { 17266 /* 17267 * If the RESET_LUN and/or RESET_TARGET failed, 17268 * try RESET_ALL 17269 */ 17270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17271 "sd_pkt_status_busy: RESET_ALL\n"); 17272 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17273 } 17274 mutex_enter(SD_MUTEX(un)); 17275 if (rval == 0) { 17276 /* 17277 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17278 * At this point we give up & fail the command.
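 *
 * The escalation ladder used above, in outline (illustration only;
 * the un_f_allow_bus_device_reset gate is omitted in this sketch):
 *
 *	rval = 0;
 *	if (un->un_f_lun_reset_enabled == TRUE)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
 *
 * scsi_reset(9F) returns nonzero on success, so each wider reset is
 * attempted only when the narrower one fails.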
17279 */ 17280 sd_return_failed_command(un, bp, EIO); 17281 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17282 "sd_pkt_status_busy: exit (failed cmd)\n"); 17283 return; 17284 } 17285 } 17286 17287 /* 17288 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17289 * we have already checked the retry counts above. 17290 */ 17291 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17292 EIO, SD_BSY_TIMEOUT, NULL); 17293 17294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17295 "sd_pkt_status_busy: exit\n"); 17296 } 17297 17298 17299 /* 17300 * Function: sd_pkt_status_reservation_conflict 17301 * 17302 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17303 * command status. 17304 * 17305 * Context: May be called from interrupt context 17306 */ 17307 17308 static void 17309 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17310 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17311 { 17312 ASSERT(un != NULL); 17313 ASSERT(mutex_owned(SD_MUTEX(un))); 17314 ASSERT(bp != NULL); 17315 ASSERT(xp != NULL); 17316 ASSERT(pktp != NULL); 17317 17318 /* 17319 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17320 * conflict could be due to various reasons like incorrect keys, not 17321 * registered or not reserved etc. So, we return EACCES to the caller. 17322 */ 17323 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17324 int cmd = SD_GET_PKT_OPCODE(pktp); 17325 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17326 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17327 sd_return_failed_command(un, bp, EACCES); 17328 return; 17329 } 17330 } 17331 17332 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17333 17334 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17335 if (sd_failfast_enable != 0) { 17336 /* By definition, we must panic here.... */ 17337 sd_panic_for_res_conflict(un); 17338 /*NOTREACHED*/ 17339 } 17340 SD_ERROR(SD_LOG_IO, un, 17341 "sd_handle_resv_conflict: Disk Reserved\n"); 17342 sd_return_failed_command(un, bp, EACCES); 17343 return; 17344 } 17345 17346 /* 17347 * 1147670: retry only if sd_retry_on_reservation_conflict 17348 * property is set (default is 1). Retries will not succeed 17349 * on a disk reserved by another initiator. HA systems 17350 * may reset this via sd.conf to avoid these retries. 17351 * 17352 * Note: The legacy return code for this failure is EIO, however EACCES 17353 * seems more appropriate for a reservation conflict. 17354 */ 17355 if (sd_retry_on_reservation_conflict == 0) { 17356 SD_ERROR(SD_LOG_IO, un, 17357 "sd_handle_resv_conflict: Device Reserved\n"); 17358 sd_return_failed_command(un, bp, EIO); 17359 return; 17360 } 17361 17362 /* 17363 * Retry the command if we can. 17364 * 17365 * Note: The legacy return code for this failure is EIO, however EACCES 17366 * seems more appropriate for a reservation conflict. 17367 */ 17368 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17369 (clock_t)2, NULL); 17370 } 17371 17372 17373 17374 /* 17375 * Function: sd_pkt_status_qfull 17376 * 17377 * Description: Handle a QUEUE FULL condition from the target. This can 17378 * occur if the HBA does not handle the queue full condition. 17379 * (Basically this means third-party HBAs, as Sun HBAs will 17380 * handle the queue full condition.) Note that if there are 17381 * some commands already in the transport, then the queue full 17382 * has occurred because the queue for this nexus is actually 17383 * full.
If there are no commands in the transport, then the 17384 * queue full is resulting from some other initiator or lun 17385 * consuming all the resources at the target. 17386 * 17387 * Context: May be called from interrupt context 17388 */ 17389 17390 static void 17391 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17392 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17393 { 17394 ASSERT(un != NULL); 17395 ASSERT(mutex_owned(SD_MUTEX(un))); 17396 ASSERT(bp != NULL); 17397 ASSERT(xp != NULL); 17398 ASSERT(pktp != NULL); 17399 17400 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17401 "sd_pkt_status_qfull: entry\n"); 17402 17403 /* 17404 * Just lower the QFULL throttle and retry the command. Note that 17405 * we do not limit the number of retries here. 17406 */ 17407 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17408 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17409 SD_RESTART_TIMEOUT, NULL); 17410 17411 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17412 "sd_pkt_status_qfull: exit\n"); 17413 } 17414 17415 17416 /* 17417 * Function: sd_reset_target 17418 * 17419 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17420 * RESET_TARGET, or RESET_ALL. 17421 * 17422 * Context: May be called under interrupt context. 17423 */ 17424 17425 static void 17426 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17427 { 17428 int rval = 0; 17429 17430 ASSERT(un != NULL); 17431 ASSERT(mutex_owned(SD_MUTEX(un))); 17432 ASSERT(pktp != NULL); 17433 17434 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17435 17436 /* 17437 * No need to reset if the transport layer has already done so. 17438 */ 17439 if ((pktp->pkt_statistics & 17440 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17441 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17442 "sd_reset_target: no reset\n"); 17443 return; 17444 } 17445 17446 mutex_exit(SD_MUTEX(un)); 17447 17448 if (un->un_f_allow_bus_device_reset == TRUE) { 17449 if (un->un_f_lun_reset_enabled == TRUE) { 17450 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17451 "sd_reset_target: RESET_LUN\n"); 17452 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17453 } 17454 if (rval == 0) { 17455 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17456 "sd_reset_target: RESET_TARGET\n"); 17457 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17458 } 17459 } 17460 17461 if (rval == 0) { 17462 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17463 "sd_reset_target: RESET_ALL\n"); 17464 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17465 } 17466 17467 mutex_enter(SD_MUTEX(un)); 17468 17469 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17470 } 17471 17472 17473 /* 17474 * Function: sd_media_change_task 17475 * 17476 * Description: Recovery action for CDROM to become available. 
17477 * 17478 * Context: Executes in a taskq() thread context 17479 */ 17480 17481 static void 17482 sd_media_change_task(void *arg) 17483 { 17484 struct scsi_pkt *pktp = arg; 17485 struct sd_lun *un; 17486 struct buf *bp; 17487 struct sd_xbuf *xp; 17488 int err = 0; 17489 int retry_count = 0; 17490 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17491 struct sd_sense_info si; 17492 17493 ASSERT(pktp != NULL); 17494 bp = (struct buf *)pktp->pkt_private; 17495 ASSERT(bp != NULL); 17496 xp = SD_GET_XBUF(bp); 17497 ASSERT(xp != NULL); 17498 un = SD_GET_UN(bp); 17499 ASSERT(un != NULL); 17500 ASSERT(!mutex_owned(SD_MUTEX(un))); 17501 ASSERT(un->un_f_monitor_media_state); 17502 17503 si.ssi_severity = SCSI_ERR_INFO; 17504 si.ssi_pfa_flag = FALSE; 17505 17506 /* 17507 * When a reset is issued on a CDROM, it takes a long time to 17508 * recover. The first few attempts to read capacity and other things 17509 * related to handling unit attention fail (with an ASC of 0x4 and 17510 * an ASCQ of 0x1). In that case we want to do enough retries, while 17511 * limiting the retries in other cases of genuine failures, like 17512 * no media in the drive. 17513 */ 17514 while (retry_count++ < retry_limit) { 17515 if ((err = sd_handle_mchange(un)) == 0) { 17516 break; 17517 } 17518 if (err == EAGAIN) { 17519 retry_limit = SD_UNIT_ATTENTION_RETRY; 17520 } 17521 /* Sleep for 0.5 sec. & try again */ 17522 delay(drv_usectohz(500000)); 17523 } 17524 17525 /* 17526 * Dispatch (retry or fail) the original command here, 17527 * along with appropriate console messages.... 17528 * 17529 * Must grab the mutex before calling sd_retry_command, 17530 * sd_print_sense_msg and sd_return_failed_command. 17531 */ 17532 mutex_enter(SD_MUTEX(un)); 17533 if (err != SD_CMD_SUCCESS) { 17534 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17535 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17536 si.ssi_severity = SCSI_ERR_FATAL; 17537 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17538 sd_return_failed_command(un, bp, EIO); 17539 } else { 17540 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17541 &si, EIO, (clock_t)0, NULL); 17542 } 17543 mutex_exit(SD_MUTEX(un)); 17544 } 17545 17546 17547 17548 /* 17549 * Function: sd_handle_mchange 17550 * 17551 * Description: Perform geometry validation & other recovery when CDROM 17552 * has been removed from drive.
17553 * 17554 * Return Code: 0 for success 17555 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17556 * sd_send_scsi_READ_CAPACITY() 17557 * 17558 * Context: Executes in a taskq() thread context 17559 */ 17560 17561 static int 17562 sd_handle_mchange(struct sd_lun *un) 17563 { 17564 uint64_t capacity; 17565 uint32_t lbasize; 17566 int rval; 17567 17568 ASSERT(!mutex_owned(SD_MUTEX(un))); 17569 ASSERT(un->un_f_monitor_media_state); 17570 17571 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17572 SD_PATH_DIRECT_PRIORITY)) != 0) { 17573 return (rval); 17574 } 17575 17576 mutex_enter(SD_MUTEX(un)); 17577 sd_update_block_info(un, lbasize, capacity); 17578 17579 if (un->un_errstats != NULL) { 17580 struct sd_errstats *stp = 17581 (struct sd_errstats *)un->un_errstats->ks_data; 17582 stp->sd_capacity.value.ui64 = (uint64_t) 17583 ((uint64_t)un->un_blockcount * 17584 (uint64_t)un->un_tgt_blocksize); 17585 } 17586 17587 17588 /* 17589 * Check if the media in the device is writable or not 17590 */ 17591 if (ISCD(un)) 17592 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17593 17594 /* 17595 * Note: Maybe let the strategy/partitioning chain worry about getting 17596 * valid geometry. 17597 */ 17598 mutex_exit(SD_MUTEX(un)); 17599 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17600 17601 17602 if (cmlb_validate(un->un_cmlbhandle, 0, 17603 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17604 return (EIO); 17605 } else { 17606 if (un->un_f_pkstats_enabled) { 17607 sd_set_pstats(un); 17608 SD_TRACE(SD_LOG_IO_PARTITION, un, 17609 "sd_handle_mchange: un:0x%p pstats created and " 17610 "set\n", un); 17611 } 17612 } 17613 17614 17615 /* 17616 * Try to lock the door 17617 */ 17618 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17619 SD_PATH_DIRECT_PRIORITY)); 17620 } 17621 17622 17623 /* 17624 * Function: sd_send_scsi_DOORLOCK 17625 * 17626 * Description: Issue the scsi DOOR LOCK command 17627 * 17628 * Arguments: un - pointer to driver soft state (unit) structure for 17629 * this target. 17630 * flag - SD_REMOVAL_ALLOW 17631 * SD_REMOVAL_PREVENT 17632 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17633 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17634 * to use the USCSI "direct" chain and bypass the normal 17635 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17636 * command is issued as part of an error recovery action. 17637 * 17638 * Return Code: 0 - Success 17639 * errno return code from sd_send_scsi_cmd() 17640 * 17641 * Context: Can sleep. 17642 */ 17643 17644 static int 17645 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17646 { 17647 union scsi_cdb cdb; 17648 struct uscsi_cmd ucmd_buf; 17649 struct scsi_extended_sense sense_buf; 17650 int status; 17651 17652 ASSERT(un != NULL); 17653 ASSERT(!mutex_owned(SD_MUTEX(un))); 17654 17655 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17656 17657 /* already determined doorlock is not supported, fake success */ 17658 if (un->un_f_doorlock_supported == FALSE) { 17659 return (0); 17660 } 17661 17662 /* 17663 * If we are ejecting and see an SD_REMOVAL_PREVENT 17664 * ignore the command so we can complete the eject 17665 * operation. 
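 *
 * For reference (illustration only): PREVENT ALLOW MEDIUM REMOVAL
 * (SCMD_DOORLOCK) is a 6-byte CDB in which byte 4 carries the
 * Prevent field, so the flag (SD_REMOVAL_ALLOW or SD_REMOVAL_PREVENT)
 * is copied directly into cdb_opaque[4] below:
 *
 *	cdb.scc_cmd = SCMD_DOORLOCK;
 *	cdb.cdb_opaque[4] = (uchar_t)flag;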
17666 */ 17667 if (flag == SD_REMOVAL_PREVENT) { 17668 mutex_enter(SD_MUTEX(un)); 17669 if (un->un_f_ejecting == TRUE) { 17670 mutex_exit(SD_MUTEX(un)); 17671 return (EAGAIN); 17672 } 17673 mutex_exit(SD_MUTEX(un)); 17674 } 17675 17676 bzero(&cdb, sizeof (cdb)); 17677 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17678 17679 cdb.scc_cmd = SCMD_DOORLOCK; 17680 cdb.cdb_opaque[4] = (uchar_t)flag; 17681 17682 ucmd_buf.uscsi_cdb = (char *)&cdb; 17683 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17684 ucmd_buf.uscsi_bufaddr = NULL; 17685 ucmd_buf.uscsi_buflen = 0; 17686 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17687 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17688 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17689 ucmd_buf.uscsi_timeout = 15; 17690 17691 SD_TRACE(SD_LOG_IO, un, 17692 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17693 17694 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17695 UIO_SYSSPACE, path_flag); 17696 17697 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17698 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17699 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17700 /* fake success and skip subsequent doorlock commands */ 17701 un->un_f_doorlock_supported = FALSE; 17702 return (0); 17703 } 17704 17705 return (status); 17706 } 17707 17708 /* 17709 * Function: sd_send_scsi_READ_CAPACITY 17710 * 17711 * Description: This routine uses the scsi READ CAPACITY command to determine 17712 * the device capacity in number of blocks and the device native 17713 * block size. If this function returns a failure, then the 17714 * values in *capp and *lbap are undefined. If the capacity 17715 * returned is 0xffffffff then the lun is too large for a 17716 * normal READ CAPACITY command and the results of a 17717 * READ CAPACITY 16 will be used instead. 17718 * 17719 * Arguments: un - ptr to soft state struct for the target 17720 * capp - ptr to unsigned 64-bit variable to receive the 17721 * capacity value from the command. 17722 * lbap - ptr to unsigned 32-bit variable to receive the 17723 * block size value from the command 17724 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17725 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17726 * to use the USCSI "direct" chain and bypass the normal 17727 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17728 * command is issued as part of an error recovery action. 17729 * 17730 * Return Code: 0 - Success 17731 * EIO - IO error 17732 * EACCES - Reservation conflict detected 17733 * EAGAIN - Device is becoming ready 17734 * errno return code from sd_send_scsi_cmd() 17735 * 17736 * Context: Can sleep. Blocks until command completes. 17737 */ 17738 17739 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17740 17741 static int 17742 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17743 int path_flag) 17744 { 17745 struct scsi_extended_sense sense_buf; 17746 struct uscsi_cmd ucmd_buf; 17747 union scsi_cdb cdb; 17748 uint32_t *capacity_buf; 17749 uint64_t capacity; 17750 uint32_t lbasize; 17751 int status; 17752 17753 ASSERT(un != NULL); 17754 ASSERT(!mutex_owned(SD_MUTEX(un))); 17755 ASSERT(capp != NULL); 17756 ASSERT(lbap != NULL); 17757 17758 SD_TRACE(SD_LOG_IO, un, 17759 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17760 17761 /* 17762 * First send a READ_CAPACITY command to the target. 17763 * (This command is mandatory under SCSI-2.) 17764 * 17765 * Set up the CDB for the READ_CAPACITY command.
The Partial 17766 * Medium Indicator bit is cleared. The address field must be 17767 * zero if the PMI bit is zero. 17768 */ 17769 bzero(&cdb, sizeof (cdb)); 17770 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17771 17772 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17773 17774 cdb.scc_cmd = SCMD_READ_CAPACITY; 17775 17776 ucmd_buf.uscsi_cdb = (char *)&cdb; 17777 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17778 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17779 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17780 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17781 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17782 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17783 ucmd_buf.uscsi_timeout = 60; 17784 17785 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17786 UIO_SYSSPACE, path_flag); 17787 17788 switch (status) { 17789 case 0: 17790 /* Return failure if we did not get valid capacity data. */ 17791 if (ucmd_buf.uscsi_resid != 0) { 17792 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17793 return (EIO); 17794 } 17795 17796 /* 17797 * Read capacity and block size from the READ CAPACITY 10 data. 17798 * This data may be adjusted later due to device specific 17799 * issues. 17800 * 17801 * According to the SCSI spec, the READ CAPACITY 10 17802 * command returns the following: 17803 * 17804 * bytes 0-3: Maximum logical block address available. 17805 * (MSB in byte:0 & LSB in byte:3) 17806 * 17807 * bytes 4-7: Block length in bytes 17808 * (MSB in byte:4 & LSB in byte:7) 17809 * 17810 */ 17811 capacity = BE_32(capacity_buf[0]); 17812 lbasize = BE_32(capacity_buf[1]); 17813 17814 /* 17815 * Done with capacity_buf 17816 */ 17817 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17818 17819 /* 17820 * if the reported capacity is set to all 0xf's, then 17821 * this disk is too large and requires SBC-2 commands. 17822 * Reissue the request using READ CAPACITY 16. 17823 */ 17824 if (capacity == 0xffffffff) { 17825 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17826 &lbasize, path_flag); 17827 if (status != 0) { 17828 return (status); 17829 } 17830 } 17831 break; /* Success! */ 17832 case EIO: 17833 switch (ucmd_buf.uscsi_status) { 17834 case STATUS_RESERVATION_CONFLICT: 17835 status = EACCES; 17836 break; 17837 case STATUS_CHECK: 17838 /* 17839 * Check condition; look for ASC/ASCQ of 0x04/0x01 17840 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17841 */ 17842 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17843 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17844 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17845 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17846 return (EAGAIN); 17847 } 17848 break; 17849 default: 17850 break; 17851 } 17852 /* FALLTHRU */ 17853 default: 17854 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17855 return (status); 17856 } 17857 17858 /* 17859 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17860 * (2352 and 0 are common) so for these devices always force the value 17861 * to 2048 as required by the ATAPI specs. 17862 */ 17863 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17864 lbasize = 2048; 17865 } 17866 17867 /* 17868 * Get the maximum LBA value from the READ CAPACITY data. 17869 * Here we assume that the Partial Medium Indicator (PMI) bit 17870 * was cleared when issuing the command. This means that the LBA 17871 * returned from the device is the LBA of the last logical block 17872 * on the logical unit. The actual logical block count will be 17873 * this value plus one. 
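 *
 * Worked example for the computation below (illustration only,
 * assuming un_sys_blocksize is 512): a device reporting a maximum
 * LBA of 0x003FFFFF and a block length of 2048 bytes yields
 *
 *	capacity = (0x003FFFFF + 1) * (2048 / 512)
 *	         = 0x00400000 * 4 = 16777216
 *
 * i.e. 16777216 512-byte system blocks (8 GB).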
17874 * 17875 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17876 * so scale the capacity value to reflect this. 17877 */ 17878 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17879 17880 /* 17881 * Copy the values from the READ CAPACITY command into the space 17882 * provided by the caller. 17883 */ 17884 *capp = capacity; 17885 *lbap = lbasize; 17886 17887 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17888 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17889 17890 /* 17891 * Both the lbasize and capacity from the device must be nonzero, 17892 * otherwise we assume that the values are not valid and return 17893 * failure to the caller. (4203735) 17894 */ 17895 if ((capacity == 0) || (lbasize == 0)) { 17896 return (EIO); 17897 } 17898 17899 return (0); 17900 } 17901 17902 /* 17903 * Function: sd_send_scsi_READ_CAPACITY_16 17904 * 17905 * Description: This routine uses the scsi READ CAPACITY 16 command to 17906 * determine the device capacity in number of blocks and the 17907 * device native block size. If this function returns a failure, 17908 * then the values in *capp and *lbap are undefined. 17909 * This routine should always be called by 17910 * sd_send_scsi_READ_CAPACITY, which will apply any device 17911 * specific adjustments to capacity and lbasize. 17912 * 17913 * Arguments: un - ptr to soft state struct for the target 17914 * capp - ptr to unsigned 64-bit variable to receive the 17915 * capacity value from the command. 17916 * lbap - ptr to unsigned 32-bit variable to receive the 17917 * block size value from the command 17918 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17919 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17920 * to use the USCSI "direct" chain and bypass the normal 17921 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17922 * this command is issued as part of an error recovery 17923 * action. 17924 * 17925 * Return Code: 0 - Success 17926 * EIO - IO error 17927 * EACCES - Reservation conflict detected 17928 * EAGAIN - Device is becoming ready 17929 * errno return code from sd_send_scsi_cmd() 17930 * 17931 * Context: Can sleep. Blocks until command completes. 17932 */ 17933 17934 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17935 17936 static int 17937 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17938 uint32_t *lbap, int path_flag) 17939 { 17940 struct scsi_extended_sense sense_buf; 17941 struct uscsi_cmd ucmd_buf; 17942 union scsi_cdb cdb; 17943 uint64_t *capacity16_buf; 17944 uint64_t capacity; 17945 uint32_t lbasize; 17946 int status; 17947 17948 ASSERT(un != NULL); 17949 ASSERT(!mutex_owned(SD_MUTEX(un))); 17950 ASSERT(capp != NULL); 17951 ASSERT(lbap != NULL); 17952 17953 SD_TRACE(SD_LOG_IO, un, 17954 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 17955 17956 /* 17957 * First send a READ_CAPACITY_16 command to the target. 17958 * 17959 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17960 * Medium Indicator bit is cleared. The address field must be 17961 * zero if the PMI bit is zero.
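 *
 * CDB outline per SBC-2 (for reference; illustration only): byte 0
 * carries the SERVICE ACTION IN(16) opcode (0x9E), the low five
 * bits of byte 1 carry the READ CAPACITY(16) service action (0x10),
 * and bytes 10-13 carry the allocation length, which is filled in
 * below via FORMG4COUNT().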
17962 */ 17963 bzero(&cdb, sizeof (cdb)); 17964 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17965 17966 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17967 17968 ucmd_buf.uscsi_cdb = (char *)&cdb; 17969 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17970 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17971 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17972 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17973 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17974 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17975 ucmd_buf.uscsi_timeout = 60; 17976 17977 /* 17978 * Read Capacity (16) is a Service Action In command. One 17979 * command byte (0x9E) is overloaded for multiple operations, 17980 * with the second CDB byte specifying the desired operation 17981 */ 17982 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17983 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17984 17985 /* 17986 * Fill in allocation length field 17987 */ 17988 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17989 17990 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17991 UIO_SYSSPACE, path_flag); 17992 17993 switch (status) { 17994 case 0: 17995 /* Return failure if we did not get valid capacity data. */ 17996 if (ucmd_buf.uscsi_resid > 20) { 17997 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17998 return (EIO); 17999 } 18000 18001 /* 18002 * Read capacity and block size from the READ CAPACITY 16 data. 18003 * This data may be adjusted later due to device specific 18004 * issues. 18005 * 18006 * According to the SCSI spec, the READ CAPACITY 16 18007 * command returns the following: 18008 * 18009 * bytes 0-7: Maximum logical block address available. 18010 * (MSB in byte:0 & LSB in byte:7) 18011 * 18012 * bytes 8-11: Block length in bytes 18013 * (MSB in byte:8 & LSB in byte:11) 18014 * 18015 */ 18016 capacity = BE_64(capacity16_buf[0]); 18017 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 18018 18019 /* 18020 * Done with capacity16_buf 18021 */ 18022 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18023 18024 /* 18025 * if the reported capacity is set to all 0xf's, then 18026 * this disk is too large. This could only happen with 18027 * a device that supports LBAs larger than 64 bits, which 18028 * are not defined by any current T10 standards. 18029 */ 18030 if (capacity == 0xffffffffffffffff) { 18031 return (EIO); 18032 } 18033 break; /* Success! */ 18034 case EIO: 18035 switch (ucmd_buf.uscsi_status) { 18036 case STATUS_RESERVATION_CONFLICT: 18037 status = EACCES; 18038 break; 18039 case STATUS_CHECK: 18040 /* 18041 * Check condition; look for ASC/ASCQ of 0x04/0x01 18042 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18043 */ 18044 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18045 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18046 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18047 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18048 return (EAGAIN); 18049 } 18050 break; 18051 default: 18052 break; 18053 } 18054 /* FALLTHRU */ 18055 default: 18056 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18057 return (status); 18058 } 18059 18060 *capp = capacity; 18061 *lbap = lbasize; 18062 18063 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 18064 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 18065 18066 return (0); 18067 } 18068 18069 18070 /* 18071 * Function: sd_send_scsi_START_STOP_UNIT 18072 * 18073 * Description: Issue a scsi START STOP UNIT command to the target.
18074 * 18075 * Arguments: un - pointer to driver soft state (unit) structure for 18076 * this target. 18077 * flag - SD_TARGET_START 18078 * SD_TARGET_STOP 18079 * SD_TARGET_EJECT 18080 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18081 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18082 * to use the USCSI "direct" chain and bypass the normal 18083 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18084 * command is issued as part of an error recovery action. 18085 * 18086 * Return Code: 0 - Success 18087 * EIO - IO error 18088 * EACCES - Reservation conflict detected 18089 * ENXIO - Not Ready, medium not present 18090 * errno return code from sd_send_scsi_cmd() 18091 * 18092 * Context: Can sleep. 18093 */ 18094 18095 static int 18096 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18097 { 18098 struct scsi_extended_sense sense_buf; 18099 union scsi_cdb cdb; 18100 struct uscsi_cmd ucmd_buf; 18101 int status; 18102 18103 ASSERT(un != NULL); 18104 ASSERT(!mutex_owned(SD_MUTEX(un))); 18105 18106 SD_TRACE(SD_LOG_IO, un, 18107 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18108 18109 if (un->un_f_check_start_stop && 18110 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18111 (un->un_f_start_stop_supported != TRUE)) { 18112 return (0); 18113 } 18114 18115 /* 18116 * If we are performing an eject operation and 18117 * we receive any command other than SD_TARGET_EJECT 18118 * we should immediately return. 18119 */ 18120 if (flag != SD_TARGET_EJECT) { 18121 mutex_enter(SD_MUTEX(un)); 18122 if (un->un_f_ejecting == TRUE) { 18123 mutex_exit(SD_MUTEX(un)); 18124 return (EAGAIN); 18125 } 18126 mutex_exit(SD_MUTEX(un)); 18127 } 18128 18129 bzero(&cdb, sizeof (cdb)); 18130 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18131 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18132 18133 cdb.scc_cmd = SCMD_START_STOP; 18134 cdb.cdb_opaque[4] = (uchar_t)flag; 18135 18136 ucmd_buf.uscsi_cdb = (char *)&cdb; 18137 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18138 ucmd_buf.uscsi_bufaddr = NULL; 18139 ucmd_buf.uscsi_buflen = 0; 18140 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18141 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18142 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18143 ucmd_buf.uscsi_timeout = 200; 18144 18145 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18146 UIO_SYSSPACE, path_flag); 18147 18148 switch (status) { 18149 case 0: 18150 break; /* Success! */ 18151 case EIO: 18152 switch (ucmd_buf.uscsi_status) { 18153 case STATUS_RESERVATION_CONFLICT: 18154 status = EACCES; 18155 break; 18156 case STATUS_CHECK: 18157 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18158 switch (scsi_sense_key( 18159 (uint8_t *)&sense_buf)) { 18160 case KEY_ILLEGAL_REQUEST: 18161 status = ENOTSUP; 18162 break; 18163 case KEY_NOT_READY: 18164 if (scsi_sense_asc( 18165 (uint8_t *)&sense_buf) 18166 == 0x3A) { 18167 status = ENXIO; 18168 } 18169 break; 18170 default: 18171 break; 18172 } 18173 } 18174 break; 18175 default: 18176 break; 18177 } 18178 break; 18179 default: 18180 break; 18181 } 18182 18183 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18184 18185 return (status); 18186 } 18187 18188 18189 /* 18190 * Function: sd_start_stop_unit_callback 18191 * 18192 * Description: timeout(9F) callback to begin recovery process for a 18193 * device that has spun down. 18194 * 18195 * Arguments: arg - pointer to associated softstate struct. 
18196 * 18197 * Context: Executes in a timeout(9F) thread context 18198 */ 18199 18200 static void 18201 sd_start_stop_unit_callback(void *arg) 18202 { 18203 struct sd_lun *un = arg; 18204 ASSERT(un != NULL); 18205 ASSERT(!mutex_owned(SD_MUTEX(un))); 18206 18207 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18208 18209 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18210 } 18211 18212 18213 /* 18214 * Function: sd_start_stop_unit_task 18215 * 18216 * Description: Recovery procedure when a drive is spun down. 18217 * 18218 * Arguments: arg - pointer to associated softstate struct. 18219 * 18220 * Context: Executes in a taskq() thread context 18221 */ 18222 18223 static void 18224 sd_start_stop_unit_task(void *arg) 18225 { 18226 struct sd_lun *un = arg; 18227 18228 ASSERT(un != NULL); 18229 ASSERT(!mutex_owned(SD_MUTEX(un))); 18230 18231 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18232 18233 /* 18234 * Some unformatted drives report not ready error, no need to 18235 * restart if format has been initiated. 18236 */ 18237 mutex_enter(SD_MUTEX(un)); 18238 if (un->un_f_format_in_progress == TRUE) { 18239 mutex_exit(SD_MUTEX(un)); 18240 return; 18241 } 18242 mutex_exit(SD_MUTEX(un)); 18243 18244 /* 18245 * When a START STOP command is issued from here, it is part of a 18246 * failure recovery operation and must be issued before any other 18247 * commands, including any pending retries. Thus it must be sent 18248 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18249 * succeeds or not, we will start I/O after the attempt. 18250 */ 18251 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18252 SD_PATH_DIRECT_PRIORITY); 18253 18254 /* 18255 * The above call blocks until the START_STOP_UNIT command completes. 18256 * Now that it has completed, we must re-try the original IO that 18257 * received the NOT READY condition in the first place. There are 18258 * three possible conditions here: 18259 * 18260 * (1) The original IO is on un_retry_bp. 18261 * (2) The original IO is on the regular wait queue, and un_retry_bp 18262 * is NULL. 18263 * (3) The original IO is on the regular wait queue, and un_retry_bp 18264 * points to some other, unrelated bp. 18265 * 18266 * For each case, we must call sd_start_cmds() with un_retry_bp 18267 * as the argument. If un_retry_bp is NULL, this will initiate 18268 * processing of the regular wait queue. If un_retry_bp is not NULL, 18269 * then this will process the bp on un_retry_bp. That may or may not 18270 * be the original IO, but that does not matter: the important thing 18271 * is to keep the IO processing going at this point. 18272 * 18273 * Note: This is a very specific error recovery sequence associated 18274 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18275 * serialize the I/O with completion of the spin-up. 18276 */ 18277 mutex_enter(SD_MUTEX(un)); 18278 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18279 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18280 un, un->un_retry_bp); 18281 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18282 sd_start_cmds(un, un->un_retry_bp); 18283 mutex_exit(SD_MUTEX(un)); 18284 18285 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18286 } 18287 18288 18289 /* 18290 * Function: sd_send_scsi_INQUIRY 18291 * 18292 * Description: Issue the scsi INQUIRY command. 
18293 * 18294 * Arguments: un 18295 * bufaddr 18296 * buflen 18297 * evpd 18298 * page_code 18299 * residp 18300 * 18301 * Return Code: 0 - Success 18302 * errno return code from sd_send_scsi_cmd() 18303 * 18304 * Context: Can sleep. Does not return until command is completed. 18305 */ 18306 18307 static int 18308 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18309 uchar_t evpd, uchar_t page_code, size_t *residp) 18310 { 18311 union scsi_cdb cdb; 18312 struct uscsi_cmd ucmd_buf; 18313 int status; 18314 18315 ASSERT(un != NULL); 18316 ASSERT(!mutex_owned(SD_MUTEX(un))); 18317 ASSERT(bufaddr != NULL); 18318 18319 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18320 18321 bzero(&cdb, sizeof (cdb)); 18322 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18323 bzero(bufaddr, buflen); 18324 18325 cdb.scc_cmd = SCMD_INQUIRY; 18326 cdb.cdb_opaque[1] = evpd; 18327 cdb.cdb_opaque[2] = page_code; 18328 FORMG0COUNT(&cdb, buflen); 18329 18330 ucmd_buf.uscsi_cdb = (char *)&cdb; 18331 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18332 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18333 ucmd_buf.uscsi_buflen = buflen; 18334 ucmd_buf.uscsi_rqbuf = NULL; 18335 ucmd_buf.uscsi_rqlen = 0; 18336 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18337 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18338 18339 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18340 UIO_SYSSPACE, SD_PATH_DIRECT); 18341 18342 if ((status == 0) && (residp != NULL)) { 18343 *residp = ucmd_buf.uscsi_resid; 18344 } 18345 18346 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18347 18348 return (status); 18349 } 18350 18351 18352 /* 18353 * Function: sd_send_scsi_TEST_UNIT_READY 18354 * 18355 * Description: Issue the scsi TEST UNIT READY command. 18356 * This routine can be told to set the flag USCSI_DIAGNOSE to 18357 * prevent retrying failed commands. Use this when the intent 18358 * is either to check for device readiness, to clear a Unit 18359 * Attention, or to clear any outstanding sense data. 18360 * However, under specific conditions the expected behavior 18361 * is for retries to bring a device ready, so use the flag 18362 * with caution. 18363 * 18364 * Arguments: un 18365 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18366 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18367 * 0: don't check for media present, do retries on cmd. 18368 * 18369 * Return Code: 0 - Success 18370 * EIO - IO error 18371 * EACCES - Reservation conflict detected 18372 * ENXIO - Not Ready, medium not present 18373 * errno return code from sd_send_scsi_cmd() 18374 * 18375 * Context: Can sleep. Does not return until command is completed. 18376 */ 18377 18378 static int 18379 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18380 { 18381 struct scsi_extended_sense sense_buf; 18382 union scsi_cdb cdb; 18383 struct uscsi_cmd ucmd_buf; 18384 int status; 18385 18386 ASSERT(un != NULL); 18387 ASSERT(!mutex_owned(SD_MUTEX(un))); 18388 18389 SD_TRACE(SD_LOG_IO, un, 18390 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18391 18392 /* 18393 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18394 * timeouts when they receive a TUR and the queue is not empty. Check 18395 * the configuration flag set during attach (indicating the drive has 18396 * this firmware bug) and un_ncmds_in_transport before issuing the 18397 * TUR. If there are 18398 * pending commands, return success; this is a bit arbitrary but is ok 18399 * for non-removables (i.e.
the eliteI disks) and non-clustering 18400 * configurations. 18401 */ 18402 if (un->un_f_cfg_tur_check == TRUE) { 18403 mutex_enter(SD_MUTEX(un)); 18404 if (un->un_ncmds_in_transport != 0) { 18405 mutex_exit(SD_MUTEX(un)); 18406 return (0); 18407 } 18408 mutex_exit(SD_MUTEX(un)); 18409 } 18410 18411 bzero(&cdb, sizeof (cdb)); 18412 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18413 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18414 18415 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18416 18417 ucmd_buf.uscsi_cdb = (char *)&cdb; 18418 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18419 ucmd_buf.uscsi_bufaddr = NULL; 18420 ucmd_buf.uscsi_buflen = 0; 18421 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18422 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18423 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18424 18425 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18426 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18427 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18428 } 18429 ucmd_buf.uscsi_timeout = 60; 18430 18431 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18432 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18433 SD_PATH_STANDARD)); 18434 18435 switch (status) { 18436 case 0: 18437 break; /* Success! */ 18438 case EIO: 18439 switch (ucmd_buf.uscsi_status) { 18440 case STATUS_RESERVATION_CONFLICT: 18441 status = EACCES; 18442 break; 18443 case STATUS_CHECK: 18444 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18445 break; 18446 } 18447 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18448 (scsi_sense_key((uint8_t *)&sense_buf) == 18449 KEY_NOT_READY) && 18450 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18451 status = ENXIO; 18452 } 18453 break; 18454 default: 18455 break; 18456 } 18457 break; 18458 default: 18459 break; 18460 } 18461 18462 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18463 18464 return (status); 18465 } 18466 18467 18468 /* 18469 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18470 * 18471 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18472 * 18473 * Arguments: un 18474 * 18475 * Return Code: 0 - Success 18476 * EACCES 18477 * ENOTSUP 18478 * errno return code from sd_send_scsi_cmd() 18479 * 18480 * Context: Can sleep. Does not return until command is completed. 
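 *
 * For illustration only (a sketch; field offsets follow the SPC-3
 * PERSISTENT RESERVE IN parameter data format, and the contents are
 * supplied by the device): with usr_cmd == SD_READ_KEYS, data_bufp
 * receives an 8-byte header followed by the registered keys:
 *
 *	bytes 0-3: PRGENERATION counter (big-endian)
 *	bytes 4-7: ADDITIONAL LENGTH, i.e. 8 * (number of keys)
 *	bytes 8-n: the 8-byte reservation keys themselves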
18481 */ 18482 18483 static int 18484 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18485 uint16_t data_len, uchar_t *data_bufp) 18486 { 18487 struct scsi_extended_sense sense_buf; 18488 union scsi_cdb cdb; 18489 struct uscsi_cmd ucmd_buf; 18490 int status; 18491 int no_caller_buf = FALSE; 18492 18493 ASSERT(un != NULL); 18494 ASSERT(!mutex_owned(SD_MUTEX(un))); 18495 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18496 18497 SD_TRACE(SD_LOG_IO, un, 18498 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18499 18500 bzero(&cdb, sizeof (cdb)); 18501 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18502 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18503 if (data_bufp == NULL) { 18504 /* Allocate a default buf if the caller did not give one */ 18505 ASSERT(data_len == 0); 18506 data_len = MHIOC_RESV_KEY_SIZE; 18507 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18508 no_caller_buf = TRUE; 18509 } 18510 18511 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18512 cdb.cdb_opaque[1] = usr_cmd; 18513 FORMG1COUNT(&cdb, data_len); 18514 18515 ucmd_buf.uscsi_cdb = (char *)&cdb; 18516 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18517 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18518 ucmd_buf.uscsi_buflen = data_len; 18519 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18520 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18521 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18522 ucmd_buf.uscsi_timeout = 60; 18523 18524 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18525 UIO_SYSSPACE, SD_PATH_STANDARD); 18526 18527 switch (status) { 18528 case 0: 18529 break; /* Success! */ 18530 case EIO: 18531 switch (ucmd_buf.uscsi_status) { 18532 case STATUS_RESERVATION_CONFLICT: 18533 status = EACCES; 18534 break; 18535 case STATUS_CHECK: 18536 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18537 (scsi_sense_key((uint8_t *)&sense_buf) == 18538 KEY_ILLEGAL_REQUEST)) { 18539 status = ENOTSUP; 18540 } 18541 break; 18542 default: 18543 break; 18544 } 18545 break; 18546 default: 18547 break; 18548 } 18549 18550 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18551 18552 if (no_caller_buf == TRUE) { 18553 kmem_free(data_bufp, data_len); 18554 } 18555 18556 return (status); 18557 } 18558 18559 18560 /* 18561 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18562 * 18563 * Description: This routine is the driver entry point for handling 18564 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 18565 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, 18566 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT commands to the device. 18567 * 18568 * Arguments: un - Pointer to soft state struct for the target. 18569 * usr_cmd - SCSI-3 reservation facility command (one of 18570 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18571 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY) 18572 * usr_bufp - user-provided pointer to a register, reserve descriptor, or 18573 * preempt-and-abort structure (mhioc_register_t, 18574 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18575 * 18576 * Return Code: 0 - Success 18577 * EACCES 18578 * ENOTSUP 18579 * errno return code from sd_send_scsi_cmd() 18580 * 18581 * Context: Can sleep. Does not return until command is completed.
18582 */ 18583 18584 static int 18585 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18586 uchar_t *usr_bufp) 18587 { 18588 struct scsi_extended_sense sense_buf; 18589 union scsi_cdb cdb; 18590 struct uscsi_cmd ucmd_buf; 18591 int status; 18592 uchar_t data_len = sizeof (sd_prout_t); 18593 sd_prout_t *prp; 18594 18595 ASSERT(un != NULL); 18596 ASSERT(!mutex_owned(SD_MUTEX(un))); 18597 ASSERT(data_len == 24); /* required by scsi spec */ 18598 18599 SD_TRACE(SD_LOG_IO, un, 18600 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18601 18602 if (usr_bufp == NULL) { 18603 return (EINVAL); 18604 } 18605 18606 bzero(&cdb, sizeof (cdb)); 18607 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18608 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18609 prp = kmem_zalloc(data_len, KM_SLEEP); 18610 18611 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18612 cdb.cdb_opaque[1] = usr_cmd; 18613 FORMG1COUNT(&cdb, data_len); 18614 18615 ucmd_buf.uscsi_cdb = (char *)&cdb; 18616 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18617 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18618 ucmd_buf.uscsi_buflen = data_len; 18619 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18620 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18621 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18622 ucmd_buf.uscsi_timeout = 60; 18623 18624 switch (usr_cmd) { 18625 case SD_SCSI3_REGISTER: { 18626 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18627 18628 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18629 bcopy(ptr->newkey.key, prp->service_key, 18630 MHIOC_RESV_KEY_SIZE); 18631 prp->aptpl = ptr->aptpl; 18632 break; 18633 } 18634 case SD_SCSI3_RESERVE: 18635 case SD_SCSI3_RELEASE: { 18636 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18637 18638 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18639 prp->scope_address = BE_32(ptr->scope_specific_addr); 18640 cdb.cdb_opaque[2] = ptr->type; 18641 break; 18642 } 18643 case SD_SCSI3_PREEMPTANDABORT: { 18644 mhioc_preemptandabort_t *ptr = 18645 (mhioc_preemptandabort_t *)usr_bufp; 18646 18647 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18648 bcopy(ptr->victim_key.key, prp->service_key, 18649 MHIOC_RESV_KEY_SIZE); 18650 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18651 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18652 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18653 break; 18654 } 18655 case SD_SCSI3_REGISTERANDIGNOREKEY: 18656 { 18657 mhioc_registerandignorekey_t *ptr; 18658 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18659 bcopy(ptr->newkey.key, 18660 prp->service_key, MHIOC_RESV_KEY_SIZE); 18661 prp->aptpl = ptr->aptpl; 18662 break; 18663 } 18664 default: 18665 ASSERT(FALSE); 18666 break; 18667 } 18668 18669 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18670 UIO_SYSSPACE, SD_PATH_STANDARD); 18671 18672 switch (status) { 18673 case 0: 18674 break; /* Success! 
*/ 18675 case EIO: 18676 switch (ucmd_buf.uscsi_status) { 18677 case STATUS_RESERVATION_CONFLICT: 18678 status = EACCES; 18679 break; 18680 case STATUS_CHECK: 18681 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18682 (scsi_sense_key((uint8_t *)&sense_buf) == 18683 KEY_ILLEGAL_REQUEST)) { 18684 status = ENOTSUP; 18685 } 18686 break; 18687 default: 18688 break; 18689 } 18690 break; 18691 default: 18692 break; 18693 } 18694 18695 kmem_free(prp, data_len); 18696 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18697 return (status); 18698 } 18699 18700 18701 /* 18702 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18703 * 18704 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18705 * 18706 * Arguments: un - pointer to the target's soft state struct 18707 * dkc - pointer to the callback structure 18708 * 18709 * Return Code: 0 - success 18710 * errno-type error code 18711 * 18712 * Context: kernel thread context only. 18713 * 18714 * _______________________________________________________________ 18715 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18716 * |FLUSH_VOLATILE| | operation | 18717 * |______________|______________|_________________________________| 18718 * | 0 | NULL | Synchronous flush on both | 18719 * | | | volatile and non-volatile cache | 18720 * |______________|______________|_________________________________| 18721 * | 1 | NULL | Synchronous flush on volatile | 18722 * | | | cache; disk drivers may suppress| 18723 * | | | flush if disk table indicates | 18724 * | | | non-volatile cache | 18725 * |______________|______________|_________________________________| 18726 * | 0 | !NULL | Asynchronous flush on both | 18727 * | | | volatile and non-volatile cache;| 18728 * |______________|______________|_________________________________| 18729 * | 1 | !NULL | Asynchronous flush on volatile | 18730 * | | | cache; disk drivers may suppress| 18731 * | | | flush if disk table indicates | 18732 * | | | non-volatile cache | 18733 * |______________|______________|_________________________________| 18734 * 18735 */ 18736 18737 static int 18738 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18739 { 18740 struct sd_uscsi_info *uip; 18741 struct uscsi_cmd *uscmd; 18742 union scsi_cdb *cdb; 18743 struct buf *bp; 18744 int rval = 0; 18745 int is_async; 18746 18747 SD_TRACE(SD_LOG_IO, un, 18748 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18749 18750 ASSERT(un != NULL); 18751 ASSERT(!mutex_owned(SD_MUTEX(un))); 18752 18753 if (dkc == NULL || dkc->dkc_callback == NULL) { 18754 is_async = FALSE; 18755 } else { 18756 is_async = TRUE; 18757 } 18758 18759 mutex_enter(SD_MUTEX(un)); 18760 /* check whether cache flush should be suppressed */ 18761 if (un->un_f_suppress_cache_flush == TRUE) { 18762 mutex_exit(SD_MUTEX(un)); 18763 /* 18764 * suppress the cache flush if the device is told to do 18765 * so by sd.conf or disk table 18766 */ 18767 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18768 skip the cache flush since suppress_cache_flush is %d!\n", 18769 un->un_f_suppress_cache_flush); 18770 18771 if (is_async == TRUE) { 18772 /* invoke callback for asynchronous flush */ 18773 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18774 } 18775 return (rval); 18776 } 18777 mutex_exit(SD_MUTEX(un)); 18778 18779 /* 18780 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18781 * set properly 18782 */ 18783 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18784 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18785 18786 
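	/*
	 * Illustrative usage sketch (hypothetical caller and callback
	 * names, not part of this driver): an asynchronous, volatile-
	 * cache-only flush per the table above would be requested
	 * roughly as follows, with dkc_callback invoked from
	 * sd_send_scsi_SYNCHRONIZE_CACHE_biodone() on completion:
	 *
	 *	struct dk_callback dkc;
	 *
	 *	dkc.dkc_callback = my_flush_done;	(hypothetical)
	 *	dkc.dkc_cookie = my_arg;		(hypothetical)
	 *	dkc.dkc_flag = FLUSH_VOLATILE;
	 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
	 */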
mutex_enter(SD_MUTEX(un)); 18787 if (dkc != NULL && un->un_f_sync_nv_supported && 18788 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18789 /* 18790 * If the device supports the SYNC_NV bit, turn on 18791 * the SYNC_NV bit to flush only the volatile cache. 18792 */ 18793 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18794 } 18795 mutex_exit(SD_MUTEX(un)); 18796 18797 /* 18798 * First get some memory for the uscsi_cmd struct and cdb 18799 * and initialize for SYNCHRONIZE_CACHE cmd. 18800 */ 18801 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18802 uscmd->uscsi_cdblen = CDB_GROUP1; 18803 uscmd->uscsi_cdb = (caddr_t)cdb; 18804 uscmd->uscsi_bufaddr = NULL; 18805 uscmd->uscsi_buflen = 0; 18806 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18807 uscmd->uscsi_rqlen = SENSE_LENGTH; 18808 uscmd->uscsi_rqresid = SENSE_LENGTH; 18809 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18810 uscmd->uscsi_timeout = sd_io_time; 18811 18812 /* 18813 * Allocate an sd_uscsi_info struct and fill it with the info 18814 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18815 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18816 * since we allocate the buf here in this function, we do not 18817 * need to preserve the prior contents of b_private. 18818 * The sd_uscsi_info struct is also used by sd_uscsi_strategy(). 18819 */ 18820 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18821 uip->ui_flags = SD_PATH_DIRECT; 18822 uip->ui_cmdp = uscmd; 18823 18824 bp = getrbuf(KM_SLEEP); 18825 bp->b_private = uip; 18826 18827 /* 18828 * Setup buffer to carry uscsi request. 18829 */ 18830 bp->b_flags = B_BUSY; 18831 bp->b_bcount = 0; 18832 bp->b_blkno = 0; 18833 18834 if (is_async == TRUE) { 18835 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18836 uip->ui_dkc = *dkc; 18837 } 18838 18839 bp->b_edev = SD_GET_DEV(un); 18840 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18841 18842 (void) sd_uscsi_strategy(bp); 18843 18844 /* 18845 * If this is a synchronous request, wait for completion. 18846 * If it is async, just return and let the b_iodone callback 18847 * clean up. 18848 * NOTE: On return, un_ncmds_in_driver will be decremented, 18849 * but it was also incremented in sd_uscsi_strategy(), so 18850 * we should be ok. 18851 */ 18852 if (is_async == FALSE) { 18853 (void) biowait(bp); 18854 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18855 } 18856 18857 return (rval); 18858 } 18859 18860 18861 static int 18862 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18863 { 18864 struct sd_uscsi_info *uip; 18865 struct uscsi_cmd *uscmd; 18866 uint8_t *sense_buf; 18867 struct sd_lun *un; 18868 int status; 18869 union scsi_cdb *cdb; 18870 18871 uip = (struct sd_uscsi_info *)(bp->b_private); 18872 ASSERT(uip != NULL); 18873 18874 uscmd = uip->ui_cmdp; 18875 ASSERT(uscmd != NULL); 18876 18877 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18878 ASSERT(sense_buf != NULL); 18879 18880 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18881 ASSERT(un != NULL); 18882 18883 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 18884 18885 status = geterror(bp); 18886 switch (status) { 18887 case 0: 18888 break; /* Success!
*/ 18889 case EIO: 18890 switch (uscmd->uscsi_status) { 18891 case STATUS_RESERVATION_CONFLICT: 18892 /* Ignore reservation conflict */ 18893 status = 0; 18894 goto done; 18895 18896 case STATUS_CHECK: 18897 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18898 (scsi_sense_key(sense_buf) == 18899 KEY_ILLEGAL_REQUEST)) { 18900 /* Ignore Illegal Request error */ 18901 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 18902 mutex_enter(SD_MUTEX(un)); 18903 un->un_f_sync_nv_supported = FALSE; 18904 mutex_exit(SD_MUTEX(un)); 18905 status = 0; 18906 SD_TRACE(SD_LOG_IO, un, 18907 "un_f_sync_nv_supported \ 18908 is set to false.\n"); 18909 goto done; 18910 } 18911 18912 mutex_enter(SD_MUTEX(un)); 18913 un->un_f_sync_cache_supported = FALSE; 18914 mutex_exit(SD_MUTEX(un)); 18915 SD_TRACE(SD_LOG_IO, un, 18916 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 18917 un_f_sync_cache_supported set to false \ 18918 with asc = %x, ascq = %x\n", 18919 scsi_sense_asc(sense_buf), 18920 scsi_sense_ascq(sense_buf)); 18921 status = ENOTSUP; 18922 goto done; 18923 } 18924 break; 18925 default: 18926 break; 18927 } 18928 /* FALLTHRU */ 18929 default: 18930 /* 18931 * Don't log an error message if this device 18932 * has removable media. 18933 */ 18934 if (!un->un_f_has_removable_media) { 18935 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18936 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18937 } 18938 break; 18939 } 18940 18941 done: 18942 if (uip->ui_dkc.dkc_callback != NULL) { 18943 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18944 } 18945 18946 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18947 freerbuf(bp); 18948 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18949 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18950 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18951 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18952 18953 return (status); 18954 } 18955 18956 18957 /* 18958 * Function: sd_send_scsi_GET_CONFIGURATION 18959 * 18960 * Description: Issues the get configuration command to the device. 18961 * Called from sd_check_for_writable_cd & sd_get_media_info; 18962 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 18963 * Arguments: un 18964 * ucmdbuf 18965 * rqbuf 18966 * rqbuflen 18967 * bufaddr 18968 * buflen 18969 * path_flag 18970 * 18971 * Return Code: 0 - Success 18972 * errno return code from sd_send_scsi_cmd() 18973 * 18974 * Context: Can sleep. Does not return until command is completed. 18975 * 18976 */ 18977 18978 static int 18979 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18980 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18981 int path_flag) 18982 { 18983 char cdb[CDB_GROUP1]; 18984 int status; 18985 18986 ASSERT(un != NULL); 18987 ASSERT(!mutex_owned(SD_MUTEX(un))); 18988 ASSERT(bufaddr != NULL); 18989 ASSERT(ucmdbuf != NULL); 18990 ASSERT(rqbuf != NULL); 18991 18992 SD_TRACE(SD_LOG_IO, un, 18993 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18994 18995 bzero(cdb, sizeof (cdb)); 18996 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18997 bzero(rqbuf, rqbuflen); 18998 bzero(bufaddr, buflen); 18999 19000 /* 19001 * Set up cdb field for the get configuration command.
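 * For illustration (a sketch of the 10-byte CDB assembled below;
 * field offsets per the MMC GET CONFIGURATION command): byte 0 is
 * the opcode (0x46), byte 1 = 0x02 selects the RT field value that
 * returns only the requested feature descriptor, bytes 2-3 hold the
 * starting feature number, and bytes 7-8 hold the allocation length
 * (only byte 8, the low byte, is set here).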
19002 */ 19003 cdb[0] = SCMD_GET_CONFIGURATION; 19004 cdb[1] = 0x02; /* Requested Type */ 19005 cdb[8] = SD_PROFILE_HEADER_LEN; 19006 ucmdbuf->uscsi_cdb = cdb; 19007 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19008 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19009 ucmdbuf->uscsi_buflen = buflen; 19010 ucmdbuf->uscsi_timeout = sd_io_time; 19011 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19012 ucmdbuf->uscsi_rqlen = rqbuflen; 19013 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19014 19015 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19016 UIO_SYSSPACE, path_flag); 19017 19018 switch (status) { 19019 case 0: 19020 break; /* Success! */ 19021 case EIO: 19022 switch (ucmdbuf->uscsi_status) { 19023 case STATUS_RESERVATION_CONFLICT: 19024 status = EACCES; 19025 break; 19026 default: 19027 break; 19028 } 19029 break; 19030 default: 19031 break; 19032 } 19033 19034 if (status == 0) { 19035 SD_DUMP_MEMORY(un, SD_LOG_IO, 19036 "sd_send_scsi_GET_CONFIGURATION: data", 19037 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19038 } 19039 19040 SD_TRACE(SD_LOG_IO, un, 19041 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19042 19043 return (status); 19044 } 19045 19046 /* 19047 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19048 * 19049 * Description: Issues the get configuration command to the device to 19050 * retrieve a specific feature. Called from 19051 * sd_check_for_writable_cd & sd_set_mmc_caps. 19052 * Arguments: un 19053 * ucmdbuf 19054 * rqbuf 19055 * rqbuflen 19056 * bufaddr 19057 * buflen 19058 * feature 19059 * 19060 * Return Code: 0 - Success 19061 * errno return code from sd_send_scsi_cmd() 19062 * 19063 * Context: Can sleep. Does not return until command is completed. 19064 * 19065 */ 19066 static int 19067 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19068 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19069 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19070 { 19071 char cdb[CDB_GROUP1]; 19072 int status; 19073 19074 ASSERT(un != NULL); 19075 ASSERT(!mutex_owned(SD_MUTEX(un))); 19076 ASSERT(bufaddr != NULL); 19077 ASSERT(ucmdbuf != NULL); 19078 ASSERT(rqbuf != NULL); 19079 19080 SD_TRACE(SD_LOG_IO, un, 19081 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19082 19083 bzero(cdb, sizeof (cdb)); 19084 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19085 bzero(rqbuf, rqbuflen); 19086 bzero(bufaddr, buflen); 19087 19088 /* 19089 * Set up cdb field for the get configuration command. 19090 */ 19091 cdb[0] = SCMD_GET_CONFIGURATION; 19092 cdb[1] = 0x02; /* Requested Type */ 19093 cdb[3] = feature; 19094 cdb[8] = buflen; 19095 ucmdbuf->uscsi_cdb = cdb; 19096 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19097 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19098 ucmdbuf->uscsi_buflen = buflen; 19099 ucmdbuf->uscsi_timeout = sd_io_time; 19100 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19101 ucmdbuf->uscsi_rqlen = rqbuflen; 19102 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19103 19104 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19105 UIO_SYSSPACE, path_flag); 19106 19107 switch (status) { 19108 case 0: 19109 break; /* Success! 
*/ 19110 case EIO: 19111 switch (ucmdbuf->uscsi_status) { 19112 case STATUS_RESERVATION_CONFLICT: 19113 status = EACCES; 19114 break; 19115 default: 19116 break; 19117 } 19118 break; 19119 default: 19120 break; 19121 } 19122 19123 if (status == 0) { 19124 SD_DUMP_MEMORY(un, SD_LOG_IO, 19125 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19126 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19127 } 19128 19129 SD_TRACE(SD_LOG_IO, un, 19130 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19131 19132 return (status); 19133 } 19134 19135 19136 /* 19137 * Function: sd_send_scsi_MODE_SENSE 19138 * 19139 * Description: Utility function for issuing a scsi MODE SENSE command. 19140 * Note: This routine uses a consistent implementation for Group0, 19141 * Group1, and Group2 commands across all platforms. ATAPI devices 19142 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19143 * 19144 * Arguments: un - pointer to the softstate struct for the target. 19145 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19146 * CDB_GROUP[1|2] (10 byte)). 19147 * bufaddr - buffer for page data retrieved from the target. 19148 * buflen - size of page to be retrieved. 19149 * page_code - page code of data to be retrieved from the target. 19150 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19151 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19152 * to use the USCSI "direct" chain and bypass the normal 19153 * command waitq. 19154 * 19155 * Return Code: 0 - Success 19156 * errno return code from sd_send_scsi_cmd() 19157 * 19158 * Context: Can sleep. Does not return until command is completed. 19159 */ 19160 19161 static int 19162 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19163 size_t buflen, uchar_t page_code, int path_flag) 19164 { 19165 struct scsi_extended_sense sense_buf; 19166 union scsi_cdb cdb; 19167 struct uscsi_cmd ucmd_buf; 19168 int status; 19169 int headlen; 19170 19171 ASSERT(un != NULL); 19172 ASSERT(!mutex_owned(SD_MUTEX(un))); 19173 ASSERT(bufaddr != NULL); 19174 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19175 (cdbsize == CDB_GROUP2)); 19176 19177 SD_TRACE(SD_LOG_IO, un, 19178 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19179 19180 bzero(&cdb, sizeof (cdb)); 19181 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19182 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19183 bzero(bufaddr, buflen); 19184 19185 if (cdbsize == CDB_GROUP0) { 19186 cdb.scc_cmd = SCMD_MODE_SENSE; 19187 cdb.cdb_opaque[2] = page_code; 19188 FORMG0COUNT(&cdb, buflen); 19189 headlen = MODE_HEADER_LENGTH; 19190 } else { 19191 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19192 cdb.cdb_opaque[2] = page_code; 19193 FORMG1COUNT(&cdb, buflen); 19194 headlen = MODE_HEADER_LENGTH_GRP2; 19195 } 19196 19197 ASSERT(headlen <= buflen); 19198 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19199 19200 ucmd_buf.uscsi_cdb = (char *)&cdb; 19201 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19202 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19203 ucmd_buf.uscsi_buflen = buflen; 19204 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19205 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19206 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19207 ucmd_buf.uscsi_timeout = 60; 19208 19209 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19210 UIO_SYSSPACE, path_flag); 19211 19212 switch (status) { 19213 case 0: 19214 /* 19215 * sr_check_wp() uses the 0x3f page code and checks the header of 19216 * the mode page to determine whether the target device is
write-protected. 19217 * But some USB devices return 0 bytes for the 0x3f page code. For 19218 * this case, make sure that at 19219 * least the mode page header is returned. 19220 */ 19221 if (buflen - ucmd_buf.uscsi_resid < headlen) 19222 status = EIO; 19223 break; /* Success! */ 19224 case EIO: 19225 switch (ucmd_buf.uscsi_status) { 19226 case STATUS_RESERVATION_CONFLICT: 19227 status = EACCES; 19228 break; 19229 default: 19230 break; 19231 } 19232 break; 19233 default: 19234 break; 19235 } 19236 19237 if (status == 0) { 19238 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 19239 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19240 } 19241 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19242 19243 return (status); 19244 } 19245 19246 19247 /* 19248 * Function: sd_send_scsi_MODE_SELECT 19249 * 19250 * Description: Utility function for issuing a scsi MODE SELECT command. 19251 * Note: This routine uses a consistent implementation for Group0, 19252 * Group1, and Group2 commands across all platforms. ATAPI devices 19253 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19254 * 19255 * Arguments: un - pointer to the softstate struct for the target. 19256 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19257 * CDB_GROUP[1|2] (10 byte)). 19258 * bufaddr - buffer containing the page data to be sent to the target. 19259 * buflen - size of the page data to be sent. 19260 * save_page - boolean to determine if SP bit should be set. 19261 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19262 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19263 * to use the USCSI "direct" chain and bypass the normal 19264 * command waitq. 19265 * 19266 * Return Code: 0 - Success 19267 * errno return code from sd_send_scsi_cmd() 19268 * 19269 * Context: Can sleep. Does not return until command is completed.
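 *
 * For illustration (bit values per the SPC MODE SELECT CDB format;
 * a sketch of what the code below builds): CDB byte 1 is set to
 * 0x10 for PF (page format), and save_page == SD_SAVE_PAGE ORs in
 * 0x01 for SP (save pages), giving 0x11.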
19270 */ 19271 19272 static int 19273 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19274 size_t buflen, uchar_t save_page, int path_flag) 19275 { 19276 struct scsi_extended_sense sense_buf; 19277 union scsi_cdb cdb; 19278 struct uscsi_cmd ucmd_buf; 19279 int status; 19280 19281 ASSERT(un != NULL); 19282 ASSERT(!mutex_owned(SD_MUTEX(un))); 19283 ASSERT(bufaddr != NULL); 19284 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19285 (cdbsize == CDB_GROUP2)); 19286 19287 SD_TRACE(SD_LOG_IO, un, 19288 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 19289 19290 bzero(&cdb, sizeof (cdb)); 19291 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19292 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19293 19294 /* Set the PF bit for many third party drives */ 19295 cdb.cdb_opaque[1] = 0x10; 19296 19297 /* Set the savepage(SP) bit if given */ 19298 if (save_page == SD_SAVE_PAGE) { 19299 cdb.cdb_opaque[1] |= 0x01; 19300 } 19301 19302 if (cdbsize == CDB_GROUP0) { 19303 cdb.scc_cmd = SCMD_MODE_SELECT; 19304 FORMG0COUNT(&cdb, buflen); 19305 } else { 19306 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 19307 FORMG1COUNT(&cdb, buflen); 19308 } 19309 19310 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19311 19312 ucmd_buf.uscsi_cdb = (char *)&cdb; 19313 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19314 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19315 ucmd_buf.uscsi_buflen = buflen; 19316 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19317 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19318 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19319 ucmd_buf.uscsi_timeout = 60; 19320 19321 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19322 UIO_SYSSPACE, path_flag); 19323 19324 switch (status) { 19325 case 0: 19326 break; /* Success! */ 19327 case EIO: 19328 switch (ucmd_buf.uscsi_status) { 19329 case STATUS_RESERVATION_CONFLICT: 19330 status = EACCES; 19331 break; 19332 default: 19333 break; 19334 } 19335 break; 19336 default: 19337 break; 19338 } 19339 19340 if (status == 0) { 19341 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 19342 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19343 } 19344 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 19345 19346 return (status); 19347 } 19348 19349 19350 /* 19351 * Function: sd_send_scsi_RDWR 19352 * 19353 * Description: Issue a scsi READ or WRITE command with the given parameters. 19354 * 19355 * Arguments: un: Pointer to the sd_lun struct for the target. 19356 * cmd: SCMD_READ or SCMD_WRITE 19357 * bufaddr: Address of caller's buffer to receive the RDWR data 19358 * buflen: Length of caller's buffer to receive the RDWR data. 19359 * start_block: Block number for the start of the RDWR operation. 19360 * (Assumes target-native block size.) 19363 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19364 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19365 * to use the USCSI "direct" chain and bypass the normal 19366 * command waitq. 19367 * 19368 * Return Code: 0 - Success 19369 * errno return code from sd_send_scsi_cmd() 19370 * 19371 * Context: Can sleep. Does not return until command is completed.
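 *
 * For illustration (a sketch of the CDB-size selection in the code
 * below, for a non-ATAPI target): start_block 0x1FFFFF fits the
 * 21-bit Group 0 address field and yields a 6-byte CDB; 0x200000
 * forces a 10-byte Group 1 CDB; anything above 0xFFFFFFFF forces a
 * 16-byte Group 4 CDB.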
19372 */ 19373 19374 static int 19375 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19376 size_t buflen, daddr_t start_block, int path_flag) 19377 { 19378 struct scsi_extended_sense sense_buf; 19379 union scsi_cdb cdb; 19380 struct uscsi_cmd ucmd_buf; 19381 uint32_t block_count; 19382 int status; 19383 int cdbsize; 19384 uchar_t flag; 19385 19386 ASSERT(un != NULL); 19387 ASSERT(!mutex_owned(SD_MUTEX(un))); 19388 ASSERT(bufaddr != NULL); 19389 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19390 19391 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19392 19393 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19394 return (EINVAL); 19395 } 19396 19397 mutex_enter(SD_MUTEX(un)); 19398 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19399 mutex_exit(SD_MUTEX(un)); 19400 19401 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19402 19403 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19404 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19405 bufaddr, buflen, start_block, block_count); 19406 19407 bzero(&cdb, sizeof (cdb)); 19408 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19409 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19410 19411 /* Compute CDB size to use */ 19412 if (start_block > 0xffffffff) 19413 cdbsize = CDB_GROUP4; 19414 else if ((start_block & 0xFFE00000) || 19415 (un->un_f_cfg_is_atapi == TRUE)) 19416 cdbsize = CDB_GROUP1; 19417 else 19418 cdbsize = CDB_GROUP0; 19419 19420 switch (cdbsize) { 19421 case CDB_GROUP0: /* 6-byte CDBs */ 19422 cdb.scc_cmd = cmd; 19423 FORMG0ADDR(&cdb, start_block); 19424 FORMG0COUNT(&cdb, block_count); 19425 break; 19426 case CDB_GROUP1: /* 10-byte CDBs */ 19427 cdb.scc_cmd = cmd | SCMD_GROUP1; 19428 FORMG1ADDR(&cdb, start_block); 19429 FORMG1COUNT(&cdb, block_count); 19430 break; 19431 case CDB_GROUP4: /* 16-byte CDBs */ 19432 cdb.scc_cmd = cmd | SCMD_GROUP4; 19433 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19434 FORMG4COUNT(&cdb, block_count); 19435 break; 19436 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19437 default: 19438 /* All others reserved */ 19439 return (EINVAL); 19440 } 19441 19442 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19443 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19444 19445 ucmd_buf.uscsi_cdb = (char *)&cdb; 19446 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19447 ucmd_buf.uscsi_bufaddr = bufaddr; 19448 ucmd_buf.uscsi_buflen = buflen; 19449 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19450 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19451 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19452 ucmd_buf.uscsi_timeout = 60; 19453 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19454 UIO_SYSSPACE, path_flag); 19455 switch (status) { 19456 case 0: 19457 break; /* Success! */ 19458 case EIO: 19459 switch (ucmd_buf.uscsi_status) { 19460 case STATUS_RESERVATION_CONFLICT: 19461 status = EACCES; 19462 break; 19463 default: 19464 break; 19465 } 19466 break; 19467 default: 19468 break; 19469 } 19470 19471 if (status == 0) { 19472 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19473 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19474 } 19475 19476 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19477 19478 return (status); 19479 } 19480 19481 19482 /* 19483 * Function: sd_send_scsi_LOG_SENSE 19484 * 19485 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19486 * 19487 * Arguments: un: Pointer to the sd_lun struct for the target. 
19488 * 19489 * Return Code: 0 - Success 19490 * errno return code from sd_send_scsi_cmd() 19491 * 19492 * Context: Can sleep. Does not return until command is completed. 19493 */ 19494 19495 static int 19496 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19497 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19498 int path_flag) 19499 19500 { 19501 struct scsi_extended_sense sense_buf; 19502 union scsi_cdb cdb; 19503 struct uscsi_cmd ucmd_buf; 19504 int status; 19505 19506 ASSERT(un != NULL); 19507 ASSERT(!mutex_owned(SD_MUTEX(un))); 19508 19509 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19510 19511 bzero(&cdb, sizeof (cdb)); 19512 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19513 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19514 19515 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19516 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19517 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19518 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19519 FORMG1COUNT(&cdb, buflen); 19520 19521 ucmd_buf.uscsi_cdb = (char *)&cdb; 19522 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19523 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19524 ucmd_buf.uscsi_buflen = buflen; 19525 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19526 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19527 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19528 ucmd_buf.uscsi_timeout = 60; 19529 19530 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19531 UIO_SYSSPACE, path_flag); 19532 19533 switch (status) { 19534 case 0: 19535 break; 19536 case EIO: 19537 switch (ucmd_buf.uscsi_status) { 19538 case STATUS_RESERVATION_CONFLICT: 19539 status = EACCES; 19540 break; 19541 case STATUS_CHECK: 19542 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19543 (scsi_sense_key((uint8_t *)&sense_buf) == 19544 KEY_ILLEGAL_REQUEST) && 19545 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19546 /* 19547 * ASC 0x24: INVALID FIELD IN CDB 19548 */ 19549 switch (page_code) { 19550 case START_STOP_CYCLE_PAGE: 19551 /* 19552 * The start stop cycle counter is 19553 * implemented as page 0x31 in earlier 19554 * generation disks. In new generation 19555 * disks the start stop cycle counter is 19556 * implemented as page 0xE. To properly 19557 * handle this case if an attempt for 19558 * log page 0xE is made and fails we 19559 * will try again using page 0x31. 19560 * 19561 * Network storage BU committed to 19562 * maintain the page 0x31 for this 19563 * purpose and will not have any other 19564 * page implemented with page code 0x31 19565 * until all disks transition to the 19566 * standard page. 
19567 */ 19568 mutex_enter(SD_MUTEX(un)); 19569 un->un_start_stop_cycle_page = 19570 START_STOP_CYCLE_VU_PAGE; 19571 cdb.cdb_opaque[2] = 19572 (char)(page_control << 6) | 19573 un->un_start_stop_cycle_page; 19574 mutex_exit(SD_MUTEX(un)); 19575 status = sd_send_scsi_cmd( 19576 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19577 UIO_SYSSPACE, path_flag); 19578 19579 break; 19580 case TEMPERATURE_PAGE: 19581 status = ENOTTY; 19582 break; 19583 default: 19584 break; 19585 } 19586 } 19587 break; 19588 default: 19589 break; 19590 } 19591 break; 19592 default: 19593 break; 19594 } 19595 19596 if (status == 0) { 19597 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19598 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19599 } 19600 19601 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19602 19603 return (status); 19604 } 19605 19606 19607 /* 19608 * Function: sdioctl 19609 * 19610 * Description: Driver's ioctl(9e) entry point function. 19611 * 19612 * Arguments: dev - device number 19613 * cmd - ioctl operation to be performed 19614 * arg - user argument, contains data to be set or reference 19615 * parameter for get 19616 * flag - bit flag, indicating open settings, 32/64 bit type 19617 * cred_p - user credential pointer 19618 * rval_p - calling process return value (OPT) 19619 * 19620 * Return Code: EINVAL 19621 * ENOTTY 19622 * ENXIO 19623 * EIO 19624 * EFAULT 19625 * ENOTSUP 19626 * EPERM 19627 * 19628 * Context: Called from the device switch at normal priority. 19629 */ 19630 19631 static int 19632 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19633 { 19634 struct sd_lun *un = NULL; 19635 int err = 0; 19636 int i = 0; 19637 cred_t *cr; 19638 int tmprval = EINVAL; 19639 int is_valid; 19640 19641 /* 19642 * All device accesses go through sdstrategy, where we check the suspend 19643 * status. 19644 */ 19645 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19646 return (ENXIO); 19647 } 19648 19649 ASSERT(!mutex_owned(SD_MUTEX(un))); 19650 19651 19652 is_valid = SD_IS_VALID_LABEL(un); 19653 19654 /* 19655 * Moved this wait from sd_uscsi_strategy to here for 19656 * reasons of deadlock prevention. Internal driver commands, 19657 * specifically those to change a device's power level, result 19658 * in a call to sd_uscsi_strategy. 19659 */ 19660 mutex_enter(SD_MUTEX(un)); 19661 while ((un->un_state == SD_STATE_SUSPENDED) || 19662 (un->un_state == SD_STATE_PM_CHANGING)) { 19663 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19664 } 19665 /* 19666 * Incrementing the counter here protects commands from now 19667 * through to the top of sd_uscsi_strategy. Without the 19668 * counter increment, a power down, for example, could get in 19669 * after the above state check is made and before 19670 * execution gets to the top of sd_uscsi_strategy. 19671 * That would cause problems.
19672 */ 19673 un->un_ncmds_in_driver++; 19674 19675 if (!is_valid && 19676 (flag & (FNDELAY | FNONBLOCK))) { 19677 switch (cmd) { 19678 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19679 case DKIOCGVTOC: 19680 case DKIOCGAPART: 19681 case DKIOCPARTINFO: 19682 case DKIOCSGEOM: 19683 case DKIOCSAPART: 19684 case DKIOCGETEFI: 19685 case DKIOCPARTITION: 19686 case DKIOCSVTOC: 19687 case DKIOCSETEFI: 19688 case DKIOCGMBOOT: 19689 case DKIOCSMBOOT: 19690 case DKIOCG_PHYGEOM: 19691 case DKIOCG_VIRTGEOM: 19692 /* let cmlb handle it */ 19693 goto skip_ready_valid; 19694 19695 case CDROMPAUSE: 19696 case CDROMRESUME: 19697 case CDROMPLAYMSF: 19698 case CDROMPLAYTRKIND: 19699 case CDROMREADTOCHDR: 19700 case CDROMREADTOCENTRY: 19701 case CDROMSTOP: 19702 case CDROMSTART: 19703 case CDROMVOLCTRL: 19704 case CDROMSUBCHNL: 19705 case CDROMREADMODE2: 19706 case CDROMREADMODE1: 19707 case CDROMREADOFFSET: 19708 case CDROMSBLKMODE: 19709 case CDROMGBLKMODE: 19710 case CDROMGDRVSPEED: 19711 case CDROMSDRVSPEED: 19712 case CDROMCDDA: 19713 case CDROMCDXA: 19714 case CDROMSUBCODE: 19715 if (!ISCD(un)) { 19716 un->un_ncmds_in_driver--; 19717 ASSERT(un->un_ncmds_in_driver >= 0); 19718 mutex_exit(SD_MUTEX(un)); 19719 return (ENOTTY); 19720 } 19721 break; 19722 case FDEJECT: 19723 case DKIOCEJECT: 19724 case CDROMEJECT: 19725 if (!un->un_f_eject_media_supported) { 19726 un->un_ncmds_in_driver--; 19727 ASSERT(un->un_ncmds_in_driver >= 0); 19728 mutex_exit(SD_MUTEX(un)); 19729 return (ENOTTY); 19730 } 19731 break; 19732 case DKIOCFLUSHWRITECACHE: 19733 mutex_exit(SD_MUTEX(un)); 19734 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19735 if (err != 0) { 19736 mutex_enter(SD_MUTEX(un)); 19737 un->un_ncmds_in_driver--; 19738 ASSERT(un->un_ncmds_in_driver >= 0); 19739 mutex_exit(SD_MUTEX(un)); 19740 return (EIO); 19741 } 19742 mutex_enter(SD_MUTEX(un)); 19743 /* FALLTHROUGH */ 19744 case DKIOCREMOVABLE: 19745 case DKIOCHOTPLUGGABLE: 19746 case DKIOCINFO: 19747 case DKIOCGMEDIAINFO: 19748 case MHIOCENFAILFAST: 19749 case MHIOCSTATUS: 19750 case MHIOCTKOWN: 19751 case MHIOCRELEASE: 19752 case MHIOCGRP_INKEYS: 19753 case MHIOCGRP_INRESV: 19754 case MHIOCGRP_REGISTER: 19755 case MHIOCGRP_RESERVE: 19756 case MHIOCGRP_PREEMPTANDABORT: 19757 case MHIOCGRP_REGISTERANDIGNOREKEY: 19758 case CDROMCLOSETRAY: 19759 case USCSICMD: 19760 goto skip_ready_valid; 19761 default: 19762 break; 19763 } 19764 19765 mutex_exit(SD_MUTEX(un)); 19766 err = sd_ready_and_valid(un); 19767 mutex_enter(SD_MUTEX(un)); 19768 19769 if (err != SD_READY_VALID) { 19770 switch (cmd) { 19771 case DKIOCSTATE: 19772 case CDROMGDRVSPEED: 19773 case CDROMSDRVSPEED: 19774 case FDEJECT: /* for eject command */ 19775 case DKIOCEJECT: 19776 case CDROMEJECT: 19777 case DKIOCREMOVABLE: 19778 case DKIOCHOTPLUGGABLE: 19779 break; 19780 default: 19781 if (un->un_f_has_removable_media) { 19782 err = ENXIO; 19783 } else { 19784 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19785 if (err == SD_RESERVED_BY_OTHERS) { 19786 err = EACCES; 19787 } else { 19788 err = EIO; 19789 } 19790 } 19791 un->un_ncmds_in_driver--; 19792 ASSERT(un->un_ncmds_in_driver >= 0); 19793 mutex_exit(SD_MUTEX(un)); 19794 return (err); 19795 } 19796 } 19797 } 19798 19799 skip_ready_valid: 19800 mutex_exit(SD_MUTEX(un)); 19801 19802 switch (cmd) { 19803 case DKIOCINFO: 19804 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19805 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19806 break; 19807 19808 case DKIOCGMEDIAINFO: 19809 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19810 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19811 break; 19812 19813 case DKIOCGGEOM: 19814 case DKIOCGVTOC: 19815 case DKIOCGAPART: 19816 case DKIOCPARTINFO: 19817 case DKIOCSGEOM: 19818 case DKIOCSAPART: 19819 case DKIOCGETEFI: 19820 case DKIOCPARTITION: 19821 case DKIOCSVTOC: 19822 case DKIOCSETEFI: 19823 case DKIOCGMBOOT: 19824 case DKIOCSMBOOT: 19825 case DKIOCG_PHYGEOM: 19826 case DKIOCG_VIRTGEOM: 19827 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19828 19829 /* TUR should spin up */ 19830 19831 if (un->un_f_has_removable_media) 19832 err = sd_send_scsi_TEST_UNIT_READY(un, 19833 SD_CHECK_FOR_MEDIA); 19834 else 19835 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19836 19837 if (err != 0) 19838 break; 19839 19840 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19841 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19842 19843 if ((err == 0) && 19844 ((cmd == DKIOCSETEFI) || 19845 ((un->un_f_pkstats_enabled) && 19846 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) { 19847 19848 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19849 (void *)SD_PATH_DIRECT); 19850 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19851 sd_set_pstats(un); 19852 SD_TRACE(SD_LOG_IO_PARTITION, un, 19853 "sd_ioctl: un:0x%p pstats created and " 19854 "set\n", un); 19855 } 19856 } 19857 19858 if ((cmd == DKIOCSVTOC) || 19859 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19860 19861 mutex_enter(SD_MUTEX(un)); 19862 if (un->un_f_devid_supported && 19863 (un->un_f_opt_fab_devid == TRUE)) { 19864 if (un->un_devid == NULL) { 19865 sd_register_devid(un, SD_DEVINFO(un), 19866 SD_TARGET_IS_UNRESERVED); 19867 } else { 19868 /* 19869 * The device id for this disk 19870 * has been fabricated. The 19871 * device id must be preserved 19872 * by writing it back out to 19873 * disk. 19874 */ 19875 if (sd_write_deviceid(un) != 0) { 19876 ddi_devid_free(un->un_devid); 19877 un->un_devid = NULL; 19878 } 19879 } 19880 } 19881 mutex_exit(SD_MUTEX(un)); 19882 } 19883 19884 break; 19885 19886 case DKIOCLOCK: 19887 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19888 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19889 SD_PATH_STANDARD); 19890 break; 19891 19892 case DKIOCUNLOCK: 19893 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19894 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19895 SD_PATH_STANDARD); 19896 break; 19897 19898 case DKIOCSTATE: { 19899 enum dkio_state state; 19900 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19901 19902 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19903 err = EFAULT; 19904 } else { 19905 err = sd_check_media(dev, state); 19906 if (err == 0) { 19907 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19908 sizeof (int), flag) != 0) 19909 err = EFAULT; 19910 } 19911 } 19912 break; 19913 } 19914 19915 case DKIOCREMOVABLE: 19916 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19917 i = un->un_f_has_removable_media ? 1 : 0; 19918 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19919 err = EFAULT; 19920 } else { 19921 err = 0; 19922 } 19923 break; 19924 19925 case DKIOCHOTPLUGGABLE: 19926 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19927 i = un->un_f_is_hotpluggable ?
1 : 0; 19928 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19929 err = EFAULT; 19930 } else { 19931 err = 0; 19932 } 19933 break; 19934 19935 case DKIOCGTEMPERATURE: 19936 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19937 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19938 break; 19939 19940 case MHIOCENFAILFAST: 19941 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19942 if ((err = drv_priv(cred_p)) == 0) { 19943 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19944 } 19945 break; 19946 19947 case MHIOCTKOWN: 19948 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19949 if ((err = drv_priv(cred_p)) == 0) { 19950 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19951 } 19952 break; 19953 19954 case MHIOCRELEASE: 19955 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19956 if ((err = drv_priv(cred_p)) == 0) { 19957 err = sd_mhdioc_release(dev); 19958 } 19959 break; 19960 19961 case MHIOCSTATUS: 19962 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19963 if ((err = drv_priv(cred_p)) == 0) { 19964 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19965 case 0: 19966 err = 0; 19967 break; 19968 case EACCES: 19969 *rval_p = 1; 19970 err = 0; 19971 break; 19972 default: 19973 err = EIO; 19974 break; 19975 } 19976 } 19977 break; 19978 19979 case MHIOCQRESERVE: 19980 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19981 if ((err = drv_priv(cred_p)) == 0) { 19982 err = sd_reserve_release(dev, SD_RESERVE); 19983 } 19984 break; 19985 19986 case MHIOCREREGISTERDEVID: 19987 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19988 if (drv_priv(cred_p) == EPERM) { 19989 err = EPERM; 19990 } else if (!un->un_f_devid_supported) { 19991 err = ENOTTY; 19992 } else { 19993 err = sd_mhdioc_register_devid(dev); 19994 } 19995 break; 19996 19997 case MHIOCGRP_INKEYS: 19998 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19999 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20000 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20001 err = ENOTSUP; 20002 } else { 20003 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20004 flag); 20005 } 20006 } 20007 break; 20008 20009 case MHIOCGRP_INRESV: 20010 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20011 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20012 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20013 err = ENOTSUP; 20014 } else { 20015 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20016 } 20017 } 20018 break; 20019 20020 case MHIOCGRP_REGISTER: 20021 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20022 if ((err = drv_priv(cred_p)) != EPERM) { 20023 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20024 err = ENOTSUP; 20025 } else if (arg != NULL) { 20026 mhioc_register_t reg; 20027 if (ddi_copyin((void *)arg, ®, 20028 sizeof (mhioc_register_t), flag) != 0) { 20029 err = EFAULT; 20030 } else { 20031 err = 20032 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20033 un, SD_SCSI3_REGISTER, 20034 (uchar_t *)®); 20035 } 20036 } 20037 } 20038 break; 20039 20040 case MHIOCGRP_RESERVE: 20041 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20042 if ((err = drv_priv(cred_p)) != EPERM) { 20043 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20044 err = ENOTSUP; 20045 } else if (arg != NULL) { 20046 mhioc_resv_desc_t resv_desc; 20047 if (ddi_copyin((void *)arg, &resv_desc, 20048 sizeof (mhioc_resv_desc_t), flag) != 0) { 20049 err = EFAULT; 20050 } else { 20051 err = 20052 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20053 un, SD_SCSI3_RESERVE, 20054 (uchar_t *)&resv_desc); 20055 } 20056 } 20057 } 20058 break; 20059 20060 
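	/*
	 * Illustrative userland sketch (hypothetical descriptor, key,
	 * and file-descriptor names, not part of this driver): the
	 * group-reservation ioctls here take the mhioc structures by
	 * pointer, e.g. for MHIOCGRP_RESERVE above:
	 *
	 *	mhioc_resv_desc_t rd;
	 *
	 *	bzero(&rd, sizeof (rd));
	 *	bcopy(my_key, rd.key.key, MHIOC_RESV_KEY_SIZE);
	 *	rd.type = 5;	(SPC-3 "write exclusive, registrants only")
	 *	if (ioctl(fd, MHIOCGRP_RESERVE, &rd) != 0)
	 *		(handle error)
	 */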
case MHIOCGRP_PREEMPTANDABORT: 20061 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20062 if ((err = drv_priv(cred_p)) != EPERM) { 20063 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20064 err = ENOTSUP; 20065 } else if (arg != NULL) { 20066 mhioc_preemptandabort_t preempt_abort; 20067 if (ddi_copyin((void *)arg, &preempt_abort, 20068 sizeof (mhioc_preemptandabort_t), 20069 flag) != 0) { 20070 err = EFAULT; 20071 } else { 20072 err = 20073 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20074 un, SD_SCSI3_PREEMPTANDABORT, 20075 (uchar_t *)&preempt_abort); 20076 } 20077 } 20078 } 20079 break; 20080 20081 case MHIOCGRP_REGISTERANDIGNOREKEY: 20082 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20083 if ((err = drv_priv(cred_p)) != EPERM) { 20084 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20085 err = ENOTSUP; 20086 } else if (arg != NULL) { 20087 mhioc_registerandignorekey_t r_and_i; 20088 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20089 sizeof (mhioc_registerandignorekey_t), 20090 flag) != 0) { 20091 err = EFAULT; 20092 } else { 20093 err = 20094 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20095 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20096 (uchar_t *)&r_and_i); 20097 } 20098 } 20099 } 20100 break; 20101 20102 case USCSICMD: 20103 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20104 cr = ddi_get_cred(); 20105 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20106 err = EPERM; 20107 } else { 20108 enum uio_seg uioseg; 20109 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20110 UIO_USERSPACE; 20111 if (un->un_f_format_in_progress == TRUE) { 20112 err = EAGAIN; 20113 break; 20114 } 20115 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20116 flag, uioseg, SD_PATH_STANDARD); 20117 } 20118 break; 20119 20120 case CDROMPAUSE: 20121 case CDROMRESUME: 20122 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20123 if (!ISCD(un)) { 20124 err = ENOTTY; 20125 } else { 20126 err = sr_pause_resume(dev, cmd); 20127 } 20128 break; 20129 20130 case CDROMPLAYMSF: 20131 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20132 if (!ISCD(un)) { 20133 err = ENOTTY; 20134 } else { 20135 err = sr_play_msf(dev, (caddr_t)arg, flag); 20136 } 20137 break; 20138 20139 case CDROMPLAYTRKIND: 20140 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20141 #if defined(__i386) || defined(__amd64) 20142 /* 20143 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20144 */ 20145 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20146 #else 20147 if (!ISCD(un)) { 20148 #endif 20149 err = ENOTTY; 20150 } else { 20151 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20152 } 20153 break; 20154 20155 case CDROMREADTOCHDR: 20156 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20157 if (!ISCD(un)) { 20158 err = ENOTTY; 20159 } else { 20160 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20161 } 20162 break; 20163 20164 case CDROMREADTOCENTRY: 20165 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20166 if (!ISCD(un)) { 20167 err = ENOTTY; 20168 } else { 20169 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20170 } 20171 break; 20172 20173 case CDROMSTOP: 20174 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20175 if (!ISCD(un)) { 20176 err = ENOTTY; 20177 } else { 20178 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20179 SD_PATH_STANDARD); 20180 } 20181 break; 20182 20183 case CDROMSTART: 20184 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20185 if (!ISCD(un)) { 20186 err = ENOTTY; 20187 } else { 20188 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20189 SD_PATH_STANDARD); 20190 } 20191 break; 
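		/*
		 * Editor's illustrative sketch (not part of the driver): the
		 * CDROMSTOP/CDROMSTART cases above and the eject cases below
		 * take no third ioctl() argument, so a hypothetical user
		 * program can issue them directly; the device path and error
		 * handling are illustrative only.
		 *
		 *	#include <sys/cdio.h>
		 *	#include <sys/dkio.h>
		 *	#include <fcntl.h>
		 *	#include <unistd.h>
		 *
		 *	static int
		 *	stop_and_eject(const char *path)
		 *	{
		 *		int fd = open(path, O_RDONLY | O_NDELAY);
		 *
		 *		if (fd < 0)
		 *			return (-1);
		 *		(void) ioctl(fd, CDROMSTOP);	// spin disc down
		 *		(void) ioctl(fd, DKIOCEJECT);	// via sr_eject()
		 *		(void) close(fd);
		 *		return (0);
		 *	}
		 */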
20192 20193 case CDROMCLOSETRAY: 20194 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20195 if (!ISCD(un)) { 20196 err = ENOTTY; 20197 } else { 20198 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20199 SD_PATH_STANDARD); 20200 } 20201 break; 20202 20203 case FDEJECT: /* for eject command */ 20204 case DKIOCEJECT: 20205 case CDROMEJECT: 20206 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20207 if (!un->un_f_eject_media_supported) { 20208 err = ENOTTY; 20209 } else { 20210 err = sr_eject(dev); 20211 } 20212 break; 20213 20214 case CDROMVOLCTRL: 20215 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20216 if (!ISCD(un)) { 20217 err = ENOTTY; 20218 } else { 20219 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20220 } 20221 break; 20222 20223 case CDROMSUBCHNL: 20224 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20225 if (!ISCD(un)) { 20226 err = ENOTTY; 20227 } else { 20228 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20229 } 20230 break; 20231 20232 case CDROMREADMODE2: 20233 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20234 if (!ISCD(un)) { 20235 err = ENOTTY; 20236 } else if (un->un_f_cfg_is_atapi == TRUE) { 20237 /* 20238 * If the drive supports READ CD, use that instead of 20239 * switching the LBA size via a MODE SELECT 20240 * Block Descriptor 20241 */ 20242 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20243 } else { 20244 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20245 } 20246 break; 20247 20248 case CDROMREADMODE1: 20249 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20250 if (!ISCD(un)) { 20251 err = ENOTTY; 20252 } else { 20253 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20254 } 20255 break; 20256 20257 case CDROMREADOFFSET: 20258 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20259 if (!ISCD(un)) { 20260 err = ENOTTY; 20261 } else { 20262 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20263 flag); 20264 } 20265 break; 20266 20267 case CDROMSBLKMODE: 20268 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20269 /* 20270 * There is no means of changing the block size on ATAPI 20271 * drives, so return ENOTTY if the drive type is ATAPI 20272 */ 20273 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20274 err = ENOTTY; 20275 } else if (un->un_f_mmc_cap == TRUE) { 20276 20277 /* 20278 * MMC devices do not support changing the 20279 * logical block size. 20280 * 20281 * Note: EINVAL is being returned instead of ENOTTY to 20282 * maintain consistency with the original mmc 20283 * driver update. 20284 */ 20285 err = EINVAL; 20286 } else { 20287 mutex_enter(SD_MUTEX(un)); 20288 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20289 (un->un_ncmds_in_transport > 0)) { 20290 mutex_exit(SD_MUTEX(un)); 20291 err = EINVAL; 20292 } else { 20293 mutex_exit(SD_MUTEX(un)); 20294 err = sr_change_blkmode(dev, cmd, arg, flag); 20295 } 20296 } 20297 break; 20298 20299 case CDROMGBLKMODE: 20300 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20301 if (!ISCD(un)) { 20302 err = ENOTTY; 20303 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20304 (un->un_f_blockcount_is_valid != FALSE)) { 20305 /* 20306 * The drive is ATAPI, so return the target block 20307 * size; the block size cannot be changed on ATAPI 20308 * drives. Used primarily to detect 20309 * whether an ATAPI CD-ROM is present. 20310 */ 20311 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20312 sizeof (int), flag) != 0) { 20313 err = EFAULT; 20314 } else { 20315 err = 0; 20316 } 20317 20318 } else { 20319 /* 20320 * Drive supports changing block sizes via a Mode 20321 * Select.
20322 */ 20323 err = sr_change_blkmode(dev, cmd, arg, flag); 20324 } 20325 break; 20326 20327 case CDROMGDRVSPEED: 20328 case CDROMSDRVSPEED: 20329 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20330 if (!ISCD(un)) { 20331 err = ENOTTY; 20332 } else if (un->un_f_mmc_cap == TRUE) { 20333 /* 20334 * Note: In the future the driver implementation 20335 * for getting and 20336 * setting cd speed should entail: 20337 * 1) If non-mmc try the Toshiba mode page 20338 * (sr_change_speed) 20339 * 2) If mmc but no support for Real Time Streaming try 20340 * the SET CD SPEED (0xBB) command 20341 * (sr_atapi_change_speed) 20342 * 3) If mmc and support for Real Time Streaming 20343 * try the GET PERFORMANCE and SET STREAMING 20344 * commands (not yet implemented, 4380808) 20345 */ 20346 /* 20347 * As per recent MMC spec, CD-ROM speed is variable 20348 * and changes with LBA. Since there is no such 20349 * thing as drive speed now, fail this ioctl. 20350 * 20351 * Note: EINVAL is returned for consistency with the original 20352 * implementation, which included support for getting 20353 * the drive speed of mmc devices but not setting 20354 * the drive speed. Thus EINVAL would be returned 20355 * if a set request was made for an mmc device. 20356 * We no longer support get or set speed for 20357 * mmc but need to remain consistent with regard 20358 * to the error code returned. 20359 */ 20360 err = EINVAL; 20361 } else if (un->un_f_cfg_is_atapi == TRUE) { 20362 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20363 } else { 20364 err = sr_change_speed(dev, cmd, arg, flag); 20365 } 20366 break; 20367 20368 case CDROMCDDA: 20369 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20370 if (!ISCD(un)) { 20371 err = ENOTTY; 20372 } else { 20373 err = sr_read_cdda(dev, (void *)arg, flag); 20374 } 20375 break; 20376 20377 case CDROMCDXA: 20378 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20379 if (!ISCD(un)) { 20380 err = ENOTTY; 20381 } else { 20382 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20383 } 20384 break; 20385 20386 case CDROMSUBCODE: 20387 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20388 if (!ISCD(un)) { 20389 err = ENOTTY; 20390 } else { 20391 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20392 } 20393 break; 20394 20395 20396 #ifdef SDDEBUG 20397 /* RESET/ABORTS testing ioctls */ 20398 case DKIOCRESET: { 20399 int reset_level; 20400 20401 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20402 err = EFAULT; 20403 } else { 20404 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20405 "reset_level = 0x%x\n", reset_level); 20406 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20407 err = 0; 20408 } else { 20409 err = EIO; 20410 } 20411 } 20412 break; 20413 } 20414 20415 case DKIOCABORT: 20416 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20417 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20418 err = 0; 20419 } else { 20420 err = EIO; 20421 } 20422 break; 20423 #endif 20424 20425 #ifdef SD_FAULT_INJECTION 20426 /* SDIOC FaultInjection testing ioctls */ 20427 case SDIOCSTART: 20428 case SDIOCSTOP: 20429 case SDIOCINSERTPKT: 20430 case SDIOCINSERTXB: 20431 case SDIOCINSERTUN: 20432 case SDIOCINSERTARQ: 20433 case SDIOCPUSH: 20434 case SDIOCRETRIEVE: 20435 case SDIOCRUN: 20436 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 20437 "SDIOC detected cmd:0x%X:\n", cmd); 20438 /* call error generator */ 20439 sd_faultinjection_ioctl(cmd, arg, un); 20440 err = 0; 20441 break; 20442 20443 #endif /* SD_FAULT_INJECTION */ 20444 20445 case DKIOCFLUSHWRITECACHE: 20446 { 20447 struct dk_callback *dkc = (struct
dk_callback *)arg; 20448 20449 mutex_enter(SD_MUTEX(un)); 20450 if (!un->un_f_sync_cache_supported || 20451 !un->un_f_write_cache_enabled) { 20452 err = un->un_f_sync_cache_supported ? 20453 0 : ENOTSUP; 20454 mutex_exit(SD_MUTEX(un)); 20455 if ((flag & FKIOCTL) && dkc != NULL && 20456 dkc->dkc_callback != NULL) { 20457 (*dkc->dkc_callback)(dkc->dkc_cookie, 20458 err); 20459 /* 20460 * Did callback and reported error. 20461 * Since we did a callback, ioctl 20462 * should return 0. 20463 */ 20464 err = 0; 20465 } 20466 break; 20467 } 20468 mutex_exit(SD_MUTEX(un)); 20469 20470 if ((flag & FKIOCTL) && dkc != NULL && 20471 dkc->dkc_callback != NULL) { 20472 /* async SYNC CACHE request */ 20473 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20474 } else { 20475 /* synchronous SYNC CACHE request */ 20476 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20477 } 20478 } 20479 break; 20480 20481 case DKIOCGETWCE: { 20482 20483 int wce; 20484 20485 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20486 break; 20487 } 20488 20489 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20490 err = EFAULT; 20491 } 20492 break; 20493 } 20494 20495 case DKIOCSETWCE: { 20496 20497 int wce, sync_supported; 20498 20499 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20500 err = EFAULT; 20501 break; 20502 } 20503 20504 /* 20505 * Synchronize multiple threads trying to enable 20506 * or disable the cache via the un_f_wcc_cv 20507 * condition variable. 20508 */ 20509 mutex_enter(SD_MUTEX(un)); 20510 20511 /* 20512 * Don't allow the cache to be enabled if the 20513 * config file has it disabled. 20514 */ 20515 if (un->un_f_opt_disable_cache && wce) { 20516 mutex_exit(SD_MUTEX(un)); 20517 err = EINVAL; 20518 break; 20519 } 20520 20521 /* 20522 * Wait for write cache change in progress 20523 * bit to be clear before proceeding. 20524 */ 20525 while (un->un_f_wcc_inprog) 20526 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20527 20528 un->un_f_wcc_inprog = 1; 20529 20530 if (un->un_f_write_cache_enabled && wce == 0) { 20531 /* 20532 * Disable the write cache. Don't clear 20533 * un_f_write_cache_enabled until after 20534 * the mode select and flush are complete. 20535 */ 20536 sync_supported = un->un_f_sync_cache_supported; 20537 20538 /* 20539 * If cache flush is suppressed, we assume that the 20540 * controller firmware will take care of managing the 20541 * write cache for us: no need to explicitly 20542 * disable it. 20543 */ 20544 if (!un->un_f_suppress_cache_flush) { 20545 mutex_exit(SD_MUTEX(un)); 20546 if ((err = sd_cache_control(un, 20547 SD_CACHE_NOCHANGE, 20548 SD_CACHE_DISABLE)) == 0 && 20549 sync_supported) { 20550 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20551 NULL); 20552 } 20553 } else { 20554 mutex_exit(SD_MUTEX(un)); 20555 } 20556 20557 mutex_enter(SD_MUTEX(un)); 20558 if (err == 0) { 20559 un->un_f_write_cache_enabled = 0; 20560 } 20561 20562 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20563 /* 20564 * Set un_f_write_cache_enabled first, so there is 20565 * no window where the cache is enabled, but the 20566 * bit says it isn't. 20567 */ 20568 un->un_f_write_cache_enabled = 1; 20569 20570 /* 20571 * If cache flush is suppressed, we assume that the 20572 * controller firmware will take care of managing the 20573 * write cache for us: no need to explicitly 20574 * enable it. 
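			 *
			 *	Editor's note, an illustrative (hypothetical)
			 *	user-level request that enables the write
			 *	cache through this path, where fd is assumed
			 *	to be an open descriptor for the disk:
			 *
			 *		int wce = 1;
			 *
			 *		if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
			 *			perror("DKIOCSETWCE");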
20575 */ 20576 if (!un->un_f_suppress_cache_flush) { 20577 mutex_exit(SD_MUTEX(un)); 20578 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20579 SD_CACHE_ENABLE); 20580 } else { 20581 mutex_exit(SD_MUTEX(un)); 20582 } 20583 20584 mutex_enter(SD_MUTEX(un)); 20585 20586 if (err) { 20587 un->un_f_write_cache_enabled = 0; 20588 } 20589 } 20590 20591 un->un_f_wcc_inprog = 0; 20592 cv_broadcast(&un->un_wcc_cv); 20593 mutex_exit(SD_MUTEX(un)); 20594 break; 20595 } 20596 20597 default: 20598 err = ENOTTY; 20599 break; 20600 } 20601 mutex_enter(SD_MUTEX(un)); 20602 un->un_ncmds_in_driver--; 20603 ASSERT(un->un_ncmds_in_driver >= 0); 20604 mutex_exit(SD_MUTEX(un)); 20605 20606 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20607 return (err); 20608 } 20609 20610 20611 /* 20612 * Function: sd_dkio_ctrl_info 20613 * 20614 * Description: This routine is the driver entry point for handling controller 20615 * information ioctl requests (DKIOCINFO). 20616 * 20617 * Arguments: dev - the device number 20618 * arg - pointer to user provided dk_cinfo structure 20619 * specifying the controller type and attributes. 20620 * flag - this argument is a pass through to ddi_copyxxx() 20621 * directly from the mode argument of ioctl(). 20622 * 20623 * Return Code: 0 20624 * EFAULT 20625 * ENXIO 20626 */ 20627 20628 static int 20629 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20630 { 20631 struct sd_lun *un = NULL; 20632 struct dk_cinfo *info; 20633 dev_info_t *pdip; 20634 int lun, tgt; 20635 20636 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20637 return (ENXIO); 20638 } 20639 20640 info = (struct dk_cinfo *) 20641 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20642 20643 switch (un->un_ctype) { 20644 case CTYPE_CDROM: 20645 info->dki_ctype = DKC_CDROM; 20646 break; 20647 default: 20648 info->dki_ctype = DKC_SCSI_CCS; 20649 break; 20650 } 20651 pdip = ddi_get_parent(SD_DEVINFO(un)); 20652 info->dki_cnum = ddi_get_instance(pdip); 20653 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20654 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20655 } else { 20656 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20657 DK_DEVLEN - 1); 20658 } 20659 20660 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20661 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20662 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20663 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20664 20665 /* Unit Information */ 20666 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20667 info->dki_slave = ((tgt << 3) | lun); 20668 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20669 DK_DEVLEN - 1); 20670 info->dki_flags = DKI_FMTVOL; 20671 info->dki_partition = SDPART(dev); 20672 20673 /* Max Transfer size of this device in blocks */ 20674 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20675 info->dki_addr = 0; 20676 info->dki_space = 0; 20677 info->dki_prio = 0; 20678 info->dki_vec = 0; 20679 20680 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20681 kmem_free(info, sizeof (struct dk_cinfo)); 20682 return (EFAULT); 20683 } else { 20684 kmem_free(info, sizeof (struct dk_cinfo)); 20685 return (0); 20686 } 20687 } 20688 20689 20690 /* 20691 * Function: sd_get_media_info 20692 * 20693 * Description: This routine is the driver entry point for handling ioctl 20694 * requests for the media type or command set profile used by the 20695 * drive to operate on the media (DKIOCGMEDIAINFO). 
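 *
 *		Editor's illustrative sketch of a hypothetical user-level
 *		caller (fd is an open descriptor for the disk; error
 *		handling omitted):
 *
 *			struct dk_minfo mi;
 *			uint64_t bytes;
 *
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *				bytes = (uint64_t)mi.dki_capacity *
 *				    mi.dki_lbsize;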
20696 * 20697 * Arguments: dev - the device number 20698 * arg - pointer to user provided dk_minfo structure 20699 * specifying the media type, logical block size and 20700 * drive capacity. 20701 * flag - this argument is a pass through to ddi_copyxxx() 20702 * directly from the mode argument of ioctl(). 20703 * 20704 * Return Code: 0 20705 * EACCES 20706 * EFAULT 20707 * ENXIO 20708 * EIO 20709 */ 20710 20711 static int 20712 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20713 { 20714 struct sd_lun *un = NULL; 20715 struct uscsi_cmd com; 20716 struct scsi_inquiry *sinq; 20717 struct dk_minfo media_info; 20718 u_longlong_t media_capacity; 20719 uint64_t capacity; 20720 uint_t lbasize; 20721 uchar_t *out_data; 20722 uchar_t *rqbuf; 20723 int rval = 0; 20724 int rtn; 20725 20726 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20727 (un->un_state == SD_STATE_OFFLINE)) { 20728 return (ENXIO); 20729 } 20730 20731 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20732 20733 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20734 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20735 20736 /* Issue a TUR to determine if the drive is ready with media present */ 20737 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20738 if (rval == ENXIO) { 20739 goto done; 20740 } 20741 20742 /* Now get configuration data */ 20743 if (ISCD(un)) { 20744 media_info.dki_media_type = DK_CDROM; 20745 20746 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20747 if (un->un_f_mmc_cap == TRUE) { 20748 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20749 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20750 SD_PATH_STANDARD); 20751 20752 if (rtn) { 20753 /* 20754 * Failed for other than an illegal request 20755 * or command not supported 20756 */ 20757 if ((com.uscsi_status == STATUS_CHECK) && 20758 (com.uscsi_rqstatus == STATUS_GOOD)) { 20759 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20760 (rqbuf[12] != 0x20)) { 20761 rval = EIO; 20762 goto done; 20763 } 20764 } 20765 } else { 20766 /* 20767 * The GET CONFIGURATION command succeeded 20768 * so set the media type according to the 20769 * returned data 20770 */ 20771 media_info.dki_media_type = out_data[6]; 20772 media_info.dki_media_type <<= 8; 20773 media_info.dki_media_type |= out_data[7]; 20774 } 20775 } 20776 } else { 20777 /* 20778 * The profile list is not available, so we attempt to identify 20779 * the media type based on the inquiry data 20780 */ 20781 sinq = un->un_sd->sd_inq; 20782 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20783 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20784 /* This is a direct access device or optical disk */ 20785 media_info.dki_media_type = DK_FIXED_DISK; 20786 20787 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20788 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20789 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20790 media_info.dki_media_type = DK_ZIP; 20791 } else if ( 20792 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20793 media_info.dki_media_type = DK_JAZ; 20794 } 20795 } 20796 } else { 20797 /* 20798 * Not a CD, direct access or optical disk so return 20799 * unknown media 20800 */ 20801 media_info.dki_media_type = DK_UNKNOWN; 20802 } 20803 } 20804 20805 /* Now read the capacity so we can provide the lbasize and capacity */ 20806 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20807 SD_PATH_DIRECT)) { 20808 case 0: 20809 break; 20810 case EACCES: 20811 rval = EACCES; 20812 goto done; 20813 default: 20814 rval = EIO; 20815 goto done; 20816 } 20817 20818
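	/*
	 * Editor's worked example of the rescaling below (values are
	 * hypothetical): sd_send_scsi_READ_CAPACITY() reports the capacity
	 * in un_sys_blocksize units (assume 512 bytes here), so a 2 GB
	 * CD-ROM with an lbasize of 2048 arrives as capacity = 4194304;
	 * the conversion yields (4194304 * 512) / 2048 = 1048576 blocks of
	 * 2048 bytes each for dki_capacity.
	 */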
media_info.dki_lbsize = lbasize; 20819 media_capacity = capacity; 20820 20821 /* 20822 * sd_send_scsi_READ_CAPACITY() reports capacity in 20823 * un->un_sys_blocksize chunks. So we need to convert it into 20824 * cap.lbasize chunks. 20825 */ 20826 media_capacity *= un->un_sys_blocksize; 20827 media_capacity /= lbasize; 20828 media_info.dki_capacity = media_capacity; 20829 20830 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20831 rval = EFAULT; 20832 /* Keep the goto so that code added below in future still reaches the cleanup path */ 20833 goto done; 20834 } 20835 done: 20836 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20837 kmem_free(rqbuf, SENSE_LENGTH); 20838 return (rval); 20839 } 20840 20841 20842 /* 20843 * Function: sd_check_media 20844 * 20845 * Description: This utility routine implements the functionality for the 20846 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20847 * driver state changes from that specified by the user 20848 * (inserted or ejected). For example, if the user specifies 20849 * DKIO_EJECTED and the current media state is inserted this 20850 * routine will immediately return DKIO_INSERTED. However, if the 20851 * current media state is not inserted the user thread will be 20852 * blocked until the drive state changes. If DKIO_NONE is specified 20853 * the user thread will block until a drive state change occurs. 20854 * 20855 * Arguments: dev - the device number 20856 * state - the media state specified by the user; the caller 20857 * remains blocked while the drive stays in this state. 20858 * 20859 * Return Code: ENXIO 20860 * EIO 20861 * EAGAIN 20862 * EINTR 20863 */ 20864 20865 static int 20866 sd_check_media(dev_t dev, enum dkio_state state) 20867 { 20868 struct sd_lun *un = NULL; 20869 enum dkio_state prev_state; 20870 opaque_t token = NULL; 20871 int rval = 0; 20872 20873 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20874 return (ENXIO); 20875 } 20876 20877 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20878 20879 mutex_enter(SD_MUTEX(un)); 20880 20881 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20882 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20883 20884 prev_state = un->un_mediastate; 20885 20886 /* is there anything to do? */ 20887 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20888 /* 20889 * submit the request to the scsi_watch service; 20890 * sd_media_watch_cb() does the real work 20891 */ 20892 mutex_exit(SD_MUTEX(un)); 20893 20894 /* 20895 * This change handles the case where a scsi watch request is 20896 * added to a device that is powered down. To accomplish this 20897 * we power up the device before adding the scsi watch request, 20898 * since the scsi watch sends a TUR directly to the device 20899 * which the device cannot handle if it is powered down. 20900 */ 20901 if (sd_pm_entry(un) != DDI_SUCCESS) { 20902 mutex_enter(SD_MUTEX(un)); 20903 goto done; 20904 } 20905 20906 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20907 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20908 (caddr_t)dev); 20909 20910 sd_pm_exit(un); 20911 20912 mutex_enter(SD_MUTEX(un)); 20913 if (token == NULL) { 20914 rval = EAGAIN; 20915 goto done; 20916 } 20917 20918 /* 20919 * This is a special case IOCTL that doesn't return 20920 * until the media state changes. Routine sdpower 20921 * knows about and handles this so don't count it 20922 * as an active cmd in the driver, which would 20923 * keep the device busy to the pm framework.
If the count isn't decremented the device can't 20925 * be powered down. 20926 */ 20927 un->un_ncmds_in_driver--; 20928 ASSERT(un->un_ncmds_in_driver >= 0); 20929 20930 /* 20931 * if a prior request had been made, this will be the same 20932 * token, as scsi_watch was designed that way. 20933 */ 20934 un->un_swr_token = token; 20935 un->un_specified_mediastate = state; 20936 20937 /* 20938 * Now wait for the media change. We will not be signalled 20939 * unless mediastate == state, but it is still better to test 20940 * for this condition, since there is a 20941 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED. 20942 */ 20943 SD_TRACE(SD_LOG_COMMON, un, 20944 "sd_check_media: waiting for media state change\n"); 20945 while (un->un_mediastate == state) { 20946 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20947 SD_TRACE(SD_LOG_COMMON, un, 20948 "sd_check_media: waiting for media state " 20949 "was interrupted\n"); 20950 un->un_ncmds_in_driver++; 20951 rval = EINTR; 20952 goto done; 20953 } 20954 SD_TRACE(SD_LOG_COMMON, un, 20955 "sd_check_media: received signal, state=%x\n", 20956 un->un_mediastate); 20957 } 20958 /* 20959 * Inc the counter to indicate the device once again 20960 * has an active outstanding cmd. 20961 */ 20962 un->un_ncmds_in_driver++; 20963 } 20964 20965 /* invalidate geometry */ 20966 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20967 sr_ejected(un); 20968 } 20969 20970 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20971 uint64_t capacity; 20972 uint_t lbasize; 20973 20974 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20975 mutex_exit(SD_MUTEX(un)); 20976 /* 20977 * Since the following routines use SD_PATH_DIRECT, we must 20978 * call PM directly before the upcoming disk accesses. This 20979 * may cause the disk to be powered up and spun up. 20980 */ 20981 20982 if (sd_pm_entry(un) == DDI_SUCCESS) { 20983 rval = sd_send_scsi_READ_CAPACITY(un, 20984 &capacity, 20985 &lbasize, SD_PATH_DIRECT); 20986 if (rval != 0) { 20987 sd_pm_exit(un); 20988 mutex_enter(SD_MUTEX(un)); 20989 goto done; 20990 } 20991 } else { 20992 rval = EIO; 20993 mutex_enter(SD_MUTEX(un)); 20994 goto done; 20995 } 20996 mutex_enter(SD_MUTEX(un)); 20997 20998 sd_update_block_info(un, lbasize, capacity); 20999 21000 /* 21001 * Check if the media in the device is writable or not 21002 */ 21003 if (ISCD(un)) 21004 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21005 21006 mutex_exit(SD_MUTEX(un)); 21007 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21008 if ((cmlb_validate(un->un_cmlbhandle, 0, 21009 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21010 sd_set_pstats(un); 21011 SD_TRACE(SD_LOG_IO_PARTITION, un, 21012 "sd_check_media: un:0x%p pstats created and " 21013 "set\n", un); 21014 } 21015 21016 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21017 SD_PATH_DIRECT); 21018 sd_pm_exit(un); 21019 21020 mutex_enter(SD_MUTEX(un)); 21021 } 21022 done: 21023 un->un_f_watcht_stopped = FALSE; 21024 if (un->un_swr_token) { 21025 /* 21026 * Use of this local token and the mutex ensures that we avoid 21027 * some race conditions associated with terminating the 21028 * scsi watch.
21029 */ 21030 token = un->un_swr_token; 21031 un->un_swr_token = (opaque_t)NULL; 21032 mutex_exit(SD_MUTEX(un)); 21033 (void) scsi_watch_request_terminate(token, 21034 SCSI_WATCH_TERMINATE_WAIT); 21035 mutex_enter(SD_MUTEX(un)); 21036 } 21037 21038 /* 21039 * Update the capacity kstat value, if no media previously 21040 * (capacity kstat is 0) and a media has been inserted 21041 * (un_f_blockcount_is_valid == TRUE) 21042 */ 21043 if (un->un_errstats) { 21044 struct sd_errstats *stp = NULL; 21045 21046 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21047 if ((stp->sd_capacity.value.ui64 == 0) && 21048 (un->un_f_blockcount_is_valid == TRUE)) { 21049 stp->sd_capacity.value.ui64 = 21050 (uint64_t)((uint64_t)un->un_blockcount * 21051 un->un_sys_blocksize); 21052 } 21053 } 21054 mutex_exit(SD_MUTEX(un)); 21055 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21056 return (rval); 21057 } 21058 21059 21060 /* 21061 * Function: sd_delayed_cv_broadcast 21062 * 21063 * Description: Delayed cv_broadcast to allow for target to recover from media 21064 * insertion. 21065 * 21066 * Arguments: arg - driver soft state (unit) structure 21067 */ 21068 21069 static void 21070 sd_delayed_cv_broadcast(void *arg) 21071 { 21072 struct sd_lun *un = arg; 21073 21074 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21075 21076 mutex_enter(SD_MUTEX(un)); 21077 un->un_dcvb_timeid = NULL; 21078 cv_broadcast(&un->un_state_cv); 21079 mutex_exit(SD_MUTEX(un)); 21080 } 21081 21082 21083 /* 21084 * Function: sd_media_watch_cb 21085 * 21086 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21087 * routine processes the TUR sense data and updates the driver 21088 * state if a transition has occurred. The user thread 21089 * (sd_check_media) is then signalled. 
21090 * 21091 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21092 * among multiple watches that share this callback function 21093 * resultp - scsi watch facility result packet containing scsi 21094 * packet, status byte and sense data 21095 * 21096 * Return Code: 0 for success, -1 for failure 21097 */ 21098 21099 static int 21100 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21101 { 21102 struct sd_lun *un; 21103 struct scsi_status *statusp = resultp->statusp; 21104 uint8_t *sensep = (uint8_t *)resultp->sensep; 21105 enum dkio_state state = DKIO_NONE; 21106 dev_t dev = (dev_t)arg; 21107 uchar_t actual_sense_length; 21108 uint8_t skey, asc, ascq; 21109 21110 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21111 return (-1); 21112 } 21113 actual_sense_length = resultp->actual_sense_length; 21114 21115 mutex_enter(SD_MUTEX(un)); 21116 SD_TRACE(SD_LOG_COMMON, un, 21117 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21118 *((char *)statusp), (void *)sensep, actual_sense_length); 21119 21120 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21121 un->un_mediastate = DKIO_DEV_GONE; 21122 cv_broadcast(&un->un_state_cv); 21123 mutex_exit(SD_MUTEX(un)); 21124 21125 return (0); 21126 } 21127 21128 /* 21129 * If there was a check condition then sensep points to valid sense data. 21130 * If the status was not a check condition but a reservation or busy 21131 * status, then the new state is DKIO_NONE. 21132 */ 21133 if (sensep != NULL) { 21134 skey = scsi_sense_key(sensep); 21135 asc = scsi_sense_asc(sensep); 21136 ascq = scsi_sense_ascq(sensep); 21137 21138 SD_INFO(SD_LOG_COMMON, un, 21139 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21140 skey, asc, ascq); 21141 /* This routine only uses up to 13 bytes of sense data. */ 21142 if (actual_sense_length >= 13) { 21143 if (skey == KEY_UNIT_ATTENTION) { 21144 if (asc == 0x28) { 21145 state = DKIO_INSERTED; 21146 } 21147 } else if (skey == KEY_NOT_READY) { 21148 /* 21149 * Sense data 02/04/02 means that the host 21150 * should send a START command. Explicitly 21151 * leave the media state as is 21152 * (inserted), since the media is present 21153 * and the host has stopped the device for 21154 * PM reasons. The next real read/write 21155 * to the media will bring the 21156 * device back to the proper state for 21157 * media access. 21158 */ 21159 if (asc == 0x3a) { 21160 state = DKIO_EJECTED; 21161 } else { 21162 /* 21163 * If the drive is busy with an 21164 * operation or long write, keep the 21165 * media in an inserted state. 21166 */ 21167 21168 if ((asc == 0x04) && 21169 ((ascq == 0x02) || 21170 (ascq == 0x07) || 21171 (ascq == 0x08))) { 21172 state = DKIO_INSERTED; 21173 } 21174 } 21175 } else if (skey == KEY_NO_SENSE) { 21176 if ((asc == 0x00) && (ascq == 0x00)) { 21177 /* 21178 * Sense Data 00/00/00 does not provide 21179 * any information about the state of 21180 * the media. Ignore it.
21181 */ 21182 mutex_exit(SD_MUTEX(un)); 21183 return (0); 21184 } 21185 } 21186 } 21187 } else if ((*((char *)statusp) == STATUS_GOOD) && 21188 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21189 state = DKIO_INSERTED; 21190 } 21191 21192 SD_TRACE(SD_LOG_COMMON, un, 21193 "sd_media_watch_cb: state=%x, specified=%x\n", 21194 state, un->un_specified_mediastate); 21195 21196 /* 21197 * now signal the waiting thread if this is *not* the specified state; 21198 * delay the signal if the state is DKIO_INSERTED to allow the target 21199 * to recover 21200 */ 21201 if (state != un->un_specified_mediastate) { 21202 un->un_mediastate = state; 21203 if (state == DKIO_INSERTED) { 21204 /* 21205 * delay the signal to give the drive a chance 21206 * to do what it apparently needs to do 21207 */ 21208 SD_TRACE(SD_LOG_COMMON, un, 21209 "sd_media_watch_cb: delayed cv_broadcast\n"); 21210 if (un->un_dcvb_timeid == NULL) { 21211 un->un_dcvb_timeid = 21212 timeout(sd_delayed_cv_broadcast, un, 21213 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21214 } 21215 } else { 21216 SD_TRACE(SD_LOG_COMMON, un, 21217 "sd_media_watch_cb: immediate cv_broadcast\n"); 21218 cv_broadcast(&un->un_state_cv); 21219 } 21220 } 21221 mutex_exit(SD_MUTEX(un)); 21222 return (0); 21223 } 21224 21225 21226 /* 21227 * Function: sd_dkio_get_temp 21228 * 21229 * Description: This routine is the driver entry point for handling ioctl 21230 * requests to get the disk temperature. 21231 * 21232 * Arguments: dev - the device number 21233 * arg - pointer to user provided dk_temperature structure. 21234 * flag - this argument is a pass through to ddi_copyxxx() 21235 * directly from the mode argument of ioctl(). 21236 * 21237 * Return Code: 0 21238 * EFAULT 21239 * ENXIO 21240 * EAGAIN 21241 */ 21242 21243 static int 21244 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21245 { 21246 struct sd_lun *un = NULL; 21247 struct dk_temperature *dktemp = NULL; 21248 uchar_t *temperature_page; 21249 int rval = 0; 21250 int path_flag = SD_PATH_STANDARD; 21251 21252 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21253 return (ENXIO); 21254 } 21255 21256 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21257 21258 /* copyin the disk temp argument to get the user flags */ 21259 if (ddi_copyin((void *)arg, dktemp, 21260 sizeof (struct dk_temperature), flag) != 0) { 21261 rval = EFAULT; 21262 goto done; 21263 } 21264 21265 /* Initialize the temperature to invalid. */ 21266 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21267 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21268 21269 /* 21270 * Note: Investigate removing the "bypass pm" semantic. 21271 * Can we just bypass PM always? 21272 */ 21273 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21274 path_flag = SD_PATH_DIRECT; 21275 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21276 mutex_enter(&un->un_pm_mutex); 21277 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21278 /* 21279 * If DKT_BYPASS_PM is set and the drive happens to be 21280 * in low power mode, we cannot wake it up; we need to 21281 * return EAGAIN. 21282 */ 21283 mutex_exit(&un->un_pm_mutex); 21284 rval = EAGAIN; 21285 goto done; 21286 } else { 21287 /* 21288 * Indicate to PM the device is busy. This is required 21289 * to avoid a race - i.e. the ioctl is issuing a 21290 * command and the pm framework brings down the device 21291 * to low power mode (possible power cut-off on some 21292 * platforms).
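			 *
			 *	Editor's note, an illustrative (hypothetical)
			 *	user-level request that sets DKT_BYPASS_PM,
			 *	and may therefore see the EAGAIN returns
			 *	above (fd is an open descriptor for the
			 *	disk):
			 *
			 *		struct dk_temperature dkt;
			 *
			 *		(void) memset(&dkt, 0, sizeof (dkt));
			 *		dkt.dkt_flags = DKT_BYPASS_PM;
			 *		if (ioctl(fd, DKIOCGTEMPERATURE,
			 *		    &dkt) == 0)
			 *			(void) printf("%d C\n",
			 *			    dkt.dkt_cur_temp);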
21293 */ 21294 mutex_exit(&un->un_pm_mutex); 21295 if (sd_pm_entry(un) != DDI_SUCCESS) { 21296 rval = EAGAIN; 21297 goto done; 21298 } 21299 } 21300 } 21301 21302 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21303 21304 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21305 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21306 goto done2; 21307 } 21308 21309 /* 21310 * For the current temperature verify that the parameter length is 0x02 21311 * and the parameter code is 0x00 21312 */ 21313 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21314 (temperature_page[5] == 0x00)) { 21315 if (temperature_page[9] == 0xFF) { 21316 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21317 } else { 21318 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21319 } 21320 } 21321 21322 /* 21323 * For the reference temperature verify that the parameter 21324 * length is 0x02 and the parameter code is 0x01 21325 */ 21326 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21327 (temperature_page[11] == 0x01)) { 21328 if (temperature_page[15] == 0xFF) { 21329 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21330 } else { 21331 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21332 } 21333 } 21334 21335 /* Do the copyout regardless of the temperature command's status. */ 21336 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21337 flag) != 0) { 21338 rval = EFAULT; 21339 } 21340 21341 done2: 21342 if (path_flag == SD_PATH_DIRECT) { 21343 sd_pm_exit(un); 21344 } 21345 21346 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21347 done: 21348 if (dktemp != NULL) { 21349 kmem_free(dktemp, sizeof (struct dk_temperature)); 21350 } 21351 21352 return (rval); 21353 } 21354 21355 21356 /* 21357 * Function: sd_log_page_supported 21358 * 21359 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21360 * supported log pages. 21361 * 21362 * Arguments: un - driver soft state (unit) structure 21363 * log_page - the log page code to search for 21364 * 21365 * Return Code: -1 - on error (log sense is optional and may not be supported). 21366 * 0 - log page not found. 21367 * 1 - log page found. 21368 */ 21369 21370 static int 21371 sd_log_page_supported(struct sd_lun *un, int log_page) 21372 { 21373 uchar_t *log_page_data; 21374 int i; 21375 int match = 0; 21376 int log_size; 21377 21378 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21379 21380 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21381 SD_PATH_DIRECT) != 0) { 21382 SD_ERROR(SD_LOG_COMMON, un, 21383 "sd_log_page_supported: failed log page retrieval\n"); 21384 kmem_free(log_page_data, 0xFF); 21385 return (-1); 21386 } 21387 log_size = log_page_data[3]; 21388 21389 /* 21390 * The list of supported log pages starts at the fourth byte. Check 21391 * until we run out of log pages or a match is found. 21392 */ 21393 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21394 if (log_page_data[i] == log_page) { 21395 match++; 21396 } 21397 } 21398 kmem_free(log_page_data, 0xFF); 21399 return (match); 21400 } 21401 21402 21403 /* 21404 * Function: sd_mhdioc_failfast 21405 * 21406 * Description: This routine is the driver entry point for handling ioctl 21407 * requests to enable/disable the multihost failfast option. 21408 * (MHIOCENFAILFAST) 21409 * 21410 * Arguments: dev - the device number 21411 * arg - user specified probing interval. 21412 * flag - this argument is a pass through to ddi_copyxxx() 21413 * directly from the mode argument of ioctl().
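 *
 *		Editor's illustrative sketch of a hypothetical user-level
 *		caller: the argument is a pointer to an int probing
 *		interval in milliseconds; zero disables failfast.
 *
 *			int mh_time = 1000;
 *
 *			if (ioctl(fd, MHIOCENFAILFAST, &mh_time) != 0)
 *				perror("MHIOCENFAILFAST");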
21414 * 21415 * Return Code: 0 21416 * EFAULT 21417 * ENXIO 21418 */ 21419 21420 static int 21421 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21422 { 21423 struct sd_lun *un = NULL; 21424 int mh_time; 21425 int rval = 0; 21426 21427 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21428 return (ENXIO); 21429 } 21430 21431 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21432 return (EFAULT); 21433 21434 if (mh_time) { 21435 mutex_enter(SD_MUTEX(un)); 21436 un->un_resvd_status |= SD_FAILFAST; 21437 mutex_exit(SD_MUTEX(un)); 21438 /* 21439 * If mh_time is INT_MAX, then this ioctl is being used for 21440 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21441 */ 21442 if (mh_time != INT_MAX) { 21443 rval = sd_check_mhd(dev, mh_time); 21444 } 21445 } else { 21446 (void) sd_check_mhd(dev, 0); 21447 mutex_enter(SD_MUTEX(un)); 21448 un->un_resvd_status &= ~SD_FAILFAST; 21449 mutex_exit(SD_MUTEX(un)); 21450 } 21451 return (rval); 21452 } 21453 21454 21455 /* 21456 * Function: sd_mhdioc_takeown 21457 * 21458 * Description: This routine is the driver entry point for handling ioctl 21459 * requests to forcefully acquire exclusive access rights to the 21460 * multihost disk (MHIOCTKOWN). 21461 * 21462 * Arguments: dev - the device number 21463 * arg - user provided structure specifying the delay 21464 * parameters in milliseconds 21465 * flag - this argument is a pass through to ddi_copyxxx() 21466 * directly from the mode argument of ioctl(). 21467 * 21468 * Return Code: 0 21469 * EFAULT 21470 * ENXIO 21471 */ 21472 21473 static int 21474 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21475 { 21476 struct sd_lun *un = NULL; 21477 struct mhioctkown *tkown = NULL; 21478 int rval = 0; 21479 21480 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21481 return (ENXIO); 21482 } 21483 21484 if (arg != NULL) { 21485 tkown = (struct mhioctkown *) 21486 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21487 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21488 if (rval != 0) { 21489 rval = EFAULT; 21490 goto error; 21491 } 21492 } 21493 21494 rval = sd_take_ownership(dev, tkown); 21495 mutex_enter(SD_MUTEX(un)); 21496 if (rval == 0) { 21497 un->un_resvd_status |= SD_RESERVE; 21498 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21499 sd_reinstate_resv_delay = 21500 tkown->reinstate_resv_delay * 1000; 21501 } else { 21502 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21503 } 21504 /* 21505 * Give the scsi_watch routine interval set by 21506 * the MHIOCENFAILFAST ioctl precedence here. 21507 */ 21508 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21509 mutex_exit(SD_MUTEX(un)); 21510 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21511 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21512 "sd_mhdioc_takeown : %d\n", 21513 sd_reinstate_resv_delay); 21514 } else { 21515 mutex_exit(SD_MUTEX(un)); 21516 } 21517 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21518 sd_mhd_reset_notify_cb, (caddr_t)un); 21519 } else { 21520 un->un_resvd_status &= ~SD_RESERVE; 21521 mutex_exit(SD_MUTEX(un)); 21522 } 21523 21524 error: 21525 if (tkown != NULL) { 21526 kmem_free(tkown, sizeof (struct mhioctkown)); 21527 } 21528 return (rval); 21529 } 21530 21531 21532 /* 21533 * Function: sd_mhdioc_release 21534 * 21535 * Description: This routine is the driver entry point for handling ioctl 21536 * requests to release exclusive access rights to the multihost 21537 * disk (MHIOCRELEASE). 
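 *
 *		Editor's illustrative sketch of a hypothetical user-level
 *		caller; this ioctl takes no argument:
 *
 *			if (ioctl(fd, MHIOCRELEASE) != 0)
 *				perror("MHIOCRELEASE");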
21538 * 21539 * Arguments: dev - the device number 21540 * 21541 * Return Code: 0 21542 * ENXIO 21543 */ 21544 21545 static int 21546 sd_mhdioc_release(dev_t dev) 21547 { 21548 struct sd_lun *un = NULL; 21549 timeout_id_t resvd_timeid_save; 21550 int resvd_status_save; 21551 int rval = 0; 21552 21553 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21554 return (ENXIO); 21555 } 21556 21557 mutex_enter(SD_MUTEX(un)); 21558 resvd_status_save = un->un_resvd_status; 21559 un->un_resvd_status &= 21560 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21561 if (un->un_resvd_timeid) { 21562 resvd_timeid_save = un->un_resvd_timeid; 21563 un->un_resvd_timeid = NULL; 21564 mutex_exit(SD_MUTEX(un)); 21565 (void) untimeout(resvd_timeid_save); 21566 } else { 21567 mutex_exit(SD_MUTEX(un)); 21568 } 21569 21570 /* 21571 * Destroy any pending timeout thread that may be attempting to 21572 * reinstate a reservation on this device. 21573 */ 21574 sd_rmv_resv_reclaim_req(dev); 21575 21576 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21577 mutex_enter(SD_MUTEX(un)); 21578 if ((un->un_mhd_token) && 21579 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21580 mutex_exit(SD_MUTEX(un)); 21581 (void) sd_check_mhd(dev, 0); 21582 } else { 21583 mutex_exit(SD_MUTEX(un)); 21584 } 21585 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21586 sd_mhd_reset_notify_cb, (caddr_t)un); 21587 } else { 21588 /* 21589 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21590 */ 21591 mutex_enter(SD_MUTEX(un)); 21592 un->un_resvd_status = resvd_status_save; 21593 mutex_exit(SD_MUTEX(un)); 21594 } 21595 return (rval); 21596 } 21597 21598 21599 /* 21600 * Function: sd_mhdioc_register_devid 21601 * 21602 * Description: This routine is the driver entry point for handling ioctl 21603 * requests to register the device id (MHIOCREREGISTERDEVID). 21604 * 21605 * Note: The implementation for this ioctl has been updated to 21606 * be consistent with the original PSARC case (1999/357) 21607 * (4375899, 4241671, 4220005) 21608 * 21609 * Arguments: dev - the device number 21610 * 21611 * Return Code: 0 21612 * ENXIO 21613 */ 21614 21615 static int 21616 sd_mhdioc_register_devid(dev_t dev) 21617 { 21618 struct sd_lun *un = NULL; 21619 int rval = 0; 21620 21621 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21622 return (ENXIO); 21623 } 21624 21625 ASSERT(!mutex_owned(SD_MUTEX(un))); 21626 21627 mutex_enter(SD_MUTEX(un)); 21628 21629 /* If a devid already exists, de-register it */ 21630 if (un->un_devid != NULL) { 21631 ddi_devid_unregister(SD_DEVINFO(un)); 21632 /* 21633 * After unregistering the devid, free the devid memory 21634 */ 21635 ddi_devid_free(un->un_devid); 21636 un->un_devid = NULL; 21637 } 21638 21639 /* Check for reservation conflict */ 21640 mutex_exit(SD_MUTEX(un)); 21641 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21642 mutex_enter(SD_MUTEX(un)); 21643 21644 switch (rval) { 21645 case 0: 21646 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21647 break; 21648 case EACCES: 21649 break; 21650 default: 21651 rval = EIO; 21652 } 21653 21654 mutex_exit(SD_MUTEX(un)); 21655 return (rval); 21656 } 21657 21658 21659 /* 21660 * Function: sd_mhdioc_inkeys 21661 * 21662 * Description: This routine is the driver entry point for handling ioctl 21663 * requests to issue the SCSI-3 Persistent In Read Keys command 21664 * to the device (MHIOCGRP_INKEYS).
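 *
 *		Editor's illustrative sketch of a hypothetical user-level
 *		caller (structure layouts as defined in <sys/mhd.h>; the
 *		list size and error handling are illustrative only):
 *
 *			mhioc_resv_key_t keys[4];
 *			mhioc_key_list_t kl;
 *			mhioc_inkeys_t ik;
 *
 *			kl.listsize = 4;
 *			kl.list = keys;
 *			ik.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *				(void) printf("%u keys, generation %u\n",
 *				    kl.listlen, ik.generation);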
21665 * 21666 * Arguments: dev - the device number 21667 * arg - user provided in_keys structure 21668 * flag - this argument is a pass through to ddi_copyxxx() 21669 * directly from the mode argument of ioctl(). 21670 * 21671 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21672 * ENXIO 21673 * EFAULT 21674 */ 21675 21676 static int 21677 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21678 { 21679 struct sd_lun *un; 21680 mhioc_inkeys_t inkeys; 21681 int rval = 0; 21682 21683 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21684 return (ENXIO); 21685 } 21686 21687 #ifdef _MULTI_DATAMODEL 21688 switch (ddi_model_convert_from(flag & FMODELS)) { 21689 case DDI_MODEL_ILP32: { 21690 struct mhioc_inkeys32 inkeys32; 21691 21692 if (ddi_copyin(arg, &inkeys32, 21693 sizeof (struct mhioc_inkeys32), flag) != 0) { 21694 return (EFAULT); 21695 } 21696 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21697 if ((rval = sd_persistent_reservation_in_read_keys(un, 21698 &inkeys, flag)) != 0) { 21699 return (rval); 21700 } 21701 inkeys32.generation = inkeys.generation; 21702 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21703 flag) != 0) { 21704 return (EFAULT); 21705 } 21706 break; 21707 } 21708 case DDI_MODEL_NONE: 21709 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21710 flag) != 0) { 21711 return (EFAULT); 21712 } 21713 if ((rval = sd_persistent_reservation_in_read_keys(un, 21714 &inkeys, flag)) != 0) { 21715 return (rval); 21716 } 21717 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21718 flag) != 0) { 21719 return (EFAULT); 21720 } 21721 break; 21722 } 21723 21724 #else /* ! _MULTI_DATAMODEL */ 21725 21726 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21727 return (EFAULT); 21728 } 21729 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21730 if (rval != 0) { 21731 return (rval); 21732 } 21733 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21734 return (EFAULT); 21735 } 21736 21737 #endif /* _MULTI_DATAMODEL */ 21738 21739 return (rval); 21740 } 21741 21742 21743 /* 21744 * Function: sd_mhdioc_inresv 21745 * 21746 * Description: This routine is the driver entry point for handling ioctl 21747 * requests to issue the SCSI-3 Persistent In Read Reservations 21748 * command to the device (MHIOCGRP_INRESV). 21749 * 21750 * Arguments: dev - the device number 21751 * arg - user provided in_resv structure 21752 * flag - this argument is a pass through to ddi_copyxxx() 21753 * directly from the mode argument of ioctl().
21754 * 21755 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21756 * ENXIO 21757 * EFAULT 21758 */ 21759 21760 static int 21761 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21762 { 21763 struct sd_lun *un; 21764 mhioc_inresvs_t inresvs; 21765 int rval = 0; 21766 21767 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21768 return (ENXIO); 21769 } 21770 21771 #ifdef _MULTI_DATAMODEL 21772 21773 switch (ddi_model_convert_from(flag & FMODELS)) { 21774 case DDI_MODEL_ILP32: { 21775 struct mhioc_inresvs32 inresvs32; 21776 21777 if (ddi_copyin(arg, &inresvs32, 21778 sizeof (struct mhioc_inresvs32), flag) != 0) { 21779 return (EFAULT); 21780 } 21781 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21782 if ((rval = sd_persistent_reservation_in_read_resv(un, 21783 &inresvs, flag)) != 0) { 21784 return (rval); 21785 } 21786 inresvs32.generation = inresvs.generation; 21787 if (ddi_copyout(&inresvs32, arg, 21788 sizeof (struct mhioc_inresvs32), flag) != 0) { 21789 return (EFAULT); 21790 } 21791 break; 21792 } 21793 case DDI_MODEL_NONE: 21794 if (ddi_copyin(arg, &inresvs, 21795 sizeof (mhioc_inresvs_t), flag) != 0) { 21796 return (EFAULT); 21797 } 21798 if ((rval = sd_persistent_reservation_in_read_resv(un, 21799 &inresvs, flag)) != 0) { 21800 return (rval); 21801 } 21802 if (ddi_copyout(&inresvs, arg, 21803 sizeof (mhioc_inresvs_t), flag) != 0) { 21804 return (EFAULT); 21805 } 21806 break; 21807 } 21808 21809 #else /* ! _MULTI_DATAMODEL */ 21810 21811 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21812 return (EFAULT); 21813 } 21814 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21815 if (rval != 0) { 21816 return (rval); 21817 } 21818 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21819 return (EFAULT); 21820 } 21821 21822 #endif /* ! _MULTI_DATAMODEL */ 21823 21824 return (rval); 21825 } 21826 21827 21828 /* 21829 * The following routines support the clustering functionality described below 21830 * and implement lost reservation reclaim functionality. 21831 * 21832 * Clustering 21833 * ---------- 21834 * The clustering code uses two different, independent forms of SCSI 21835 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21836 * Persistent Group Reservations. For any particular disk, it will use either 21837 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21838 * 21839 * SCSI-2 21840 * The cluster software takes ownership of a multi-hosted disk by issuing the 21841 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21842 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21843 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21844 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21845 * driver. The meaning of failfast is that if the driver (on this host) ever 21846 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21847 * it should immediately panic the host. The motivation for this ioctl is that 21848 * if this host does encounter reservation conflict, the underlying cause is 21849 * that some other host of the cluster has decided that this host is no longer 21850 * in the cluster and has seized control of the disks for itself. Since this 21851 * host is no longer in the cluster, it ought to panic itself. 
The 21852 * MHIOCENFAILFAST ioctl does two things: 21853 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21854 * error to panic the host 21855 * (b) it sets up a periodic timer to test whether this host still has 21856 * "access" (in that no other host has reserved the device): if the 21857 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21858 * purpose of that periodic timer is to handle scenarios where the host is 21859 * otherwise temporarily quiescent, temporarily doing no real i/o. 21860 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21861 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21862 * the device itself. 21863 * 21864 * SCSI-3 PGR 21865 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21866 * facility is supported through the shared multihost disk ioctls 21867 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21868 * MHIOCGRP_PREEMPTANDABORT). 21869 * 21870 * Reservation Reclaim: 21871 * -------------------- 21872 * To support the lost reservation reclaim operations this driver creates a 21873 * single thread to handle reinstating reservations on all devices that have 21874 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21875 * have lost reservations when the scsi watch facility calls back sd_mhd_watch_cb, 21876 * and the reservation reclaim thread loops through the requests to regain the 21877 * lost reservations. 21878 */ 21879 21880 /* 21881 * Function: sd_check_mhd() 21882 * 21883 * Description: This function sets up and submits a scsi watch request or 21884 * terminates an existing watch request. This routine is used in 21885 * support of reservation reclaim. 21886 * 21887 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21888 * among multiple watches that share the callback function 21889 * interval - the number of milliseconds specifying the watch 21890 * interval for issuing TEST UNIT READY commands. If 21891 * set to 0 the watch should be terminated. If the 21892 * interval is set to 0 and if the device is required 21893 * to hold reservation while disabling failfast, the 21894 * watch is restarted with an interval of 21895 * reinstate_resv_delay. 21896 * 21897 * Return Code: 0 - Successful submit/terminate of scsi watch request 21898 * ENXIO - Indicates an invalid device was specified 21899 * EAGAIN - Unable to submit the scsi watch request 21900 */ 21901 21902 static int 21903 sd_check_mhd(dev_t dev, int interval) 21904 { 21905 struct sd_lun *un; 21906 opaque_t token; 21907 21908 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21909 return (ENXIO); 21910 } 21911 21912 /* is this a watch termination request? */ 21913 if (interval == 0) { 21914 mutex_enter(SD_MUTEX(un)); 21915 /* if there is an existing watch task then terminate it */ 21916 if (un->un_mhd_token) { 21917 token = un->un_mhd_token; 21918 un->un_mhd_token = NULL; 21919 mutex_exit(SD_MUTEX(un)); 21920 (void) scsi_watch_request_terminate(token, 21921 SCSI_WATCH_TERMINATE_WAIT); 21922 mutex_enter(SD_MUTEX(un)); 21923 } else { 21924 mutex_exit(SD_MUTEX(un)); 21925 /* 21926 * Note: If we return here we don't check for the 21927 * failfast case. This is the original legacy 21928 * implementation but perhaps we should be checking 21929 * the failfast case.
21930 */ 21931 return (0); 21932 } 21933 /* 21934 * If the device is required to hold reservation while 21935 * disabling failfast, we need to restart the scsi_watch 21936 * routine with an interval of reinstate_resv_delay. 21937 */ 21938 if (un->un_resvd_status & SD_RESERVE) { 21939 interval = sd_reinstate_resv_delay/1000; 21940 } else { 21941 /* no failfast so bail */ 21942 mutex_exit(SD_MUTEX(un)); 21943 return (0); 21944 } 21945 mutex_exit(SD_MUTEX(un)); 21946 } 21947 21948 /* 21949 * adjust minimum time interval to 1 second, 21950 * and convert from msecs to usecs 21951 */ 21952 if (interval > 0 && interval < 1000) { 21953 interval = 1000; 21954 } 21955 interval *= 1000; 21956 21957 /* 21958 * submit the request to the scsi_watch service 21959 */ 21960 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21961 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21962 if (token == NULL) { 21963 return (EAGAIN); 21964 } 21965 21966 /* 21967 * save token for termination later on 21968 */ 21969 mutex_enter(SD_MUTEX(un)); 21970 un->un_mhd_token = token; 21971 mutex_exit(SD_MUTEX(un)); 21972 return (0); 21973 } 21974 21975 21976 /* 21977 * Function: sd_mhd_watch_cb() 21978 * 21979 * Description: This function is the call back function used by the scsi watch 21980 * facility. The scsi watch facility sends the "Test Unit Ready" 21981 * and processes the status. If applicable (i.e. a "Unit Attention" 21982 * status and automatic "Request Sense" not used) the scsi watch 21983 * facility will send a "Request Sense" and retrieve the sense data 21984 * to be passed to this callback function. In either case the 21985 * automatic "Request Sense" or the facility submitting one, this 21986 * callback is passed the status and sense data. 21987 * 21988 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21989 * among multiple watches that share this callback function 21990 * resultp - scsi watch facility result packet containing scsi 21991 * packet, status byte and sense data 21992 * 21993 * Return Code: 0 - continue the watch task 21994 * non-zero - terminate the watch task 21995 */ 21996 21997 static int 21998 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21999 { 22000 struct sd_lun *un; 22001 struct scsi_status *statusp; 22002 uint8_t *sensep; 22003 struct scsi_pkt *pkt; 22004 uchar_t actual_sense_length; 22005 dev_t dev = (dev_t)arg; 22006 22007 ASSERT(resultp != NULL); 22008 statusp = resultp->statusp; 22009 sensep = (uint8_t *)resultp->sensep; 22010 pkt = resultp->pkt; 22011 actual_sense_length = resultp->actual_sense_length; 22012 22013 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22014 return (ENXIO); 22015 } 22016 22017 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22018 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22019 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22020 22021 /* Begin processing of the status and/or sense data */ 22022 if (pkt->pkt_reason != CMD_CMPLT) { 22023 /* Handle the incomplete packet */ 22024 sd_mhd_watch_incomplete(un, pkt); 22025 return (0); 22026 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22027 if (*((unsigned char *)statusp) 22028 == STATUS_RESERVATION_CONFLICT) { 22029 /* 22030 * Handle a reservation conflict by panicking if 22031 * configured for failfast or by logging the conflict 22032 * and updating the reservation status 22033 */ 22034 mutex_enter(SD_MUTEX(un)); 22035 if ((un->un_resvd_status & SD_FAILFAST) && 22036 (sd_failfast_enable)) { 22037 
sd_panic_for_res_conflict(un); 22038 /*NOTREACHED*/ 22039 } 22040 SD_INFO(SD_LOG_IOCTL_MHD, un, 22041 "sd_mhd_watch_cb: Reservation Conflict\n"); 22042 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22043 mutex_exit(SD_MUTEX(un)); 22044 } 22045 } 22046 22047 if (sensep != NULL) { 22048 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22049 mutex_enter(SD_MUTEX(un)); 22050 if ((scsi_sense_asc(sensep) == 22051 SD_SCSI_RESET_SENSE_CODE) && 22052 (un->un_resvd_status & SD_RESERVE)) { 22053 /* 22054 * The additional sense code indicates a power 22055 * on or bus device reset has occurred; update 22056 * the reservation status. 22057 */ 22058 un->un_resvd_status |= 22059 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22060 SD_INFO(SD_LOG_IOCTL_MHD, un, 22061 "sd_mhd_watch_cb: Lost Reservation\n"); 22062 } 22063 } else { 22064 return (0); 22065 } 22066 } else { 22067 mutex_enter(SD_MUTEX(un)); 22068 } 22069 22070 if ((un->un_resvd_status & SD_RESERVE) && 22071 (un->un_resvd_status & SD_LOST_RESERVE)) { 22072 if (un->un_resvd_status & SD_WANT_RESERVE) { 22073 /* 22074 * A reset occurred in between the last probe and this 22075 * one so if a timeout is pending cancel it. 22076 */ 22077 if (un->un_resvd_timeid) { 22078 timeout_id_t temp_id = un->un_resvd_timeid; 22079 un->un_resvd_timeid = NULL; 22080 mutex_exit(SD_MUTEX(un)); 22081 (void) untimeout(temp_id); 22082 mutex_enter(SD_MUTEX(un)); 22083 } 22084 un->un_resvd_status &= ~SD_WANT_RESERVE; 22085 } 22086 if (un->un_resvd_timeid == 0) { 22087 /* Schedule a timeout to handle the lost reservation */ 22088 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22089 (void *)dev, 22090 drv_usectohz(sd_reinstate_resv_delay)); 22091 } 22092 } 22093 mutex_exit(SD_MUTEX(un)); 22094 return (0); 22095 } 22096 22097 22098 /* 22099 * Function: sd_mhd_watch_incomplete() 22100 * 22101 * Description: This function is used to find out why a scsi pkt sent by the 22102 * scsi watch facility was not completed. Under some scenarios this 22103 * routine will return. Otherwise it will send a bus reset to see 22104 * if the drive is still online. 22105 * 22106 * Arguments: un - driver soft state (unit) structure 22107 * pkt - incomplete scsi pkt 22108 */ 22109 22110 static void 22111 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22112 { 22113 int be_chatty; 22114 int perr; 22115 22116 ASSERT(pkt != NULL); 22117 ASSERT(un != NULL); 22118 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22119 perr = (pkt->pkt_statistics & STAT_PERR); 22120 22121 mutex_enter(SD_MUTEX(un)); 22122 if (un->un_state == SD_STATE_DUMPING) { 22123 mutex_exit(SD_MUTEX(un)); 22124 return; 22125 } 22126 22127 switch (pkt->pkt_reason) { 22128 case CMD_UNX_BUS_FREE: 22129 /* 22130 * If we had a parity error that caused the target to drop BSY*, 22131 * don't be chatty about it. 22132 */ 22133 if (perr && be_chatty) { 22134 be_chatty = 0; 22135 } 22136 break; 22137 case CMD_TAG_REJECT: 22138 /* 22139 * The SCSI-2 spec states that a tag reject will be sent by the 22140 * target if tagged queuing is not supported. A tag reject may 22141 * also be sent during certain initialization periods or to 22142 * control internal resources. For the latter case the target 22143 * may also return Queue Full. 22144 * 22145 * If this driver receives a tag reject from a target that is 22146 * going through an init period or controlling internal 22147 * resources tagged queuing will be disabled. 
This is less 22148 * than optimal behavior, but the driver is unable to determine 22149 * the target state and assumes tagged queueing is not supported. 22150 */ 22151 pkt->pkt_flags = 0; 22152 un->un_tagflags = 0; 22153 22154 if (un->un_f_opt_queueing == TRUE) { 22155 un->un_throttle = min(un->un_throttle, 3); 22156 } else { 22157 un->un_throttle = 1; 22158 } 22159 mutex_exit(SD_MUTEX(un)); 22160 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22161 mutex_enter(SD_MUTEX(un)); 22162 break; 22163 case CMD_INCOMPLETE: 22164 /* 22165 * The transport stopped with an abnormal state; fall through and 22166 * reset the target and/or bus unless selection did not complete 22167 * (indicated by STATE_GOT_BUS), in which case we don't want to 22168 * go through a target/bus reset 22169 */ 22170 if (pkt->pkt_state == STATE_GOT_BUS) { 22171 break; 22172 } 22173 /*FALLTHROUGH*/ 22174 22175 case CMD_TIMEOUT: 22176 default: 22177 /* 22178 * The lun may still be running the command, so a lun reset 22179 * should be attempted. If the lun reset fails or cannot be 22180 * issued, then try a target reset. Lastly try a bus reset. 22181 */ 22182 if ((pkt->pkt_statistics & 22183 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22184 int reset_retval = 0; 22185 mutex_exit(SD_MUTEX(un)); 22186 if (un->un_f_allow_bus_device_reset == TRUE) { 22187 if (un->un_f_lun_reset_enabled == TRUE) { 22188 reset_retval = 22189 scsi_reset(SD_ADDRESS(un), 22190 RESET_LUN); 22191 } 22192 if (reset_retval == 0) { 22193 reset_retval = 22194 scsi_reset(SD_ADDRESS(un), 22195 RESET_TARGET); 22196 } 22197 } 22198 if (reset_retval == 0) { 22199 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22200 } 22201 mutex_enter(SD_MUTEX(un)); 22202 } 22203 break; 22204 } 22205 22206 /* A device/bus reset has occurred; update the reservation status. */ 22207 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22208 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22209 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22210 un->un_resvd_status |= 22211 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22212 SD_INFO(SD_LOG_IOCTL_MHD, un, 22213 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22214 } 22215 } 22216 22217 /* 22218 * The disk has been turned off; update the device state. 22219 * 22220 * Note: Should we be offlining the disk here? 22221 */ 22222 if (pkt->pkt_state == STATE_GOT_BUS) { 22223 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22224 "Disk not responding to selection\n"); 22225 if (un->un_state != SD_STATE_OFFLINE) { 22226 New_state(un, SD_STATE_OFFLINE); 22227 } 22228 } else if (be_chatty) { 22229 /* 22230 * suppress messages if they are all the same pkt reason; 22231 * with TQ, many (up to 256) are returned with the same 22232 * pkt_reason 22233 */ 22234 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22235 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22236 "sd_mhd_watch_incomplete: " 22237 "SCSI transport failed: reason '%s'\n", 22238 scsi_rname(pkt->pkt_reason)); 22239 } 22240 } 22241 un->un_last_pkt_reason = pkt->pkt_reason; 22242 mutex_exit(SD_MUTEX(un)); 22243 } 22244 22245 22246 /* 22247 * Function: sd_sname() 22248 * 22249 * Description: This is a simple little routine to return a string containing 22250 * a printable description of the command status byte, for use in 22251 * logging. 22252 * 22253 * Arguments: status - the command status byte 22254 * 22255 * Return Code: char * - string containing status description.
22256 */ 22257 22258 static char * 22259 sd_sname(uchar_t status) 22260 { 22261 switch (status & STATUS_MASK) { 22262 case STATUS_GOOD: 22263 return ("good status"); 22264 case STATUS_CHECK: 22265 return ("check condition"); 22266 case STATUS_MET: 22267 return ("condition met"); 22268 case STATUS_BUSY: 22269 return ("busy"); 22270 case STATUS_INTERMEDIATE: 22271 return ("intermediate"); 22272 case STATUS_INTERMEDIATE_MET: 22273 return ("intermediate - condition met"); 22274 case STATUS_RESERVATION_CONFLICT: 22275 return ("reservation_conflict"); 22276 case STATUS_TERMINATED: 22277 return ("command terminated"); 22278 case STATUS_QFULL: 22279 return ("queue full"); 22280 default: 22281 return ("<unknown status>"); 22282 } 22283 } 22284 22285 22286 /* 22287 * Function: sd_mhd_resvd_recover() 22288 * 22289 * Description: This function adds a reservation entry to the 22290 * sd_resv_reclaim_request list and signals the reservation 22291 * reclaim thread that there is work pending. If the reservation 22292 * reclaim thread has not been previously created this function 22293 * will kick it off. 22294 * 22295 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22296 * among multiple watches that share this callback function 22297 * 22298 * Context: This routine is called by timeout() and is run in interrupt 22299 * context. It must not sleep or call other functions which may 22300 * sleep. 22301 */ 22302 22303 static void 22304 sd_mhd_resvd_recover(void *arg) 22305 { 22306 dev_t dev = (dev_t)arg; 22307 struct sd_lun *un; 22308 struct sd_thr_request *sd_treq = NULL; 22309 struct sd_thr_request *sd_cur = NULL; 22310 struct sd_thr_request *sd_prev = NULL; 22311 int already_there = 0; 22312 22313 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22314 return; 22315 } 22316 22317 mutex_enter(SD_MUTEX(un)); 22318 un->un_resvd_timeid = NULL; 22319 if (un->un_resvd_status & SD_WANT_RESERVE) { 22320 /* 22321 * There was a reset so don't issue the reserve, allow the 22322 * sd_mhd_watch_cb callback function to notice this and 22323 * reschedule the timeout for reservation. 22324 */ 22325 mutex_exit(SD_MUTEX(un)); 22326 return; 22327 } 22328 mutex_exit(SD_MUTEX(un)); 22329 22330 /* 22331 * Add this device to the sd_resv_reclaim_request list and the 22332 * sd_resv_reclaim_thread should take care of the rest. 22333 * 22334 * Note: We can't sleep in this context so if the memory allocation 22335 * fails allow the sd_mhd_watch_cb callback function to notice this and 22336 * reschedule the timeout for reservation. 
(4378460) 22337 */ 22338 sd_treq = (struct sd_thr_request *) 22339 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22340 if (sd_treq == NULL) { 22341 return; 22342 } 22343 22344 sd_treq->sd_thr_req_next = NULL; 22345 sd_treq->dev = dev; 22346 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22347 if (sd_tr.srq_thr_req_head == NULL) { 22348 sd_tr.srq_thr_req_head = sd_treq; 22349 } else { 22350 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22351 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22352 if (sd_cur->dev == dev) { 22353 /* 22354 * already in queue, so don't log 22355 * another request for this device 22356 */ 22357 already_there = 1; 22358 break; 22359 } 22360 sd_prev = sd_cur; 22361 } 22362 if (!already_there) { 22363 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22364 "logging request for %lx\n", dev); 22365 sd_prev->sd_thr_req_next = sd_treq; 22366 } else { 22367 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22368 } 22369 } 22370 22371 /* 22372 * Create a kernel thread to do the reservation reclaim and free up this 22373 * thread. We cannot block this (timeout) thread while we go away to do 22374 * the reservation reclaim. 22375 */ 22376 if (sd_tr.srq_resv_reclaim_thread == NULL) 22377 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22378 sd_resv_reclaim_thread, NULL, 22379 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22380 22381 /* Tell the reservation reclaim thread that it has work to do */ 22382 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22383 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22384 } 22385 22386 /* 22387 * Function: sd_resv_reclaim_thread() 22388 * 22389 * Description: This function implements the reservation reclaim operations. 22390 * 22391 * Arguments: none; the thread drains the shared sd_tr request queue 22392 * that sd_mhd_resvd_recover() populates. 22393 */ 22394 22395 static void 22396 sd_resv_reclaim_thread() 22397 { 22398 struct sd_lun *un; 22399 struct sd_thr_request *sd_mhreq; 22400 22401 /* Wait for work */ 22402 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22403 if (sd_tr.srq_thr_req_head == NULL) { 22404 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22405 &sd_tr.srq_resv_reclaim_mutex); 22406 } 22407 22408 /* Loop while we have work */ 22409 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22410 un = ddi_get_soft_state(sd_state, 22411 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22412 if (un == NULL) { 22413 /* 22414 * softstate structure is NULL so just 22415 * dequeue the request and continue 22416 */ 22417 sd_tr.srq_thr_req_head = 22418 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22419 kmem_free(sd_tr.srq_thr_cur_req, 22420 sizeof (struct sd_thr_request)); 22421 continue; 22422 } 22423 22424 /* dequeue the request */ 22425 sd_mhreq = sd_tr.srq_thr_cur_req; 22426 sd_tr.srq_thr_req_head = 22427 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22428 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22429 22430 /* 22431 * Reclaim reservation only if SD_RESERVE is still set. There 22432 * may have been a call to MHIOCRELEASE before we got here. 22433 */ 22434 mutex_enter(SD_MUTEX(un)); 22435 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22436 /* 22437 * Note: The SD_LOST_RESERVE flag is cleared before 22438 * reclaiming the reservation.
If this is done after the 22439 * call to sd_reserve_release a reservation loss in the 22440 * window between pkt completion of reserve cmd and 22441 * mutex_enter below may not be recognized 22442 */ 22443 un->un_resvd_status &= ~SD_LOST_RESERVE; 22444 mutex_exit(SD_MUTEX(un)); 22445 22446 if (sd_reserve_release(sd_mhreq->dev, 22447 SD_RESERVE) == 0) { 22448 mutex_enter(SD_MUTEX(un)); 22449 un->un_resvd_status |= SD_RESERVE; 22450 mutex_exit(SD_MUTEX(un)); 22451 SD_INFO(SD_LOG_IOCTL_MHD, un, 22452 "sd_resv_reclaim_thread: " 22453 "Reservation Recovered\n"); 22454 } else { 22455 mutex_enter(SD_MUTEX(un)); 22456 un->un_resvd_status |= SD_LOST_RESERVE; 22457 mutex_exit(SD_MUTEX(un)); 22458 SD_INFO(SD_LOG_IOCTL_MHD, un, 22459 "sd_resv_reclaim_thread: Failed " 22460 "Reservation Recovery\n"); 22461 } 22462 } else { 22463 mutex_exit(SD_MUTEX(un)); 22464 } 22465 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22466 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22467 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22468 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22469 /* 22470 * wakeup the destroy thread if anyone is waiting on 22471 * us to complete. 22472 */ 22473 cv_signal(&sd_tr.srq_inprocess_cv); 22474 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22475 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22476 } 22477 22478 /* 22479 * cleanup the sd_tr structure now that this thread will not exist 22480 */ 22481 ASSERT(sd_tr.srq_thr_req_head == NULL); 22482 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22483 sd_tr.srq_resv_reclaim_thread = NULL; 22484 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22485 thread_exit(); 22486 } 22487 22488 22489 /* 22490 * Function: sd_rmv_resv_reclaim_req() 22491 * 22492 * Description: This function removes any pending reservation reclaim requests 22493 * for the specified device. 22494 * 22495 * Arguments: dev - the device 'dev_t' 22496 */ 22497 22498 static void 22499 sd_rmv_resv_reclaim_req(dev_t dev) 22500 { 22501 struct sd_thr_request *sd_mhreq; 22502 struct sd_thr_request *sd_prev; 22503 22504 /* Remove a reservation reclaim request from the list */ 22505 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22506 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22507 /* 22508 * We are attempting to reinstate reservation for 22509 * this device. We wait for sd_reserve_release() 22510 * to return before we return. 22511 */ 22512 cv_wait(&sd_tr.srq_inprocess_cv, 22513 &sd_tr.srq_resv_reclaim_mutex); 22514 } else { 22515 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22516 if (sd_mhreq && sd_mhreq->dev == dev) { 22517 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22518 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22519 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22520 return; 22521 } 22522 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22523 if (sd_mhreq && sd_mhreq->dev == dev) { 22524 break; 22525 } 22526 sd_prev = sd_mhreq; 22527 } 22528 if (sd_mhreq != NULL) { 22529 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22530 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22531 } 22532 } 22533 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22534 } 22535 22536 22537 /* 22538 * Function: sd_mhd_reset_notify_cb() 22539 * 22540 * Description: This is a call back function for scsi_reset_notify. This 22541 * function updates the softstate reserved status and logs the 22542 * reset. The driver scsi watch facility callback function 22543 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22544 * will reclaim the reservation. 
22545 * 22546 * Arguments: arg - driver soft state (unit) structure 22547 */ 22548 22549 static void 22550 sd_mhd_reset_notify_cb(caddr_t arg) 22551 { 22552 struct sd_lun *un = (struct sd_lun *)arg; 22553 22554 mutex_enter(SD_MUTEX(un)); 22555 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22556 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22557 SD_INFO(SD_LOG_IOCTL_MHD, un, 22558 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22559 } 22560 mutex_exit(SD_MUTEX(un)); 22561 } 22562 22563 22564 /* 22565 * Function: sd_take_ownership() 22566 * 22567 * Description: This routine implements an algorithm to achieve a stable 22568 * reservation on disks which don't implement priority reserve, 22569 * and makes sure that other hosts' re-reservation attempts fail. 22570 * This algorithm consists of a loop that keeps reissuing the 22571 * RESERVE for some period of time (min_ownership_delay, default 22572 * 6 seconds). During that loop, it looks to see if there has been 22573 * a bus device reset or bus reset (both of which cause an existing 22574 * reservation to be lost). If the reservation is lost, RESERVE is 22575 * reissued until a period of min_ownership_delay with no resets 22576 * has gone by, or until max_ownership_delay has expired. This loop 22577 * ensures that the host really did manage to reserve the device, 22578 * in spite of resets. The looping for min_ownership_delay (default 22579 * six seconds) is important to early generation clustering 22580 * products, Solstice HA 1.x and Sun Cluster 2.x. Those products 22581 * use an MHIOCENFAILFAST periodic timer of two seconds. By having 22582 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22583 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22584 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22585 * have already noticed, via the MHIOCENFAILFAST polling, that it 22586 * no longer "owns" the disk and will have panicked itself. Thus, 22587 * the host issuing the MHIOCTKOWN is assured (with timing 22588 * dependencies) that by the time it actually starts to use the 22589 * disk for real work, the old owner is no longer accessing it. 22590 * 22591 * min_ownership_delay is the minimum amount of time for which the 22592 * disk must be reserved continuously devoid of resets before the 22593 * MHIOCTKOWN ioctl will return success. 22594 * 22595 * max_ownership_delay indicates the amount of time by which the 22596 * take ownership should succeed or time out with an error. 22597 * 22598 * Arguments: dev - the device 'dev_t' 22599 * *p - struct containing timing info. 22600 * 22601 * Return Code: 0 for success or error code 22602 */ 22603 22604 static int 22605 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22606 { 22607 struct sd_lun *un; 22608 int rval; 22609 int err; 22610 int reservation_count = 0; 22611 int min_ownership_delay = 6000000; /* in usec */ 22612 int max_ownership_delay = 30000000; /* in usec */ 22613 clock_t start_time; /* starting time of this algorithm */ 22614 clock_t end_time; /* time limit for giving up */ 22615 clock_t ownership_time; /* time limit for stable ownership */ 22616 clock_t current_time; 22617 clock_t previous_current_time; 22618 22619 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22620 return (ENXIO); 22621 } 22622 22623 /* 22624 * Attempt a device reservation. A priority reservation is requested.
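 * Illustrative timing, using the default delays declared above: the
 * loop below reissues RESERVE every 500 msec, declares ownership
 * stable once at least four consecutive reserves have succeeded with
 * no intervening reset and min_ownership_delay (6 sec) has elapsed,
 * and gives up with EACCES once max_ownership_delay (30 sec) expires.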
22625 */ 22626 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22627 != SD_SUCCESS) { 22628 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22629 "sd_take_ownership: return(1)=%d\n", rval); 22630 return (rval); 22631 } 22632 22633 /* Update the softstate reserved status to indicate the reservation */ 22634 mutex_enter(SD_MUTEX(un)); 22635 un->un_resvd_status |= SD_RESERVE; 22636 un->un_resvd_status &= 22637 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22638 mutex_exit(SD_MUTEX(un)); 22639 22640 if (p != NULL) { 22641 if (p->min_ownership_delay != 0) { 22642 min_ownership_delay = p->min_ownership_delay * 1000; 22643 } 22644 if (p->max_ownership_delay != 0) { 22645 max_ownership_delay = p->max_ownership_delay * 1000; 22646 } 22647 } 22648 SD_INFO(SD_LOG_IOCTL_MHD, un, 22649 "sd_take_ownership: min, max delays: %d, %d\n", 22650 min_ownership_delay, max_ownership_delay); 22651 22652 start_time = ddi_get_lbolt(); 22653 current_time = start_time; 22654 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22655 end_time = start_time + drv_usectohz(max_ownership_delay); 22656 22657 while (current_time - end_time < 0) { 22658 delay(drv_usectohz(500000)); 22659 22660 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22661 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22662 mutex_enter(SD_MUTEX(un)); 22663 rval = (un->un_resvd_status & 22664 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22665 mutex_exit(SD_MUTEX(un)); 22666 break; 22667 } 22668 } 22669 previous_current_time = current_time; 22670 current_time = ddi_get_lbolt(); 22671 mutex_enter(SD_MUTEX(un)); 22672 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22673 ownership_time = ddi_get_lbolt() + 22674 drv_usectohz(min_ownership_delay); 22675 reservation_count = 0; 22676 } else { 22677 reservation_count++; 22678 } 22679 un->un_resvd_status |= SD_RESERVE; 22680 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22681 mutex_exit(SD_MUTEX(un)); 22682 22683 SD_INFO(SD_LOG_IOCTL_MHD, un, 22684 "sd_take_ownership: ticks for loop iteration=%ld, " 22685 "reservation=%s\n", (current_time - previous_current_time), 22686 reservation_count ? 
"ok" : "reclaimed"); 22687 22688 if (current_time - ownership_time >= 0 && 22689 reservation_count >= 4) { 22690 rval = 0; /* Achieved a stable ownership */ 22691 break; 22692 } 22693 if (current_time - end_time >= 0) { 22694 rval = EACCES; /* No ownership in max possible time */ 22695 break; 22696 } 22697 } 22698 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22699 "sd_take_ownership: return(2)=%d\n", rval); 22700 return (rval); 22701 } 22702 22703 22704 /* 22705 * Function: sd_reserve_release() 22706 * 22707 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22708 * PRIORITY RESERVE commands based on a user specified command type 22709 * 22710 * Arguments: dev - the device 'dev_t' 22711 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22712 * SD_RESERVE, SD_RELEASE 22713 * 22714 * Return Code: 0 or Error Code 22715 */ 22716 22717 static int 22718 sd_reserve_release(dev_t dev, int cmd) 22719 { 22720 struct uscsi_cmd *com = NULL; 22721 struct sd_lun *un = NULL; 22722 char cdb[CDB_GROUP0]; 22723 int rval; 22724 22725 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22726 (cmd == SD_PRIORITY_RESERVE)); 22727 22728 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22729 return (ENXIO); 22730 } 22731 22732 /* instantiate and initialize the command and cdb */ 22733 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22734 bzero(cdb, CDB_GROUP0); 22735 com->uscsi_flags = USCSI_SILENT; 22736 com->uscsi_timeout = un->un_reserve_release_time; 22737 com->uscsi_cdblen = CDB_GROUP0; 22738 com->uscsi_cdb = cdb; 22739 if (cmd == SD_RELEASE) { 22740 cdb[0] = SCMD_RELEASE; 22741 } else { 22742 cdb[0] = SCMD_RESERVE; 22743 } 22744 22745 /* Send the command. */ 22746 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22747 SD_PATH_STANDARD); 22748 22749 /* 22750 * "break" a reservation that is held by another host, by issuing a 22751 * reset if priority reserve is desired, and we could not get the 22752 * device. 22753 */ 22754 if ((cmd == SD_PRIORITY_RESERVE) && 22755 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22756 /* 22757 * First try to reset the LUN. If we cannot, then try a target 22758 * reset, followed by a bus reset if the target reset fails. 22759 */ 22760 int reset_retval = 0; 22761 if (un->un_f_lun_reset_enabled == TRUE) { 22762 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22763 } 22764 if (reset_retval == 0) { 22765 /* The LUN reset either failed or was not issued */ 22766 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22767 } 22768 if ((reset_retval == 0) && 22769 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22770 rval = EIO; 22771 kmem_free(com, sizeof (*com)); 22772 return (rval); 22773 } 22774 22775 bzero(com, sizeof (struct uscsi_cmd)); 22776 com->uscsi_flags = USCSI_SILENT; 22777 com->uscsi_cdb = cdb; 22778 com->uscsi_cdblen = CDB_GROUP0; 22779 com->uscsi_timeout = 5; 22780 22781 /* 22782 * Reissue the last reserve command, this time without request 22783 * sense. Assume that it is just a regular reserve command. 22784 */ 22785 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22786 SD_PATH_STANDARD); 22787 } 22788 22789 /* Return an error if still getting a reservation conflict. 
*/ 22790 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22791 rval = EACCES; 22792 } 22793 22794 kmem_free(com, sizeof (*com)); 22795 return (rval); 22796 } 22797 22798 22799 #define SD_NDUMP_RETRIES 12 22800 /* 22801 * System Crash Dump routine 22802 */ 22803 22804 static int 22805 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22806 { 22807 int instance; 22808 int partition; 22809 int i; 22810 int err; 22811 struct sd_lun *un; 22812 struct scsi_pkt *wr_pktp; 22813 struct buf *wr_bp; 22814 struct buf wr_buf; 22815 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22816 daddr_t tgt_blkno; /* rmw - blkno for target */ 22817 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22818 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22819 size_t io_start_offset; 22820 int doing_rmw = FALSE; 22821 int rval; 22822 ssize_t dma_resid; 22823 daddr_t oblkno; 22824 diskaddr_t nblks = 0; 22825 diskaddr_t start_block; 22826 22827 instance = SDUNIT(dev); 22828 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22829 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22830 return (ENXIO); 22831 } 22832 22833 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22834 22835 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22836 22837 partition = SDPART(dev); 22838 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22839 22840 /* Validate the blocks to dump against the partition size. */ 22841 22842 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22843 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22844 22845 if ((blkno + nblk) > nblks) { 22846 SD_TRACE(SD_LOG_DUMP, un, 22847 "sddump: dump range larger than partition: " 22848 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22849 blkno, nblk, nblks); 22850 return (EINVAL); 22851 } 22852 22853 mutex_enter(&un->un_pm_mutex); 22854 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22855 struct scsi_pkt *start_pktp; 22856 22857 mutex_exit(&un->un_pm_mutex); 22858 22859 /* 22860 * use pm framework to power on HBA 1st 22861 */ 22862 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22863 22864 /* 22865 * Dump no longer uses sdpower to power on a device; it's done 22866 * in-line here so that it can be done in polled mode. 22867 */ 22868 22869 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22870 22871 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22872 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22873 22874 if (start_pktp == NULL) { 22875 /* We were not given a SCSI packet, fail. */ 22876 return (EIO); 22877 } 22878 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22879 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22880 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22881 start_pktp->pkt_flags = FLAG_NOINTR; 22882 22883 mutex_enter(SD_MUTEX(un)); 22884 SD_FILL_SCSI1_LUN(un, start_pktp); 22885 mutex_exit(SD_MUTEX(un)); 22886 /* 22887 * Scsi_poll returns 0 (success) if the command completes and 22888 * the status block is STATUS_GOOD. 22889 */ 22890 if (sd_scsi_poll(un, start_pktp) != 0) { 22891 scsi_destroy_pkt(start_pktp); 22892 return (EIO); 22893 } 22894 scsi_destroy_pkt(start_pktp); 22895 (void) sd_ddi_pm_resume(un); 22896 } else { 22897 mutex_exit(&un->un_pm_mutex); 22898 } 22899 22900 mutex_enter(SD_MUTEX(un)); 22901 un->un_throttle = 0; 22902 22903 /* 22904 * The first time through, reset the specific target device. 22905 * However, when cpr calls sddump we know that sd is in a 22906 * good state, so no bus reset is required. 22907 * Clear sense data via Request Sense cmd.
In sddump we don't care about allow_bus_device_reset anymore 22909 */ 22910 22911 if ((un->un_state != SD_STATE_SUSPENDED) && 22912 (un->un_state != SD_STATE_DUMPING)) { 22913 22914 New_state(un, SD_STATE_DUMPING); 22915 22916 if (un->un_f_is_fibre == FALSE) { 22917 mutex_exit(SD_MUTEX(un)); 22918 /* 22919 * Attempt a bus reset for parallel scsi. 22920 * 22921 * Note: A bus reset is required because on some host 22922 * systems (i.e. E420R) a bus device reset is 22923 * insufficient to reset the state of the target. 22924 * 22925 * Note: Don't issue the reset for fibre-channel, 22926 * because this tends to hang the bus (loop) for 22927 * too long while everyone is logging out and in 22928 * and the deadman timer for dumping will fire 22929 * before the dump is complete. 22930 */ 22931 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22932 mutex_enter(SD_MUTEX(un)); 22933 Restore_state(un); 22934 mutex_exit(SD_MUTEX(un)); 22935 return (EIO); 22936 } 22937 22938 /* Delay to give the device some recovery time. */ 22939 drv_usecwait(10000); 22940 22941 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22942 SD_INFO(SD_LOG_DUMP, un, 22943 "sddump: sd_send_polled_RQS failed\n"); 22944 } 22945 mutex_enter(SD_MUTEX(un)); 22946 } 22947 } 22948 22949 /* 22950 * Convert the partition-relative block number to a 22951 * disk physical block number. 22952 */ 22953 blkno += start_block; 22954 22955 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22956 22957 22958 /* 22959 * Check if the device has a non-512 block size. 22960 */ 22961 wr_bp = NULL; 22962 if (NOT_DEVBSIZE(un)) { 22963 tgt_byte_offset = blkno * un->un_sys_blocksize; 22964 tgt_byte_count = nblk * un->un_sys_blocksize; 22965 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22966 (tgt_byte_count % un->un_tgt_blocksize)) { 22967 doing_rmw = TRUE; 22968 /* 22969 * Calculate the block number and number of blocks 22970 * in terms of the media block size. 22971 */ 22972 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22973 tgt_nblk = 22974 ((tgt_byte_offset + tgt_byte_count + 22975 (un->un_tgt_blocksize - 1)) / 22976 un->un_tgt_blocksize) - tgt_blkno; 22977 22978 /* 22979 * Invoke the routine which is going to do the read 22980 * part of the read-modify-write. 22981 * Note that this routine returns a pointer to 22982 * a valid bp in wr_bp. 22983 */ 22984 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22985 &wr_bp); 22986 if (err) { 22987 mutex_exit(SD_MUTEX(un)); 22988 return (err); 22989 } 22990 /* 22991 * The offset is calculated as: 22992 * (original block # * system block size) - 22993 * (new block # * target block size) 22994 */ 22995 io_start_offset = 22996 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22997 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22998 22999 ASSERT((io_start_offset >= 0) && 23000 (io_start_offset < un->un_tgt_blocksize)); 23001 /* 23002 * Do the modify portion of the read modify write.
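 * Worked example: with 512-byte system blocks and a 2048-byte target
 * block size, dumping from blkno 5 gives tgt_byte_offset 2560, so
 * tgt_blkno is 1 and io_start_offset is 2560 - 2048 = 512; the bcopy
 * below then lands 512 bytes into the 2048-byte block just read.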
23003 */ 23004 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23005 (size_t)nblk * un->un_sys_blocksize); 23006 } else { 23007 doing_rmw = FALSE; 23008 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23009 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23010 } 23011 23012 /* Convert blkno and nblk to target blocks */ 23013 blkno = tgt_blkno; 23014 nblk = tgt_nblk; 23015 } else { 23016 wr_bp = &wr_buf; 23017 bzero(wr_bp, sizeof (struct buf)); 23018 wr_bp->b_flags = B_BUSY; 23019 wr_bp->b_un.b_addr = addr; 23020 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23021 wr_bp->b_resid = 0; 23022 } 23023 23024 mutex_exit(SD_MUTEX(un)); 23025 23026 /* 23027 * Obtain a SCSI packet for the write command. 23028 * It should be safe to call the allocator here without 23029 * worrying about being locked for DVMA mapping because 23030 * the address we're passed is already a DVMA mapping 23031 * 23032 * We are also not going to worry about semaphore ownership 23033 * in the dump buffer. Dumping is single threaded at present. 23034 */ 23035 23036 wr_pktp = NULL; 23037 23038 dma_resid = wr_bp->b_bcount; 23039 oblkno = blkno; 23040 23041 while (dma_resid != 0) { 23042 23043 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23044 wr_bp->b_flags &= ~B_ERROR; 23045 23046 if (un->un_partial_dma_supported == 1) { 23047 blkno = oblkno + 23048 ((wr_bp->b_bcount - dma_resid) / 23049 un->un_tgt_blocksize); 23050 nblk = dma_resid / un->un_tgt_blocksize; 23051 23052 if (wr_pktp) { 23053 /* 23054 * Partial DMA transfers after initial transfer 23055 */ 23056 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23057 blkno, nblk); 23058 } else { 23059 /* Initial transfer */ 23060 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23061 un->un_pkt_flags, NULL_FUNC, NULL, 23062 blkno, nblk); 23063 } 23064 } else { 23065 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23066 0, NULL_FUNC, NULL, blkno, nblk); 23067 } 23068 23069 if (rval == 0) { 23070 /* We were given a SCSI packet, continue. 
*/ 23071 break; 23072 } 23073 23074 if (i == 0) { 23075 if (wr_bp->b_flags & B_ERROR) { 23076 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23077 "no resources for dumping; " 23078 "error code: 0x%x, retrying", 23079 geterror(wr_bp)); 23080 } else { 23081 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23082 "no resources for dumping; retrying"); 23083 } 23084 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23085 if (wr_bp->b_flags & B_ERROR) { 23086 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23087 "no resources for dumping; error code: " 23088 "0x%x, retrying\n", geterror(wr_bp)); 23089 } 23090 } else { 23091 if (wr_bp->b_flags & B_ERROR) { 23092 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23093 "no resources for dumping; " 23094 "error code: 0x%x, retries failed, " 23095 "giving up.\n", geterror(wr_bp)); 23096 } else { 23097 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23098 "no resources for dumping; " 23099 "retries failed, giving up.\n"); 23100 } 23101 mutex_enter(SD_MUTEX(un)); 23102 Restore_state(un); 23103 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23104 mutex_exit(SD_MUTEX(un)); 23105 scsi_free_consistent_buf(wr_bp); 23106 } else { 23107 mutex_exit(SD_MUTEX(un)); 23108 } 23109 return (EIO); 23110 } 23111 drv_usecwait(10000); 23112 } 23113 23114 if (un->un_partial_dma_supported == 1) { 23115 /* 23116 * save the resid from PARTIAL_DMA 23117 */ 23118 dma_resid = wr_pktp->pkt_resid; 23119 if (dma_resid != 0) 23120 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23121 wr_pktp->pkt_resid = 0; 23122 } else { 23123 dma_resid = 0; 23124 } 23125 23126 /* SunBug 1222170 */ 23127 wr_pktp->pkt_flags = FLAG_NOINTR; 23128 23129 err = EIO; 23130 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23131 23132 /* 23133 * Scsi_poll returns 0 (success) if the command completes and 23134 * the status block is STATUS_GOOD. We should only check 23135 * errors if this condition is not true. Even then we should 23136 * send our own request sense packet only if we have a check 23137 * condition and auto request sense has not been performed by 23138 * the hba. 23139 */ 23140 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23141 23142 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23143 (wr_pktp->pkt_resid == 0)) { 23144 err = SD_SUCCESS; 23145 break; 23146 } 23147 23148 /* 23149 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23150 */ 23151 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23152 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23153 "Error while dumping state...Device is gone\n"); 23154 break; 23155 } 23156 23157 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23158 SD_INFO(SD_LOG_DUMP, un, 23159 "sddump: write failed with CHECK, try # %d\n", i); 23160 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23161 (void) sd_send_polled_RQS(un); 23162 } 23163 23164 continue; 23165 } 23166 23167 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23168 int reset_retval = 0; 23169 23170 SD_INFO(SD_LOG_DUMP, un, 23171 "sddump: write failed with BUSY, try # %d\n", i); 23172 23173 if (un->un_f_lun_reset_enabled == TRUE) { 23174 reset_retval = scsi_reset(SD_ADDRESS(un), 23175 RESET_LUN); 23176 } 23177 if (reset_retval == 0) { 23178 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23179 } 23180 (void) sd_send_polled_RQS(un); 23181 23182 } else { 23183 SD_INFO(SD_LOG_DUMP, un, 23184 "sddump: write failed with 0x%x, try # %d\n", 23185 SD_GET_PKT_STATUS(wr_pktp), i); 23186 mutex_enter(SD_MUTEX(un)); 23187 sd_reset_target(un, wr_pktp); 23188 mutex_exit(SD_MUTEX(un)); 23189 } 23190 23191 /* 23192 * If we are not getting anywhere with lun/target resets, 23193 * let's reset the bus. 23194 */ 23195 if (i == SD_NDUMP_RETRIES/2) { 23196 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23197 (void) sd_send_polled_RQS(un); 23198 } 23199 } 23200 } 23201 23202 scsi_destroy_pkt(wr_pktp); 23203 mutex_enter(SD_MUTEX(un)); 23204 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23205 mutex_exit(SD_MUTEX(un)); 23206 scsi_free_consistent_buf(wr_bp); 23207 } else { 23208 mutex_exit(SD_MUTEX(un)); 23209 } 23210 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23211 return (err); 23212 } 23213 23214 /* 23215 * Function: sd_scsi_poll() 23216 * 23217 * Description: This is a wrapper for the scsi_poll call. 23218 * 23219 * Arguments: sd_lun - The unit structure 23220 * scsi_pkt - The scsi packet being sent to the device. 23221 * 23222 * Return Code: 0 - Command completed successfully with good status 23223 * -1 - Command failed. This could indicate a check condition 23224 * or other status value requiring recovery action. 23225 * 23226 */ 23227 23228 static int 23229 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23230 { 23231 int status; 23232 23233 ASSERT(un != NULL); 23234 ASSERT(!mutex_owned(SD_MUTEX(un))); 23235 ASSERT(pktp != NULL); 23236 23237 status = SD_SUCCESS; 23238 23239 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23240 pktp->pkt_flags |= un->un_tagflags; 23241 pktp->pkt_flags &= ~FLAG_NODISCON; 23242 } 23243 23244 status = sd_ddi_scsi_poll(pktp); 23245 /* 23246 * Scsi_poll returns 0 (success) if the command completes and the 23247 * status block is STATUS_GOOD. We should only check errors if this 23248 * condition is not true. Even then we should send our own request 23249 * sense packet only if we have a check condition and auto 23250 * request sense has not been performed by the hba. 23251 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23252 */ 23253 if ((status != SD_SUCCESS) && 23254 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23255 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23256 (pktp->pkt_reason != CMD_DEV_GONE)) 23257 (void) sd_send_polled_RQS(un); 23258 23259 return (status); 23260 } 23261 23262 /* 23263 * Function: sd_send_polled_RQS() 23264 * 23265 * Description: This sends the request sense command to a device. 
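 *		The pre-allocated request sense packet and buffer
 *		(un_rqs_pktp, un_rqs_bp) are reused for this, guarded by
 *		the un_sense_isbusy flag, and the packet is sent in
 *		polled mode through sd_ddi_scsi_poll().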
23266 * 23267 * Arguments: sd_lun - The unit structure 23268 * 23269 * Return Code: 0 - Command completed successfully with good status 23270 * -1 - Command failed. 23271 * 23272 */ 23273 23274 static int 23275 sd_send_polled_RQS(struct sd_lun *un) 23276 { 23277 int ret_val; 23278 struct scsi_pkt *rqs_pktp; 23279 struct buf *rqs_bp; 23280 23281 ASSERT(un != NULL); 23282 ASSERT(!mutex_owned(SD_MUTEX(un))); 23283 23284 ret_val = SD_SUCCESS; 23285 23286 rqs_pktp = un->un_rqs_pktp; 23287 rqs_bp = un->un_rqs_bp; 23288 23289 mutex_enter(SD_MUTEX(un)); 23290 23291 if (un->un_sense_isbusy) { 23292 ret_val = SD_FAILURE; 23293 mutex_exit(SD_MUTEX(un)); 23294 return (ret_val); 23295 } 23296 23297 /* 23298 * If the request sense buffer (and packet) is not in use, 23299 * let's set the un_sense_isbusy flag and send our packet 23300 */ 23301 un->un_sense_isbusy = 1; 23302 rqs_pktp->pkt_resid = 0; 23303 rqs_pktp->pkt_reason = 0; 23304 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23305 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23306 23307 mutex_exit(SD_MUTEX(un)); 23308 23309 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23310 " 0x%p\n", rqs_bp->b_un.b_addr); 23311 23312 /* 23313 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23314 * axle - it has a call into us! 23315 */ 23316 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23317 SD_INFO(SD_LOG_COMMON, un, 23318 "sd_send_polled_RQS: RQS failed\n"); 23319 } 23320 23321 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23322 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23323 23324 mutex_enter(SD_MUTEX(un)); 23325 un->un_sense_isbusy = 0; 23326 mutex_exit(SD_MUTEX(un)); 23327 23328 return (ret_val); 23329 } 23330 23331 /* 23332 * Defines needed for localized version of the scsi_poll routine. 23333 */ 23334 #define SD_CSEC 10000 /* usecs */ 23335 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 23336 23337 23338 /* 23339 * Function: sd_ddi_scsi_poll() 23340 * 23341 * Description: Localized version of the scsi_poll routine. The purpose is to 23342 * send a scsi_pkt to a device as a polled command. This version 23343 * is to ensure more robust handling of transport errors. 23344 * Specifically, this routine handles the not-ready to coming-ready 23345 * transition following power-up or reset of Sonoma devices. This 23346 * can take up to 45 seconds for power-on and 20 seconds for reset 23347 * of a Sonoma lun. 23348 * 23349 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23350 * 23351 * Return Code: 0 - Command completed successfully with good status 23352 * -1 - Command failed. 23353 * 23354 */ 23355 23356 static int 23357 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23358 { 23359 int busy_count; 23360 int timeout; 23361 int rval = SD_FAILURE; 23362 int savef; 23363 uint8_t *sensep; 23364 long savet; 23365 void (*savec)(); 23366 /* 23367 * The following is defined in machdep.c and is used in determining if 23368 * the scsi transport system will do polled I/O instead of interrupt 23369 * I/O when called from xx_dump().
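 * When do_polled_io is set, or when we are running on an interrupt
 * thread, the retry delays in the loop below busy-wait with
 * drv_usecwait() instead of calling delay(), since delay() depends
 * on the clock and scheduler being available.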
23370 */ 23371 extern int do_polled_io; 23372 23373 /* 23374 * save old flags in pkt, to restore at end 23375 */ 23376 savef = pkt->pkt_flags; 23377 savec = pkt->pkt_comp; 23378 savet = pkt->pkt_time; 23379 23380 pkt->pkt_flags |= FLAG_NOINTR; 23381 23382 /* 23383 * XXX there is nothing in the SCSA spec that states that we should not 23384 * do a callback for polled cmds; however, removing this will break sd 23385 * and probably other target drivers 23386 */ 23387 pkt->pkt_comp = NULL; 23388 23389 /* 23390 * we don't like a polled command without timeout. 23391 * 60 seconds seems long enough. 23392 */ 23393 if (pkt->pkt_time == 0) { 23394 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23395 } 23396 23397 /* 23398 * Send polled cmd. 23399 * 23400 * We do some error recovery for various errors. Tran_busy, 23401 * queue full, and non-dispatched commands are retried every 10 msec. 23402 * as they are typically transient failures. Busy status and Not 23403 * Ready are retried every second as this status takes a while to 23404 * change. Unit attention is retried for pkt_time (60) times 23405 * with no delay. 23406 */ 23407 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 23408 23409 for (busy_count = 0; busy_count < timeout; busy_count++) { 23410 int rc; 23411 int poll_delay; 23412 23413 /* 23414 * Initialize pkt status variables. 23415 */ 23416 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23417 23418 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23419 if (rc != TRAN_BUSY) { 23420 /* Transport failed - give up. */ 23421 break; 23422 } else { 23423 /* Transport busy - try again. */ 23424 poll_delay = 1 * SD_CSEC; /* 10 msec */ 23425 } 23426 } else { 23427 /* 23428 * Transport accepted - check pkt status. 23429 */ 23430 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23431 if (pkt->pkt_reason == CMD_CMPLT && 23432 rc == STATUS_CHECK && 23433 pkt->pkt_state & STATE_ARQ_DONE) { 23434 struct scsi_arq_status *arqstat = 23435 (struct scsi_arq_status *)(pkt->pkt_scbp); 23436 23437 sensep = (uint8_t *)&arqstat->sts_sensedata; 23438 } else { 23439 sensep = NULL; 23440 } 23441 23442 if ((pkt->pkt_reason == CMD_CMPLT) && 23443 (rc == STATUS_GOOD)) { 23444 /* No error - we're done */ 23445 rval = SD_SUCCESS; 23446 break; 23447 23448 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23449 /* Lost connection - give up */ 23450 break; 23451 23452 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23453 (pkt->pkt_state == 0)) { 23454 /* Pkt not dispatched - try again. */ 23455 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23456 23457 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23458 (rc == STATUS_QFULL)) { 23459 /* Queue full - try again. */ 23460 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 23461 23462 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23463 (rc == STATUS_BUSY)) { 23464 /* Busy - try again. */ 23465 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23466 busy_count += (SD_SEC_TO_CSEC - 1); 23467 23468 } else if ((sensep != NULL) && 23469 (scsi_sense_key(sensep) == 23470 KEY_UNIT_ATTENTION)) { 23471 /* Unit Attention - try again */ 23472 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23473 continue; 23474 23475 } else if ((sensep != NULL) && 23476 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23477 (scsi_sense_asc(sensep) == 0x04) && 23478 (scsi_sense_ascq(sensep) == 0x01)) { 23479 /* Not ready -> ready - try again. */ 23480 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23481 busy_count += (SD_SEC_TO_CSEC - 1); 23482 23483 } else { 23484 /* BAD status - give up. 
*/ 23485 break; 23486 } 23487 } 23488 23489 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23490 !do_polled_io) { 23491 delay(drv_usectohz(poll_delay)); 23492 } else { 23493 /* we busy wait during cpr_dump or interrupt threads */ 23494 drv_usecwait(poll_delay); 23495 } 23496 } 23497 23498 pkt->pkt_flags = savef; 23499 pkt->pkt_comp = savec; 23500 pkt->pkt_time = savet; 23501 return (rval); 23502 } 23503 23504 23505 /* 23506 * Function: sd_persistent_reservation_in_read_keys 23507 * 23508 * Description: This routine is the driver entry point for handling CD-ROM 23509 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23510 * by sending the SCSI-3 PRIN commands to the device. 23511 * Processes the read keys command response by copying the 23512 * reservation key information into the user provided buffer. 23513 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23514 * 23515 * Arguments: un - Pointer to soft state struct for the target. 23516 * usrp - user provided pointer to multihost Persistent In Read 23517 * Keys structure (mhioc_inkeys_t) 23518 * flag - this argument is a pass through to ddi_copyxxx() 23519 * directly from the mode argument of ioctl(). 23520 * 23521 * Return Code: 0 - Success 23522 * EACCES 23523 * ENOTSUP 23524 * errno return code from sd_send_scsi_cmd() 23525 * 23526 * Context: Can sleep. Does not return until command is completed. 23527 */ 23528 23529 static int 23530 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23531 mhioc_inkeys_t *usrp, int flag) 23532 { 23533 #ifdef _MULTI_DATAMODEL 23534 struct mhioc_key_list32 li32; 23535 #endif 23536 sd_prin_readkeys_t *in; 23537 mhioc_inkeys_t *ptr; 23538 mhioc_key_list_t li; 23539 uchar_t *data_bufp; 23540 int data_len; 23541 int rval; 23542 size_t copysz; 23543 23544 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23545 return (EINVAL); 23546 } 23547 bzero(&li, sizeof (mhioc_key_list_t)); 23548 23549 /* 23550 * Get the listsize from user 23551 */ 23552 #ifdef _MULTI_DATAMODEL 23553 23554 switch (ddi_model_convert_from(flag & FMODELS)) { 23555 case DDI_MODEL_ILP32: 23556 copysz = sizeof (struct mhioc_key_list32); 23557 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23558 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23559 "sd_persistent_reservation_in_read_keys: " 23560 "failed ddi_copyin: mhioc_key_list32_t\n"); 23561 rval = EFAULT; 23562 goto done; 23563 } 23564 li.listsize = li32.listsize; 23565 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23566 break; 23567 23568 case DDI_MODEL_NONE: 23569 copysz = sizeof (mhioc_key_list_t); 23570 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23571 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23572 "sd_persistent_reservation_in_read_keys: " 23573 "failed ddi_copyin: mhioc_key_list_t\n"); 23574 rval = EFAULT; 23575 goto done; 23576 } 23577 break; 23578 } 23579 23580 #else /* ! 
_MULTI_DATAMODEL */ 23581 copysz = sizeof (mhioc_key_list_t); 23582 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23583 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23584 "sd_persistent_reservation_in_read_keys: " 23585 "failed ddi_copyin: mhioc_key_list_t\n"); 23586 rval = EFAULT; 23587 goto done; 23588 } 23589 #endif 23590 23591 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23592 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23593 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23594 23595 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23596 data_len, data_bufp)) != 0) { 23597 goto done; 23598 } 23599 in = (sd_prin_readkeys_t *)data_bufp; 23600 ptr->generation = BE_32(in->generation); 23601 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23602 23603 /* 23604 * Return the min(listsize, listlen) keys 23605 */ 23606 #ifdef _MULTI_DATAMODEL 23607 23608 switch (ddi_model_convert_from(flag & FMODELS)) { 23609 case DDI_MODEL_ILP32: 23610 li32.listlen = li.listlen; 23611 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23612 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23613 "sd_persistent_reservation_in_read_keys: " 23614 "failed ddi_copyout: mhioc_key_list32_t\n"); 23615 rval = EFAULT; 23616 goto done; 23617 } 23618 break; 23619 23620 case DDI_MODEL_NONE: 23621 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23622 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23623 "sd_persistent_reservation_in_read_keys: " 23624 "failed ddi_copyout: mhioc_key_list_t\n"); 23625 rval = EFAULT; 23626 goto done; 23627 } 23628 break; 23629 } 23630 23631 #else /* ! _MULTI_DATAMODEL */ 23632 23633 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23634 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23635 "sd_persistent_reservation_in_read_keys: " 23636 "failed ddi_copyout: mhioc_key_list_t\n"); 23637 rval = EFAULT; 23638 goto done; 23639 } 23640 23641 #endif /* _MULTI_DATAMODEL */ 23642 23643 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23644 li.listsize * MHIOC_RESV_KEY_SIZE); 23645 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23646 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23647 "sd_persistent_reservation_in_read_keys: " 23648 "failed ddi_copyout: keylist\n"); 23649 rval = EFAULT; 23650 } 23651 done: 23652 kmem_free(data_bufp, data_len); 23653 return (rval); 23654 } 23655 23656 23657 /* 23658 * Function: sd_persistent_reservation_in_read_resv 23659 * 23660 * Description: This routine is the driver entry point for handling CD-ROM 23661 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23662 * by sending the SCSI-3 PRIN commands to the device. 23663 * Process the read persistent reservations command response by 23664 * copying the reservation information into the user provided 23665 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is 23666 * implemented. 23667 * 23668 * Arguments: un - Pointer to soft state struct for the target. 23669 * usrp - user provided pointer to multihost Persistent In Read 23670 * Reservations structure (mhioc_inresvs_t) 23671 * flag - this argument is a pass through to ddi_copyxxx() 23672 * directly from the mode argument of ioctl(). 23673 * 23674 * Return Code: 0 - Success 23675 * EACCES 23676 * ENOTSUP 23677 * errno return code from sd_send_scsi_cmd() 23678 * 23679 * Context: Can sleep. Does not return until command is completed.
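 *		A minimal userland sketch (hypothetical; not part of this
 *		driver, error handling omitted) of how this entry point is
 *		typically driven, assuming <sys/mhd.h> and <fcntl.h>:
 *
 *		    mhioc_resv_desc_t       descs[4];
 *		    mhioc_resv_desc_list_t  list;
 *		    mhioc_inresvs_t         arg;
 *		    int                     fd;
 *
 *		    list.listsize = 4;
 *		    list.list = descs;
 *		    arg.li = &list;
 *		    fd = open("/dev/rdsk/c1t0d0s2", O_RDWR);
 *		    if (ioctl(fd, MHIOCGRP_INRESV, &arg) == 0) {
 *		        use(arg.generation, list.listlen, descs);
 *		    }
 *
 *		where use() is a placeholder for the caller's processing
 *		of up to min(listsize, listlen) returned descriptors.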
23679 */ 23680 23681 static int 23682 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23683 mhioc_inresvs_t *usrp, int flag) 23684 { 23685 #ifdef _MULTI_DATAMODEL 23686 struct mhioc_resv_desc_list32 resvlist32; 23687 #endif 23688 sd_prin_readresv_t *in; 23689 mhioc_inresvs_t *ptr; 23690 sd_readresv_desc_t *readresv_ptr; 23691 mhioc_resv_desc_list_t resvlist; 23692 mhioc_resv_desc_t resvdesc; 23693 uchar_t *data_bufp; 23694 int data_len; 23695 int rval; 23696 int i; 23697 size_t copysz; 23698 mhioc_resv_desc_t *bufp; 23699 23700 if ((ptr = usrp) == NULL) { 23701 return (EINVAL); 23702 } 23703 23704 /* 23705 * Get the listsize from user 23706 */ 23707 #ifdef _MULTI_DATAMODEL 23708 switch (ddi_model_convert_from(flag & FMODELS)) { 23709 case DDI_MODEL_ILP32: 23710 copysz = sizeof (struct mhioc_resv_desc_list32); 23711 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23712 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23713 "sd_persistent_reservation_in_read_resv: " 23714 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23715 rval = EFAULT; 23716 goto done; 23717 } 23718 resvlist.listsize = resvlist32.listsize; 23719 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23720 break; 23721 23722 case DDI_MODEL_NONE: 23723 copysz = sizeof (mhioc_resv_desc_list_t); 23724 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23725 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23726 "sd_persistent_reservation_in_read_resv: " 23727 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23728 rval = EFAULT; 23729 goto done; 23730 } 23731 break; 23732 } 23733 #else /* ! _MULTI_DATAMODEL */ 23734 copysz = sizeof (mhioc_resv_desc_list_t); 23735 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23736 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23737 "sd_persistent_reservation_in_read_resv: " 23738 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23739 rval = EFAULT; 23740 goto done; 23741 } 23742 #endif /* ! _MULTI_DATAMODEL */ 23743 23744 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23745 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23746 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23747 23748 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23749 data_len, data_bufp)) != 0) { 23750 goto done; 23751 } 23752 in = (sd_prin_readresv_t *)data_bufp; 23753 ptr->generation = BE_32(in->generation); 23754 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23755 23756 /* 23757 * Return the min(listsize, listlen) keys 23758 */ 23759 #ifdef _MULTI_DATAMODEL 23760 23761 switch (ddi_model_convert_from(flag & FMODELS)) { 23762 case DDI_MODEL_ILP32: 23763 resvlist32.listlen = resvlist.listlen; 23764 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23765 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23766 "sd_persistent_reservation_in_read_resv: " 23767 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23768 rval = EFAULT; 23769 goto done; 23770 } 23771 break; 23772 23773 case DDI_MODEL_NONE: 23774 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23775 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23776 "sd_persistent_reservation_in_read_resv: " 23777 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23778 rval = EFAULT; 23779 goto done; 23780 } 23781 break; 23782 } 23783 23784 #else /* ! _MULTI_DATAMODEL */ 23785 23786 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23787 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23788 "sd_persistent_reservation_in_read_resv: " 23789 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23790 rval = EFAULT; 23791 goto done; 23792 } 23793 23794 #endif /* !
_MULTI_DATAMODEL */ 23795 23796 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23797 bufp = resvlist.list; 23798 copysz = sizeof (mhioc_resv_desc_t); 23799 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23800 i++, readresv_ptr++, bufp++) { 23801 23802 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23803 MHIOC_RESV_KEY_SIZE); 23804 resvdesc.type = readresv_ptr->type; 23805 resvdesc.scope = readresv_ptr->scope; 23806 resvdesc.scope_specific_addr = 23807 BE_32(readresv_ptr->scope_specific_addr); 23808 23809 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23810 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23811 "sd_persistent_reservation_in_read_resv: " 23812 "failed ddi_copyout: resvlist\n"); 23813 rval = EFAULT; 23814 goto done; 23815 } 23816 } 23817 done: 23818 kmem_free(data_bufp, data_len); 23819 return (rval); 23820 } 23821 23822 23823 /* 23824 * Function: sr_change_blkmode() 23825 * 23826 * Description: This routine is the driver entry point for handling CD-ROM 23827 * block mode ioctl requests. Support for returning and changing 23828 * the current block size in use by the device is implemented. The 23829 * LBA size is changed via a MODE SELECT Block Descriptor. 23830 * 23831 * This routine issues a mode sense with an allocation length of 23832 * 12 bytes for the mode page header and a single block descriptor. 23833 * 23834 * Arguments: dev - the device 'dev_t' 23835 * cmd - the request type; one of CDROMGBLKMODE (get) or 23836 * CDROMSBLKMODE (set) 23837 * data - current block size or requested block size 23838 * flag - this argument is a pass through to ddi_copyxxx() directly 23839 * from the mode argument of ioctl(). 23840 * 23841 * Return Code: the code returned by sd_send_scsi_cmd() 23842 * EINVAL if invalid arguments are provided 23843 * EFAULT if ddi_copyxxx() fails 23844 * ENXIO if fail ddi_get_soft_state 23845 * EIO if invalid mode sense block descriptor length 23846 * 23847 */ 23848 23849 static int 23850 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23851 { 23852 struct sd_lun *un = NULL; 23853 struct mode_header *sense_mhp, *select_mhp; 23854 struct block_descriptor *sense_desc, *select_desc; 23855 int current_bsize; 23856 int rval = EINVAL; 23857 uchar_t *sense = NULL; 23858 uchar_t *select = NULL; 23859 23860 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23861 23862 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23863 return (ENXIO); 23864 } 23865 23866 /* 23867 * The block length is changed via the Mode Select block descriptor; 23868 * the "Read/Write Error Recovery" mode page (0x1) contents are not 23869 * actually required as part of this routine. Therefore the mode sense 23870 * allocation length is specified to be the length of a mode page 23871 * header and a block descriptor.
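 * Concretely, in the standard MODE SENSE(6)/MODE SELECT(6) layout,
 * the BUFLEN_CHG_BLK_MODE buffer is a 4-byte mode parameter header
 * (MODE_HEADER_LENGTH) followed by one 8-byte block descriptor
 * (MODE_BLK_DESC_LENGTH), whose blksize_hi/mid/lo bytes carry the
 * 24-bit logical block size.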
23872 */ 23873 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23874 23875 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23876 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23878 "sr_change_blkmode: Mode Sense Failed\n"); 23879 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23880 return (rval); 23881 } 23882 23883 /* Check the block descriptor len to handle only 1 block descriptor */ 23884 sense_mhp = (struct mode_header *)sense; 23885 if ((sense_mhp->bdesc_length == 0) || 23886 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23887 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23888 "sr_change_blkmode: Mode Sense returned invalid block" 23889 " descriptor length\n"); 23890 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23891 return (EIO); 23892 } 23893 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23894 current_bsize = ((sense_desc->blksize_hi << 16) | 23895 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23896 23897 /* Process command */ 23898 switch (cmd) { 23899 case CDROMGBLKMODE: 23900 /* Return the block size obtained during the mode sense */ 23901 if (ddi_copyout(&current_bsize, (void *)data, 23902 sizeof (int), flag) != 0) 23903 rval = EFAULT; 23904 break; 23905 case CDROMSBLKMODE: 23906 /* Validate the requested block size */ 23907 switch (data) { 23908 case CDROM_BLK_512: 23909 case CDROM_BLK_1024: 23910 case CDROM_BLK_2048: 23911 case CDROM_BLK_2056: 23912 case CDROM_BLK_2336: 23913 case CDROM_BLK_2340: 23914 case CDROM_BLK_2352: 23915 case CDROM_BLK_2368: 23916 case CDROM_BLK_2448: 23917 case CDROM_BLK_2646: 23918 case CDROM_BLK_2647: 23919 break; 23920 default: 23921 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23922 "sr_change_blkmode: " 23923 "Block Size '%ld' Not Supported\n", data); 23924 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23925 return (EINVAL); 23926 } 23927 23928 /* 23929 * The current block size matches the requested block size so 23930 * there is no need to send the mode select to change the size 23931 */ 23932 if (current_bsize == data) { 23933 break; 23934 } 23935 23936 /* Build the select data for the requested block size */ 23937 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23938 select_mhp = (struct mode_header *)select; 23939 select_desc = 23940 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23941 /* 23942 * The LBA size is changed via the block descriptor, so the 23943 * descriptor is built according to the user data 23944 */ 23945 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23946 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23947 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23948 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23949 23950 /* Send the mode select for the requested block size */ 23951 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23952 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23953 SD_PATH_STANDARD)) != 0) { 23954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23955 "sr_change_blkmode: Mode Select Failed\n"); 23956 /* 23957 * The mode select failed for the requested block size, 23958 * so reset the data for the original block size and 23959 * send it to the target. The error is indicated by the 23960 * return value for the failed mode select.
23961 */ 23962 select_desc->blksize_hi = sense_desc->blksize_hi; 23963 select_desc->blksize_mid = sense_desc->blksize_mid; 23964 select_desc->blksize_lo = sense_desc->blksize_lo; 23965 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23966 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23967 SD_PATH_STANDARD); 23968 } else { 23969 ASSERT(!mutex_owned(SD_MUTEX(un))); 23970 mutex_enter(SD_MUTEX(un)); 23971 sd_update_block_info(un, (uint32_t)data, 0); 23972 mutex_exit(SD_MUTEX(un)); 23973 } 23974 break; 23975 default: 23976 /* should not reach here, but check anyway */ 23977 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23978 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23979 rval = EINVAL; 23980 break; 23981 } 23982 23983 if (select) { 23984 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23985 } 23986 if (sense) { 23987 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23988 } 23989 return (rval); 23990 } 23991 23992 23993 /* 23994 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23995 * implement driver support for getting and setting the CD speed. The command 23996 * set used will be based on the device type. If the device has not been 23997 * identified as MMC the Toshiba vendor specific mode page will be used. If 23998 * the device is MMC but does not support the Real Time Streaming feature 23999 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24000 * be used to read the speed. 24001 */ 24002 24003 /* 24004 * Function: sr_change_speed() 24005 * 24006 * Description: This routine is the driver entry point for handling CD-ROM 24007 * drive speed ioctl requests for devices supporting the Toshiba 24008 * vendor specific drive speed mode page. Support for returning 24009 * and changing the current drive speed in use by the device is 24010 * implemented. 24011 * 24012 * Arguments: dev - the device 'dev_t' 24013 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24014 * CDROMSDRVSPEED (set) 24015 * data - current drive speed or requested drive speed 24016 * flag - this argument is a pass through to ddi_copyxxx() directly 24017 * from the mode argument of ioctl(). 24018 * 24019 * Return Code: the code returned by sd_send_scsi_cmd() 24020 * EINVAL if invalid arguments are provided 24021 * EFAULT if ddi_copyxxx() fails 24022 * ENXIO if fail ddi_get_soft_state 24023 * EIO if invalid mode sense block descriptor length 24024 */ 24025 24026 static int 24027 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24028 { 24029 struct sd_lun *un = NULL; 24030 struct mode_header *sense_mhp, *select_mhp; 24031 struct mode_speed *sense_page, *select_page; 24032 int current_speed; 24033 int rval = EINVAL; 24034 int bd_len; 24035 uchar_t *sense = NULL; 24036 uchar_t *select = NULL; 24037 24038 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24039 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24040 return (ENXIO); 24041 } 24042 24043 /* 24044 * Note: The drive speed is being modified here according to a Toshiba 24045 * vendor specific mode page (0x31). 
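 *
 * Usage sketch (editor's addition; hypothetical userland caller):
 * CDROMGDRVSPEED passes a pointer for the result, while
 * CDROMSDRVSPEED passes the requested speed code by value.
 *
 *	int speed;
 *	(void) ioctl(fd, CDROMGDRVSPEED, &speed);
 *	(void) ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED);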
24046 */ 24047 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24048 24049 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24050 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24051 SD_PATH_STANDARD)) != 0) { 24052 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24053 "sr_change_speed: Mode Sense Failed\n"); 24054 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24055 return (rval); 24056 } 24057 sense_mhp = (struct mode_header *)sense; 24058 24059 /* Check the block descriptor len to handle only 1 block descriptor */ 24060 bd_len = sense_mhp->bdesc_length; 24061 if (bd_len > MODE_BLK_DESC_LENGTH) { 24062 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24063 "sr_change_speed: Mode Sense returned invalid block " 24064 "descriptor length\n"); 24065 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24066 return (EIO); 24067 } 24068 24069 sense_page = (struct mode_speed *) 24070 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24071 current_speed = sense_page->speed; 24072 24073 /* Process command */ 24074 switch (cmd) { 24075 case CDROMGDRVSPEED: 24076 /* Return the drive speed obtained during the mode sense */ 24077 if (current_speed == 0x2) { 24078 current_speed = CDROM_TWELVE_SPEED; 24079 } 24080 if (ddi_copyout(&current_speed, (void *)data, 24081 sizeof (int), flag) != 0) { 24082 rval = EFAULT; 24083 } 24084 break; 24085 case CDROMSDRVSPEED: 24086 /* Validate the requested drive speed */ 24087 switch ((uchar_t)data) { 24088 case CDROM_TWELVE_SPEED: 24089 data = 0x2; 24090 /*FALLTHROUGH*/ 24091 case CDROM_NORMAL_SPEED: 24092 case CDROM_DOUBLE_SPEED: 24093 case CDROM_QUAD_SPEED: 24094 case CDROM_MAXIMUM_SPEED: 24095 break; 24096 default: 24097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24098 "sr_change_speed: " 24099 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24100 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24101 return (EINVAL); 24102 } 24103 24104 /* 24105 * The current drive speed matches the requested drive speed so 24106 * there is no need to send the mode select to change the speed 24107 */ 24108 if (current_speed == data) { 24109 break; 24110 } 24111 24112 /* Build the select data for the requested drive speed */ 24113 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24114 select_mhp = (struct mode_header *)select; 24115 select_mhp->bdesc_length = 0; 24116 select_page = 24117 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24120 select_page->mode_page.code = CDROM_MODE_SPEED; 24121 select_page->mode_page.length = 2; 24122 select_page->speed = (uchar_t)data; 24123 24124 /* Send the mode select for the requested drive speed */ 24125 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24126 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24127 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24128 /* 24129 * The mode select failed for the requested drive speed, 24130 * so reset the data for the original drive speed and 24131 * send it to the target. The error is indicated by the 24132 * return value for the failed mode select.
24133 */ 24134 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24135 "sr_change_speed: Mode Select Failed\n"); 24136 select_page->speed = sense_page->speed; 24137 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24138 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24139 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24140 } 24141 break; 24142 default: 24143 /* should not reach here, but check anyway */ 24144 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24145 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24146 rval = EINVAL; 24147 break; 24148 } 24149 24150 if (select) { 24151 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24152 } 24153 if (sense) { 24154 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24155 } 24156 24157 return (rval); 24158 } 24159 24160 24161 /* 24162 * Function: sr_atapi_change_speed() 24163 * 24164 * Description: This routine is the driver entry point for handling CD-ROM 24165 * drive speed ioctl requests for MMC devices that do not support 24166 * the Real Time Streaming feature (0x107). 24167 * 24168 * Note: This routine will use the SET SPEED command which may not 24169 * be supported by all devices. 24170 * 24171 * Arguments: dev - the device 'dev_t' 24172 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24173 * CDROMSDRVSPEED (set) 24174 * data - current drive speed or requested drive speed 24175 * flag - this argument is a pass through to ddi_copyxxx() directly 24176 * from the mode argument of ioctl(). 24177 * 24178 * Return Code: the code returned by sd_send_scsi_cmd() 24179 * EINVAL if invalid arguments are provided 24180 * EFAULT if ddi_copyxxx() fails 24181 * ENXIO if fail ddi_get_soft_state 24182 * EIO if invalid mode sense block descriptor length 24183 */ 24184 24185 static int 24186 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24187 { 24188 struct sd_lun *un; 24189 struct uscsi_cmd *com = NULL; 24190 struct mode_header_grp2 *sense_mhp; 24191 uchar_t *sense_page; 24192 uchar_t *sense = NULL; 24193 char cdb[CDB_GROUP5]; 24194 int bd_len; 24195 int current_speed = 0; 24196 int max_speed = 0; 24197 int rval; 24198 24199 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24200 24201 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24202 return (ENXIO); 24203 } 24204 24205 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24206 24207 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24208 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24209 SD_PATH_STANDARD)) != 0) { 24210 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24211 "sr_atapi_change_speed: Mode Sense Failed\n"); 24212 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24213 return (rval); 24214 } 24215 24216 /* Check the block descriptor len to handle only 1 block descriptor */ 24217 sense_mhp = (struct mode_header_grp2 *)sense; 24218 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24219 if (bd_len > MODE_BLK_DESC_LENGTH) { 24220 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24221 "sr_atapi_change_speed: Mode Sense returned invalid " 24222 "block descriptor length\n"); 24223 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24224 return (EIO); 24225 } 24226 24227 /* Calculate the current and maximum drive speeds */ 24228 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24229 current_speed = (sense_page[14] << 8) | sense_page[15]; 24230 max_speed = (sense_page[8] << 8) | sense_page[9]; 24231 24232 /* Process the command */ 24233 switch (cmd) { 24234 case CDROMGDRVSPEED: 24235 current_speed /= SD_SPEED_1X; 24236 if
(ddi_copyout(&current_speed, (void *)data, 24237 sizeof (int), flag) != 0) 24238 rval = EFAULT; 24239 break; 24240 case CDROMSDRVSPEED: 24241 /* Convert the speed code to KB/sec */ 24242 switch ((uchar_t)data) { 24243 case CDROM_NORMAL_SPEED: 24244 current_speed = SD_SPEED_1X; 24245 break; 24246 case CDROM_DOUBLE_SPEED: 24247 current_speed = 2 * SD_SPEED_1X; 24248 break; 24249 case CDROM_QUAD_SPEED: 24250 current_speed = 4 * SD_SPEED_1X; 24251 break; 24252 case CDROM_TWELVE_SPEED: 24253 current_speed = 12 * SD_SPEED_1X; 24254 break; 24255 case CDROM_MAXIMUM_SPEED: 24256 current_speed = 0xffff; 24257 break; 24258 default: 24259 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24260 "sr_atapi_change_speed: invalid drive speed %d\n", 24261 (uchar_t)data); 24262 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24263 return (EINVAL); 24264 } 24265 24266 /* Check the request against the drive's max speed. */ 24267 if (current_speed != 0xffff) { 24268 if (current_speed > max_speed) { 24269 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24270 return (EINVAL); 24271 } 24272 } 24273 24274 /* 24275 * Build and send the SET SPEED command 24276 * 24277 * Note: The SET SPEED (0xBB) command used in this routine is 24278 * obsolete per the SCSI MMC spec but still supported in the 24279 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 24280 * therefore the command is still implemented in this routine. 24281 */ 24282 bzero(cdb, sizeof (cdb)); 24283 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24284 cdb[2] = (uchar_t)(current_speed >> 8); 24285 cdb[3] = (uchar_t)current_speed; 24286 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24287 com->uscsi_cdb = (caddr_t)cdb; 24288 com->uscsi_cdblen = CDB_GROUP5; 24289 com->uscsi_bufaddr = NULL; 24290 com->uscsi_buflen = 0; 24291 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24292 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24293 break; 24294 default: 24295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24296 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24297 rval = EINVAL; 24298 } 24299 24300 if (sense) { 24301 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24302 } 24303 if (com) { 24304 kmem_free(com, sizeof (*com)); 24305 } 24306 return (rval); 24307 } 24308 24309 24310 /* 24311 * Function: sr_pause_resume() 24312 * 24313 * Description: This routine is the driver entry point for handling CD-ROM 24314 * pause/resume ioctl requests. This only affects the audio play 24315 * operation. 24316 * 24317 * Arguments: dev - the device 'dev_t' 24318 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24319 * for setting the resume bit of the cdb.
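 *
 * Example (editor's sketch; hypothetical userland caller): pause and
 * resume map to two argument-less calls, since only the resume bit
 * of the cdb differs between them.
 *
 *	(void) ioctl(fd, CDROMPAUSE);	-- issues cdb[8] = 0
 *	(void) ioctl(fd, CDROMRESUME);	-- issues cdb[8] = 1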
24320 * 24321 * Return Code: the code returned by sd_send_scsi_cmd() 24322 * EINVAL if invalid mode specified 24323 * 24324 */ 24325 24326 static int 24327 sr_pause_resume(dev_t dev, int cmd) 24328 { 24329 struct sd_lun *un; 24330 struct uscsi_cmd *com; 24331 char cdb[CDB_GROUP1]; 24332 int rval; 24333 24334 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24335 return (ENXIO); 24336 } 24337 24338 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24339 bzero(cdb, CDB_GROUP1); 24340 cdb[0] = SCMD_PAUSE_RESUME; 24341 switch (cmd) { 24342 case CDROMRESUME: 24343 cdb[8] = 1; 24344 break; 24345 case CDROMPAUSE: 24346 cdb[8] = 0; 24347 break; 24348 default: 24349 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24350 " Command '%x' Not Supported\n", cmd); 24351 rval = EINVAL; 24352 goto done; 24353 } 24354 24355 com->uscsi_cdb = cdb; 24356 com->uscsi_cdblen = CDB_GROUP1; 24357 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24358 24359 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24360 SD_PATH_STANDARD); 24361 24362 done: 24363 kmem_free(com, sizeof (*com)); 24364 return (rval); 24365 } 24366 24367 24368 /* 24369 * Function: sr_play_msf() 24370 * 24371 * Description: This routine is the driver entry point for handling CD-ROM 24372 * ioctl requests to output the audio signals at the specified 24373 * starting address and continue the audio play until the specified 24374 * ending address (CDROMPLAYMSF). The address is in Minute Second 24375 * Frame (MSF) format. 24376 * 24377 * Arguments: dev - the device 'dev_t' 24378 * data - pointer to user provided audio msf structure, 24379 * specifying start/end addresses. 24380 * flag - this argument is a pass through to ddi_copyxxx() 24381 * directly from the mode argument of ioctl().
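 *
 * Example (editor's sketch; hypothetical userland caller): play from
 * 0:02:00 through 5:00:00 using the cdrom_msf fields that this
 * routine copies into the PLAY AUDIO MSF cdb.
 *
 *	struct cdrom_msf msf;
 *	msf.cdmsf_min0 = 0;  msf.cdmsf_sec0 = 2;  msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 5;  msf.cdmsf_sec1 = 0;  msf.cdmsf_frame1 = 0;
 *	(void) ioctl(fd, CDROMPLAYMSF, &msf);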
24382 * 24383 * Return Code: the code returned by sd_send_scsi_cmd() 24384 * EFAULT if ddi_copyxxx() fails 24385 * ENXIO if fail ddi_get_soft_state 24386 * EINVAL if data pointer is NULL 24387 */ 24388 24389 static int 24390 sr_play_msf(dev_t dev, caddr_t data, int flag) 24391 { 24392 struct sd_lun *un; 24393 struct uscsi_cmd *com; 24394 struct cdrom_msf msf_struct; 24395 struct cdrom_msf *msf = &msf_struct; 24396 char cdb[CDB_GROUP1]; 24397 int rval; 24398 24399 if (data == NULL) { 24400 return (EINVAL); 24401 } 24402 24403 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24404 return (ENXIO); 24405 } 24406 24407 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24408 return (EFAULT); 24409 } 24410 24411 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24412 bzero(cdb, CDB_GROUP1); 24413 cdb[0] = SCMD_PLAYAUDIO_MSF; 24414 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24415 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24416 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24417 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24418 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24419 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24420 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24421 } else { 24422 cdb[3] = msf->cdmsf_min0; 24423 cdb[4] = msf->cdmsf_sec0; 24424 cdb[5] = msf->cdmsf_frame0; 24425 cdb[6] = msf->cdmsf_min1; 24426 cdb[7] = msf->cdmsf_sec1; 24427 cdb[8] = msf->cdmsf_frame1; 24428 } 24429 com->uscsi_cdb = cdb; 24430 com->uscsi_cdblen = CDB_GROUP1; 24431 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24432 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24433 SD_PATH_STANDARD); 24434 kmem_free(com, sizeof (*com)); 24435 return (rval); 24436 } 24437 24438 24439 /* 24440 * Function: sr_play_trkind() 24441 * 24442 * Description: This routine is the driver entry point for handling CD-ROM 24443 * ioctl requests to output the audio signals at the specified 24444 * starting address and continue the audio play until the specified 24445 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24446 * format. 24447 * 24448 * Arguments: dev - the device 'dev_t' 24449 * data - pointer to user provided audio track/index structure, 24450 * specifying start/end addresses. 24451 * flag - this argument is a pass through to ddi_copyxxx() 24452 * directly from the mode argument of ioctl(). 
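 *
 * Example (editor's sketch; hypothetical userland caller): play from
 * track 1 index 1 through track 2 index 1.
 *
 *	struct cdrom_ti ti;
 *	ti.cdti_trk0 = 1;  ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 2;  ti.cdti_ind1 = 1;
 *	(void) ioctl(fd, CDROMPLAYTRKIND, &ti);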
24453 * 24454 * Return Code: the code returned by sd_send_scsi_cmd() 24455 * EFAULT if ddi_copyxxx() fails 24456 * ENXIO if fail ddi_get_soft_state 24457 * EINVAL if data pointer is NULL 24458 */ 24459 24460 static int 24461 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24462 { 24463 struct cdrom_ti ti_struct; 24464 struct cdrom_ti *ti = &ti_struct; 24465 struct uscsi_cmd *com = NULL; 24466 char cdb[CDB_GROUP1]; 24467 int rval; 24468 24469 if (data == NULL) { 24470 return (EINVAL); 24471 } 24472 24473 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24474 return (EFAULT); 24475 } 24476 24477 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24478 bzero(cdb, CDB_GROUP1); 24479 cdb[0] = SCMD_PLAYAUDIO_TI; 24480 cdb[4] = ti->cdti_trk0; 24481 cdb[5] = ti->cdti_ind0; 24482 cdb[7] = ti->cdti_trk1; 24483 cdb[8] = ti->cdti_ind1; 24484 com->uscsi_cdb = cdb; 24485 com->uscsi_cdblen = CDB_GROUP1; 24486 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24487 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24488 SD_PATH_STANDARD); 24489 kmem_free(com, sizeof (*com)); 24490 return (rval); 24491 } 24492 24493 24494 /* 24495 * Function: sr_read_all_subcodes() 24496 * 24497 * Description: This routine is the driver entry point for handling CD-ROM 24498 * ioctl requests to return raw subcode data while the target is 24499 * playing audio (CDROMSUBCODE). 24500 * 24501 * Arguments: dev - the device 'dev_t' 24502 * data - pointer to user provided cdrom subcode structure, 24503 * specifying the transfer length and address. 24504 * flag - this argument is a pass through to ddi_copyxxx() 24505 * directly from the mode argument of ioctl(). 24506 * 24507 * Return Code: the code returned by sd_send_scsi_cmd() 24508 * EFAULT if ddi_copyxxx() fails 24509 * ENXIO if fail ddi_get_soft_state 24510 * EINVAL if data pointer is NULL 24511 */ 24512 24513 static int 24514 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24515 { 24516 struct sd_lun *un = NULL; 24517 struct uscsi_cmd *com = NULL; 24518 struct cdrom_subcode *subcode = NULL; 24519 int rval; 24520 size_t buflen; 24521 char cdb[CDB_GROUP5]; 24522 24523 #ifdef _MULTI_DATAMODEL 24524 /* To support ILP32 applications in an LP64 world */ 24525 struct cdrom_subcode32 cdrom_subcode32; 24526 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24527 #endif 24528 if (data == NULL) { 24529 return (EINVAL); 24530 } 24531 24532 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24533 return (ENXIO); 24534 } 24535 24536 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24537 24538 #ifdef _MULTI_DATAMODEL 24539 switch (ddi_model_convert_from(flag & FMODELS)) { 24540 case DDI_MODEL_ILP32: 24541 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24542 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24543 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24544 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24545 return (EFAULT); 24546 } 24547 /* Convert the ILP32 uscsi data from the application to LP64 */ 24548 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24549 break; 24550 case DDI_MODEL_NONE: 24551 if (ddi_copyin(data, subcode, 24552 sizeof (struct cdrom_subcode), flag)) { 24553 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24554 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24555 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24556 return (EFAULT); 24557 } 24558 break; 24559 } 24560 #else /* ! 
_MULTI_DATAMODEL */ 24561 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24562 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24563 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24564 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24565 return (EFAULT); 24566 } 24567 #endif /* _MULTI_DATAMODEL */ 24568 24569 /* 24570 * Since MMC-2 expects max 3 bytes for length, check if the 24571 * length input is greater than 3 bytes 24572 */ 24573 if ((subcode->cdsc_length & 0xFF000000) != 0) { 24574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24575 "sr_read_all_subcodes: " 24576 "cdrom transfer length too large: %d (limit %d)\n", 24577 subcode->cdsc_length, 0xFFFFFF); 24578 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24579 return (EINVAL); 24580 } 24581 24582 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 24583 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24584 bzero(cdb, CDB_GROUP5); 24585 24586 if (un->un_f_mmc_cap == TRUE) { 24587 cdb[0] = (char)SCMD_READ_CD; 24588 cdb[2] = (char)0xff; 24589 cdb[3] = (char)0xff; 24590 cdb[4] = (char)0xff; 24591 cdb[5] = (char)0xff; 24592 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24593 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24594 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 24595 cdb[10] = 1; 24596 } else { 24597 /* 24598 * Note: A vendor specific command (0xDF) is being used here to 24599 * request a read of all subcodes. 24600 */ 24601 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 24602 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 24603 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24604 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24605 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 24606 } 24607 com->uscsi_cdb = cdb; 24608 com->uscsi_cdblen = CDB_GROUP5; 24609 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 24610 com->uscsi_buflen = buflen; 24611 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24612 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24613 SD_PATH_STANDARD); 24614 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24615 kmem_free(com, sizeof (*com)); 24616 return (rval); 24617 } 24618 24619 24620 /* 24621 * Function: sr_read_subchannel() 24622 * 24623 * Description: This routine is the driver entry point for handling CD-ROM 24624 * ioctl requests to return the Q sub-channel data of the CD 24625 * current position block (CDROMSUBCHNL). The data includes the 24626 * track number, index number, absolute CD-ROM address (LBA or MSF 24627 * format per the user), track relative CD-ROM address (LBA or MSF 24628 * format per the user), control data and audio status. 24629 * 24630 * Arguments: dev - the device 'dev_t' 24631 * data - pointer to user provided cdrom sub-channel structure 24632 * flag - this argument is a pass through to ddi_copyxxx() 24633 * directly from the mode argument of ioctl().
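 *
 * Example (editor's sketch; hypothetical userland caller): poll the
 * current position in MSF format while audio is playing.
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		... sc.cdsc_audiostatus and sc.cdsc_absaddr.msf hold
 *		the playback status and absolute position ...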
24634 * 24635 * Return Code: the code returned by sd_send_scsi_cmd() 24636 * EFAULT if ddi_copyxxx() fails 24637 * ENXIO if fail ddi_get_soft_state 24638 * EINVAL if data pointer is NULL 24639 */ 24640 24641 static int 24642 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24643 { 24644 struct sd_lun *un; 24645 struct uscsi_cmd *com; 24646 struct cdrom_subchnl subchanel; 24647 struct cdrom_subchnl *subchnl = &subchanel; 24648 char cdb[CDB_GROUP1]; 24649 caddr_t buffer; 24650 int rval; 24651 24652 if (data == NULL) { 24653 return (EINVAL); 24654 } 24655 24656 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24657 (un->un_state == SD_STATE_OFFLINE)) { 24658 return (ENXIO); 24659 } 24660 24661 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24662 return (EFAULT); 24663 } 24664 24665 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24666 bzero(cdb, CDB_GROUP1); 24667 cdb[0] = SCMD_READ_SUBCHANNEL; 24668 /* Set the MSF bit based on the user requested address format */ 24669 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24670 /* 24671 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24672 * returned 24673 */ 24674 cdb[2] = 0x40; 24675 /* 24676 * Set byte 3 to specify the return data format. A value of 0x01 24677 * indicates that the CD-ROM current position should be returned. 24678 */ 24679 cdb[3] = 0x01; 24680 cdb[8] = 0x10; 24681 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24682 com->uscsi_cdb = cdb; 24683 com->uscsi_cdblen = CDB_GROUP1; 24684 com->uscsi_bufaddr = buffer; 24685 com->uscsi_buflen = 16; 24686 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24687 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24688 SD_PATH_STANDARD); 24689 if (rval != 0) { 24690 kmem_free(buffer, 16); 24691 kmem_free(com, sizeof (*com)); 24692 return (rval); 24693 } 24694 24695 /* Process the returned Q sub-channel data */ 24696 subchnl->cdsc_audiostatus = buffer[1]; 24697 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24698 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24699 subchnl->cdsc_trk = buffer[6]; 24700 subchnl->cdsc_ind = buffer[7]; 24701 if (subchnl->cdsc_format & CDROM_LBA) { 24702 subchnl->cdsc_absaddr.lba = 24703 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24704 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24705 subchnl->cdsc_reladdr.lba = 24706 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24707 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24708 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24709 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24710 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24711 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24712 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24713 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24714 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24715 } else { 24716 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24717 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24718 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24719 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24720 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24721 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24722 } 24723 kmem_free(buffer, 16); 24724 kmem_free(com, sizeof (*com)); 24725 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24726 != 0) { 24727 return (EFAULT); 24728 } 24729 return (rval); 24730 } 24731 24732 24733 /* 24734 * Function: sr_read_tocentry() 24735 * 
24736 * Description: This routine is the driver entry point for handling CD-ROM 24737 * ioctl requests to read from the Table of Contents (TOC) 24738 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 24739 * fields, the starting address (LBA or MSF format per the user) 24740 * and the data mode if the user specified track is a data track. 24741 * 24742 * Note: The READ HEADER (0x44) command used in this routine is 24743 * obsolete per the SCSI MMC spec but still supported in the 24744 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 24745 * therefore the command is still implemented in this routine. 24746 * 24747 * Arguments: dev - the device 'dev_t' 24748 * data - pointer to user provided toc entry structure, 24749 * specifying the track # and the address format 24750 * (LBA or MSF). 24751 * flag - this argument is a pass through to ddi_copyxxx() 24752 * directly from the mode argument of ioctl(). 24753 * 24754 * Return Code: the code returned by sd_send_scsi_cmd() 24755 * EFAULT if ddi_copyxxx() fails 24756 * ENXIO if fail ddi_get_soft_state 24757 * EINVAL if data pointer is NULL 24758 */ 24759 24760 static int 24761 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 24762 { 24763 struct sd_lun *un = NULL; 24764 struct uscsi_cmd *com; 24765 struct cdrom_tocentry toc_entry; 24766 struct cdrom_tocentry *entry = &toc_entry; 24767 caddr_t buffer; 24768 int rval; 24769 char cdb[CDB_GROUP1]; 24770 24771 if (data == NULL) { 24772 return (EINVAL); 24773 } 24774 24775 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24776 (un->un_state == SD_STATE_OFFLINE)) { 24777 return (ENXIO); 24778 } 24779 24780 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 24781 return (EFAULT); 24782 } 24783 24784 /* Validate the requested track and address format */ 24785 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 24786 return (EINVAL); 24787 } 24788 24789 if (entry->cdte_track == 0) { 24790 return (EINVAL); 24791 } 24792 24793 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 24794 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24795 bzero(cdb, CDB_GROUP1); 24796 24797 cdb[0] = SCMD_READ_TOC; 24798 /* Set the MSF bit based on the user requested address format */ 24799 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 24800 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24801 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 24802 } else { 24803 cdb[6] = entry->cdte_track; 24804 } 24805 24806 /* 24807 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24808 * (4 byte TOC response header + 8 byte track descriptor) 24809 */ 24810 cdb[8] = 12; 24811 com->uscsi_cdb = cdb; 24812 com->uscsi_cdblen = CDB_GROUP1; 24813 com->uscsi_bufaddr = buffer; 24814 com->uscsi_buflen = 0x0C; 24815 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 24816 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24817 SD_PATH_STANDARD); 24818 if (rval != 0) { 24819 kmem_free(buffer, 12); 24820 kmem_free(com, sizeof (*com)); 24821 return (rval); 24822 } 24823 24824 /* Process the toc entry */ 24825 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 24826 entry->cdte_ctrl = (buffer[5] & 0x0F); 24827 if (entry->cdte_format & CDROM_LBA) { 24828 entry->cdte_addr.lba = 24829 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24830 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24831 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 24832 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 24833 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 24834 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 24835 /* 24836 * Send a READ TOC command using the LBA address format to get 24837 * the LBA for the track requested so it can be used in the 24838 * READ HEADER request 24839 * 24840 * Note: The MSF bit of the READ HEADER command specifies the 24841 * output format. The block address specified in that command 24842 * must be in LBA format. 24843 */ 24844 cdb[1] = 0; 24845 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24846 SD_PATH_STANDARD); 24847 if (rval != 0) { 24848 kmem_free(buffer, 12); 24849 kmem_free(com, sizeof (*com)); 24850 return (rval); 24851 } 24852 } else { 24853 entry->cdte_addr.msf.minute = buffer[9]; 24854 entry->cdte_addr.msf.second = buffer[10]; 24855 entry->cdte_addr.msf.frame = buffer[11]; 24856 /* 24857 * Send a READ TOC command using the LBA address format to get 24858 * the LBA for the track requested so it can be used in the 24859 * READ HEADER request 24860 * 24861 * Note: The MSF bit of the READ HEADER command specifies the 24862 * output format. The block address specified in that command 24863 * must be in LBA format. 24864 */ 24865 cdb[1] = 0; 24866 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24867 SD_PATH_STANDARD); 24868 if (rval != 0) { 24869 kmem_free(buffer, 12); 24870 kmem_free(com, sizeof (*com)); 24871 return (rval); 24872 } 24873 } 24874 24875 /* 24876 * Build and send the READ HEADER command to determine the data mode of 24877 * the user specified track. 24878 */ 24879 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 24880 (entry->cdte_track != CDROM_LEADOUT)) { 24881 bzero(cdb, CDB_GROUP1); 24882 cdb[0] = SCMD_READ_HEADER; 24883 cdb[2] = buffer[8]; 24884 cdb[3] = buffer[9]; 24885 cdb[4] = buffer[10]; 24886 cdb[5] = buffer[11]; 24887 cdb[8] = 0x08; 24888 com->uscsi_buflen = 0x08; 24889 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24890 SD_PATH_STANDARD); 24891 if (rval == 0) { 24892 entry->cdte_datamode = buffer[0]; 24893 } else { 24894 /* 24895 * The READ HEADER command failed; since it is 24896 * obsolete in one spec, it's better to return 24897 * -1 for an invalid track so that we can still 24898 * receive the rest of the TOC data.
24899 */ 24900 entry->cdte_datamode = (uchar_t)-1; 24901 } 24902 } else { 24903 entry->cdte_datamode = (uchar_t)-1; 24904 } 24905 24906 kmem_free(buffer, 12); 24907 kmem_free(com, sizeof (*com)); 24908 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 24909 return (EFAULT); 24910 24911 return (rval); 24912 } 24913 24914 24915 /* 24916 * Function: sr_read_tochdr() 24917 * 24918 * Description: This routine is the driver entry point for handling CD-ROM 24919 * ioctl requests to read the Table of Contents (TOC) header 24920 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 24921 * and ending track numbers. 24922 * 24923 * Arguments: dev - the device 'dev_t' 24924 * data - pointer to user provided toc header structure, 24925 * specifying the starting and ending track numbers. 24926 * flag - this argument is a pass through to ddi_copyxxx() 24927 * directly from the mode argument of ioctl(). 24928 * 24929 * Return Code: the code returned by sd_send_scsi_cmd() 24930 * EFAULT if ddi_copyxxx() fails 24931 * ENXIO if fail ddi_get_soft_state 24932 * EINVAL if data pointer is NULL 24933 */ 24934 24935 static int 24936 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 24937 { 24938 struct sd_lun *un; 24939 struct uscsi_cmd *com; 24940 struct cdrom_tochdr toc_header; 24941 struct cdrom_tochdr *hdr = &toc_header; 24942 char cdb[CDB_GROUP1]; 24943 int rval; 24944 caddr_t buffer; 24945 24946 if (data == NULL) { 24947 return (EINVAL); 24948 } 24949 24950 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24951 (un->un_state == SD_STATE_OFFLINE)) { 24952 return (ENXIO); 24953 } 24954 24955 buffer = kmem_zalloc(4, KM_SLEEP); 24956 bzero(cdb, CDB_GROUP1); 24957 cdb[0] = SCMD_READ_TOC; 24958 /* 24959 * Specifying a track number of 0x00 in the READ TOC command indicates 24960 * that the TOC header should be returned 24961 */ 24962 cdb[6] = 0x00; 24963 /* 24964 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 24965 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 24966 */ 24967 cdb[8] = 0x04; 24968 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24969 com->uscsi_cdb = cdb; 24970 com->uscsi_cdblen = CDB_GROUP1; 24971 com->uscsi_bufaddr = buffer; 24972 com->uscsi_buflen = 0x04; 24973 com->uscsi_timeout = 300; 24974 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24975 24976 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24977 SD_PATH_STANDARD); 24978 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24979 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 24980 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 24981 } else { 24982 hdr->cdth_trk0 = buffer[2]; 24983 hdr->cdth_trk1 = buffer[3]; 24984 } 24985 kmem_free(buffer, 4); 24986 kmem_free(com, sizeof (*com)); 24987 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 24988 return (EFAULT); 24989 } 24990 return (rval); 24991 } 24992 24993 24994 /* 24995 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 24996 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 24997 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 24998 * digital audio and extended architecture digital audio. These modes are 24999 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25000 * MMC specs.
25001 * 25002 * In addition to support for the various data formats, these routines also 25003 * include support for devices that implement only the direct access READ 25004 * commands (0x08, 0x28), devices that implement the READ_CD commands 25005 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25006 * READ CDXA commands (0xD8, 0xDB). 25007 */ 25008 25009 /* 25010 * Function: sr_read_mode1() 25011 * 25012 * Description: This routine is the driver entry point for handling CD-ROM 25013 * ioctl read mode1 requests (CDROMREADMODE1). 25014 * 25015 * Arguments: dev - the device 'dev_t' 25016 * data - pointer to user provided cd read structure specifying 25017 * the lba buffer address and length. 25018 * flag - this argument is a pass through to ddi_copyxxx() 25019 * directly from the mode argument of ioctl(). 25020 * 25021 * Return Code: the code returned by sd_send_scsi_cmd() 25022 * EFAULT if ddi_copyxxx() fails 25023 * ENXIO if fail ddi_get_soft_state 25024 * EINVAL if data pointer is NULL 25025 */ 25026 25027 static int 25028 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25029 { 25030 struct sd_lun *un; 25031 struct cdrom_read mode1_struct; 25032 struct cdrom_read *mode1 = &mode1_struct; 25033 int rval; 25034 #ifdef _MULTI_DATAMODEL 25035 /* To support ILP32 applications in an LP64 world */ 25036 struct cdrom_read32 cdrom_read32; 25037 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25038 #endif /* _MULTI_DATAMODEL */ 25039 25040 if (data == NULL) { 25041 return (EINVAL); 25042 } 25043 25044 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25045 (un->un_state == SD_STATE_OFFLINE)) { 25046 return (ENXIO); 25047 } 25048 25049 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25050 "sr_read_mode1: entry: un:0x%p\n", un); 25051 25052 #ifdef _MULTI_DATAMODEL 25053 switch (ddi_model_convert_from(flag & FMODELS)) { 25054 case DDI_MODEL_ILP32: 25055 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25056 return (EFAULT); 25057 } 25058 /* Convert the ILP32 uscsi data from the application to LP64 */ 25059 cdrom_read32tocdrom_read(cdrd32, mode1); 25060 break; 25061 case DDI_MODEL_NONE: 25062 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25063 return (EFAULT); 25064 } 25065 } 25066 #else /* ! _MULTI_DATAMODEL */ 25067 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25068 return (EFAULT); 25069 } 25070 #endif /* _MULTI_DATAMODEL */ 25071 25072 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25073 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25074 25075 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25076 "sr_read_mode1: exit: un:0x%p\n", un); 25077 25078 return (rval); 25079 } 25080 25081 25082 /* 25083 * Function: sr_read_cd_mode2() 25084 * 25085 * Description: This routine is the driver entry point for handling CD-ROM 25086 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25087 * support the READ CD (0xBE) command or the 1st generation 25088 * READ CD (0xD4) command. 25089 * 25090 * Arguments: dev - the device 'dev_t' 25091 * data - pointer to user provided cd read structure specifying 25092 * the lba buffer address and length. 25093 * flag - this argument is a pass through to ddi_copyxxx() 25094 * directly from the mode argument of ioctl().
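 *
 * Example (editor's sketch; hypothetical userland caller): read one
 * 2336-byte mode 2 sector at LBA 16. The same CDROMREADMODE2 request
 * is handled by sr_read_mode2() below on drives without READ CD
 * support.
 *
 *	char buf[2336];
 *	struct cdrom_read cr;
 *	cr.cdread_lba = 16;
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = sizeof (buf);
 *	(void) ioctl(fd, CDROMREADMODE2, &cr);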
25095 * 25096 * Return Code: the code returned by sd_send_scsi_cmd() 25097 * EFAULT if ddi_copyxxx() fails 25098 * ENXIO if fail ddi_get_soft_state 25099 * EINVAL if data pointer is NULL 25100 */ 25101 25102 static int 25103 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25104 { 25105 struct sd_lun *un; 25106 struct uscsi_cmd *com; 25107 struct cdrom_read mode2_struct; 25108 struct cdrom_read *mode2 = &mode2_struct; 25109 uchar_t cdb[CDB_GROUP5]; 25110 int nblocks; 25111 int rval; 25112 #ifdef _MULTI_DATAMODEL 25113 /* To support ILP32 applications in an LP64 world */ 25114 struct cdrom_read32 cdrom_read32; 25115 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25116 #endif /* _MULTI_DATAMODEL */ 25117 25118 if (data == NULL) { 25119 return (EINVAL); 25120 } 25121 25122 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25123 (un->un_state == SD_STATE_OFFLINE)) { 25124 return (ENXIO); 25125 } 25126 25127 #ifdef _MULTI_DATAMODEL 25128 switch (ddi_model_convert_from(flag & FMODELS)) { 25129 case DDI_MODEL_ILP32: 25130 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25131 return (EFAULT); 25132 } 25133 /* Convert the ILP32 uscsi data from the application to LP64 */ 25134 cdrom_read32tocdrom_read(cdrd32, mode2); 25135 break; 25136 case DDI_MODEL_NONE: 25137 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25138 return (EFAULT); 25139 } 25140 break; 25141 } 25142 25143 #else /* ! _MULTI_DATAMODEL */ 25144 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25145 return (EFAULT); 25146 } 25147 #endif /* _MULTI_DATAMODEL */ 25148 25149 bzero(cdb, sizeof (cdb)); 25150 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25151 /* Read command supported by 1st generation atapi drives */ 25152 cdb[0] = SCMD_READ_CDD4; 25153 } else { 25154 /* Universal CD Access Command */ 25155 cdb[0] = SCMD_READ_CD; 25156 } 25157 25158 /* 25159 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 25160 */ 25161 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25162 25163 /* set the start address */ 25164 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25165 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25166 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25167 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25168 25169 /* set the transfer length */ 25170 nblocks = mode2->cdread_buflen / 2336; 25171 cdb[6] = (uchar_t)(nblocks >> 16); 25172 cdb[7] = (uchar_t)(nblocks >> 8); 25173 cdb[8] = (uchar_t)nblocks; 25174 25175 /* set the filter bits */ 25176 cdb[9] = CDROM_READ_CD_USERDATA; 25177 25178 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25179 com->uscsi_cdb = (caddr_t)cdb; 25180 com->uscsi_cdblen = sizeof (cdb); 25181 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25182 com->uscsi_buflen = mode2->cdread_buflen; 25183 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25184 25185 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25186 SD_PATH_STANDARD); 25187 kmem_free(com, sizeof (*com)); 25188 return (rval); 25189 } 25190 25191 25192 /* 25193 * Function: sr_read_mode2() 25194 * 25195 * Description: This routine is the driver entry point for handling CD-ROM 25196 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25197 * do not support the READ CD (0xBE) command. 25198 * 25199 * Arguments: dev - the device 'dev_t' 25200 * data - pointer to user provided cd read structure specifying 25201 * the lba buffer address and length. 25202 * flag - this argument is a pass through to ddi_copyxxx() 25203 * directly from the mode argument of ioctl().
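 *
 * Note (editor's addition): unlike sr_read_cd_mode2() above, this
 * path has no READ CD command available, so it temporarily switches
 * the target block size to 2336 bytes via sr_sector_mode(), issues a
 * plain READ, and then restores the saved block size; the EAGAIN
 * check below keeps other commands out of the driver while the block
 * size is changed.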
25204 * 25205 * Return Code: the code returned by sd_send_scsi_cmd() 25206 * EFAULT if ddi_copyxxx() fails 25207 * ENXIO if fail ddi_get_soft_state 25208 * EINVAL if data pointer is NULL 25209 * EIO if fail to reset block size 25210 * EAGAIN if commands are in progress in the driver 25211 */ 25212 25213 static int 25214 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25215 { 25216 struct sd_lun *un; 25217 struct cdrom_read mode2_struct; 25218 struct cdrom_read *mode2 = &mode2_struct; 25219 int rval; 25220 uint32_t restore_blksize; 25221 struct uscsi_cmd *com; 25222 uchar_t cdb[CDB_GROUP0]; 25223 int nblocks; 25224 25225 #ifdef _MULTI_DATAMODEL 25226 /* To support ILP32 applications in an LP64 world */ 25227 struct cdrom_read32 cdrom_read32; 25228 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25229 #endif /* _MULTI_DATAMODEL */ 25230 25231 if (data == NULL) { 25232 return (EINVAL); 25233 } 25234 25235 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25236 (un->un_state == SD_STATE_OFFLINE)) { 25237 return (ENXIO); 25238 } 25239 25240 /* 25241 * Because this routine will update the device and driver block size 25242 * being used, we want to make sure there are no commands in progress. 25243 * If commands are in progress, the user will have to try again. 25244 * 25245 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25246 * in sdioctl to protect commands from sdioctl through to the top of 25247 * sd_uscsi_strategy. See sdioctl for details. 25248 */ 25249 mutex_enter(SD_MUTEX(un)); 25250 if (un->un_ncmds_in_driver != 1) { 25251 mutex_exit(SD_MUTEX(un)); 25252 return (EAGAIN); 25253 } 25254 mutex_exit(SD_MUTEX(un)); 25255 25256 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25257 "sr_read_mode2: entry: un:0x%p\n", un); 25258 25259 #ifdef _MULTI_DATAMODEL 25260 switch (ddi_model_convert_from(flag & FMODELS)) { 25261 case DDI_MODEL_ILP32: 25262 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25263 return (EFAULT); 25264 } 25265 /* Convert the ILP32 uscsi data from the application to LP64 */ 25266 cdrom_read32tocdrom_read(cdrd32, mode2); 25267 break; 25268 case DDI_MODEL_NONE: 25269 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25270 return (EFAULT); 25271 } 25272 break; 25273 } 25274 #else /* !
_MULTI_DATAMODEL */ 25275 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 25276 return (EFAULT); 25277 } 25278 #endif /* _MULTI_DATAMODEL */ 25279 25280 /* Store the current target block size for restoration later */ 25281 restore_blksize = un->un_tgt_blocksize; 25282 25283 /* Change the device and soft state target block size to 2336 */ 25284 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 25285 rval = EIO; 25286 goto done; 25287 } 25288 25289 25290 bzero(cdb, sizeof (cdb)); 25291 25292 /* set READ operation */ 25293 cdb[0] = SCMD_READ; 25294 25295 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 25296 mode2->cdread_lba >>= 2; 25297 25298 /* set the start address */ 25299 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 25300 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25301 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 25302 25303 /* set the transfer length */ 25304 nblocks = mode2->cdread_buflen / 2336; 25305 cdb[4] = (uchar_t)nblocks & 0xFF; 25306 25307 /* build command */ 25308 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25309 com->uscsi_cdb = (caddr_t)cdb; 25310 com->uscsi_cdblen = sizeof (cdb); 25311 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25312 com->uscsi_buflen = mode2->cdread_buflen; 25313 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25314 25315 /* 25316 * Issue SCSI command with user space address for read buffer. 25317 * 25318 * This sends the command through the main channel in the driver. 25319 * 25320 * Since this is accessed via an IOCTL call, we go through the 25321 * standard path, so that if the device was powered down, then 25322 * it would be 'awakened' to handle the command. 25323 */ 25324 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25325 SD_PATH_STANDARD); 25326 25327 kmem_free(com, sizeof (*com)); 25328 25329 /* Restore the device and soft state target block size */ 25330 if (sr_sector_mode(dev, restore_blksize) != 0) { 25331 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25332 "can't switch back to mode 1\n"); 25333 /* 25334 * If sd_send_scsi_READ succeeded, we still need to report 25335 * an error because we failed to reset the block size 25336 */ 25337 if (rval == 0) { 25338 rval = EIO; 25339 } 25340 } 25341 25342 done: 25343 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25344 "sr_read_mode2: exit: un:0x%p\n", un); 25345 25346 return (rval); 25347 } 25348 25349 25350 /* 25351 * Function: sr_sector_mode() 25352 * 25353 * Description: This utility function is used by sr_read_mode2 to set the target 25354 * block size based on the user specified size. This is a legacy 25355 * implementation based upon a vendor specific mode page. 25356 * 25357 * Arguments: dev - the device 'dev_t' 25358 * blksize - flag indicating if block size is being set to 2336 or 25359 * 512.
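 *
 * Example (from the caller above, sr_read_mode2()): the routine is
 * used in matched pairs, switching to the 2336-byte mode and later
 * restoring the saved size.
 *
 *	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0)
 *		... fail with EIO ...
 *	... issue the READ ...
 *	if (sr_sector_mode(dev, restore_blksize) != 0)
 *		... report EIO even if the READ itself succeeded ...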
25360 * 25361 * Return Code: the code returned by sd_send_scsi_cmd() 25362 * EFAULT if ddi_copyxxx() fails 25363 * ENXIO if fail ddi_get_soft_state 25364 * EINVAL if data pointer is NULL 25365 */ 25366 25367 static int 25368 sr_sector_mode(dev_t dev, uint32_t blksize) 25369 { 25370 struct sd_lun *un; 25371 uchar_t *sense; 25372 uchar_t *select; 25373 int rval; 25374 25375 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25376 (un->un_state == SD_STATE_OFFLINE)) { 25377 return (ENXIO); 25378 } 25379 25380 sense = kmem_zalloc(20, KM_SLEEP); 25381 25382 /* Note: This is a vendor specific mode page (0x81) */ 25383 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 25384 SD_PATH_STANDARD)) != 0) { 25385 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25386 "sr_sector_mode: Mode Sense failed\n"); 25387 kmem_free(sense, 20); 25388 return (rval); 25389 } 25390 select = kmem_zalloc(20, KM_SLEEP); 25391 select[3] = 0x08; 25392 select[10] = ((blksize >> 8) & 0xff); 25393 select[11] = (blksize & 0xff); 25394 select[12] = 0x01; 25395 select[13] = 0x06; 25396 select[14] = sense[14]; 25397 select[15] = sense[15]; 25398 if (blksize == SD_MODE2_BLKSIZE) { 25399 select[14] |= 0x01; 25400 } 25401 25402 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 25403 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 25404 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25405 "sr_sector_mode: Mode Select failed\n"); 25406 } else { 25407 /* 25408 * Only update the softstate block size if we successfully 25409 * changed the device block mode. 25410 */ 25411 mutex_enter(SD_MUTEX(un)); 25412 sd_update_block_info(un, blksize, 0); 25413 mutex_exit(SD_MUTEX(un)); 25414 } 25415 kmem_free(sense, 20); 25416 kmem_free(select, 20); 25417 return (rval); 25418 } 25419 25420 25421 /* 25422 * Function: sr_read_cdda() 25423 * 25424 * Description: This routine is the driver entry point for handling CD-ROM 25425 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 25426 * the target supports CDDA, these requests are handled via a vendor 25427 * specific command (0xD8). If the target does not support CDDA, 25428 * these requests are handled via the READ CD command (0xBE). 25429 * 25430 * Arguments: dev - the device 'dev_t' 25431 * data - pointer to user provided CD-DA structure specifying 25432 * the track starting address, transfer length, and 25433 * subcode options. 25434 * flag - this argument is a pass through to ddi_copyxxx() 25435 * directly from the mode argument of ioctl().
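 *
 * Example (editor's sketch; hypothetical userland caller): read 8
 * raw 2352-byte audio frames starting at address 1000, with no
 * subcode data.
 *
 *	char buf[8 * 2352];
 *	struct cdrom_cdda cdda;
 *	cdda.cdda_addr = 1000;
 *	cdda.cdda_length = 8;
 *	cdda.cdda_data = buf;
 *	cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *	(void) ioctl(fd, CDROMCDDA, &cdda);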
25436 * 25437 * Return Code: the code returned by sd_send_scsi_cmd() 25438 * EFAULT if ddi_copyxxx() fails 25439 * ENXIO if fail ddi_get_soft_state 25440 * EINVAL if invalid arguments are provided 25441 * ENOTTY 25442 */ 25443 25444 static int 25445 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25446 { 25447 struct sd_lun *un; 25448 struct uscsi_cmd *com; 25449 struct cdrom_cdda *cdda; 25450 int rval; 25451 size_t buflen; 25452 char cdb[CDB_GROUP5]; 25453 25454 #ifdef _MULTI_DATAMODEL 25455 /* To support ILP32 applications in an LP64 world */ 25456 struct cdrom_cdda32 cdrom_cdda32; 25457 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25458 #endif /* _MULTI_DATAMODEL */ 25459 25460 if (data == NULL) { 25461 return (EINVAL); 25462 } 25463 25464 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25465 return (ENXIO); 25466 } 25467 25468 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25469 25470 #ifdef _MULTI_DATAMODEL 25471 switch (ddi_model_convert_from(flag & FMODELS)) { 25472 case DDI_MODEL_ILP32: 25473 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25474 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25475 "sr_read_cdda: ddi_copyin Failed\n"); 25476 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25477 return (EFAULT); 25478 } 25479 /* Convert the ILP32 uscsi data from the application to LP64 */ 25480 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25481 break; 25482 case DDI_MODEL_NONE: 25483 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25484 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25485 "sr_read_cdda: ddi_copyin Failed\n"); 25486 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25487 return (EFAULT); 25488 } 25489 break; 25490 } 25491 #else /* ! _MULTI_DATAMODEL */ 25492 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25493 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25494 "sr_read_cdda: ddi_copyin Failed\n"); 25495 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25496 return (EFAULT); 25497 } 25498 #endif /* _MULTI_DATAMODEL */ 25499 25500 /* 25501 * Since MMC-2 expects max 3 bytes for length, check if the 25502 * length input is greater than 3 bytes 25503 */ 25504 if ((cdda->cdda_length & 0xFF000000) != 0) { 25505 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25506 "cdrom transfer length too large: %d (limit %d)\n", 25507 cdda->cdda_length, 0xFFFFFF); 25508 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25509 return (EINVAL); 25510 } 25511 25512 switch (cdda->cdda_subcode) { 25513 case CDROM_DA_NO_SUBCODE: 25514 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25515 break; 25516 case CDROM_DA_SUBQ: 25517 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25518 break; 25519 case CDROM_DA_ALL_SUBCODE: 25520 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25521 break; 25522 case CDROM_DA_SUBCODE_ONLY: 25523 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25524 break; 25525 default: 25526 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25527 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25528 cdda->cdda_subcode); 25529 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25530 return (EINVAL); 25531 } 25532 25533 /* Build and send the command */ 25534 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25535 bzero(cdb, CDB_GROUP5); 25536 25537 if (un->un_f_cfg_cdda == TRUE) { 25538 cdb[0] = (char)SCMD_READ_CD; 25539 cdb[1] = 0x04; 25540 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25541 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25542 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25543 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25544 
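/*
 * Editor's note: bytes 6-8 of the READ CD (0xBE) cdb carry the
 * 24-bit transfer length in blocks (the length check above
 * guarantees it fits in 3 bytes); byte 9 (0x10) requests the user
 * data portion of each sector, and byte 10 (set below) selects the
 * subcode format.
 */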
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25545 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25546 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25547 cdb[9] = 0x10; 25548 switch (cdda->cdda_subcode) { 25549 case CDROM_DA_NO_SUBCODE : 25550 cdb[10] = 0x0; 25551 break; 25552 case CDROM_DA_SUBQ : 25553 cdb[10] = 0x2; 25554 break; 25555 case CDROM_DA_ALL_SUBCODE : 25556 cdb[10] = 0x1; 25557 break; 25558 case CDROM_DA_SUBCODE_ONLY : 25559 /* FALLTHROUGH */ 25560 default : 25561 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25562 kmem_free(com, sizeof (*com)); 25563 return (ENOTTY); 25564 } 25565 } else { 25566 cdb[0] = (char)SCMD_READ_CDDA; 25567 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25568 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25569 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25570 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25571 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25572 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25573 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25574 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25575 cdb[10] = cdda->cdda_subcode; 25576 } 25577 25578 com->uscsi_cdb = cdb; 25579 com->uscsi_cdblen = CDB_GROUP5; 25580 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25581 com->uscsi_buflen = buflen; 25582 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25583 25584 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25585 SD_PATH_STANDARD); 25586 25587 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25588 kmem_free(com, sizeof (*com)); 25589 return (rval); 25590 } 25591 25592 25593 /* 25594 * Function: sr_read_cdxa() 25595 * 25596 * Description: This routine is the driver entry point for handling CD-ROM 25597 * ioctl requests to return CD-XA (Extended Architecture) data. 25598 * (CDROMCDXA). 25599 * 25600 * Arguments: dev - the device 'dev_t' 25601 * data - pointer to user provided CD-XA structure specifying 25602 * the data starting address, transfer length, and format 25603 * flag - this argument is a pass through to ddi_copyxxx() 25604 * directly from the mode argument of ioctl(). 25605 * 25606 * Return Code: the code returned by sd_send_scsi_cmd() 25607 * EFAULT if ddi_copyxxx() fails 25608 * ENXIO if fail ddi_get_soft_state 25609 * EINVAL if data pointer is NULL 25610 */ 25611 25612 static int 25613 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25614 { 25615 struct sd_lun *un; 25616 struct uscsi_cmd *com; 25617 struct cdrom_cdxa *cdxa; 25618 int rval; 25619 size_t buflen; 25620 char cdb[CDB_GROUP5]; 25621 uchar_t read_flags; 25622 25623 #ifdef _MULTI_DATAMODEL 25624 /* To support ILP32 applications in an LP64 world */ 25625 struct cdrom_cdxa32 cdrom_cdxa32; 25626 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25627 #endif /* _MULTI_DATAMODEL */ 25628 25629 if (data == NULL) { 25630 return (EINVAL); 25631 } 25632 25633 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25634 return (ENXIO); 25635 } 25636 25637 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25638 25639 #ifdef _MULTI_DATAMODEL 25640 switch (ddi_model_convert_from(flag & FMODELS)) { 25641 case DDI_MODEL_ILP32: 25642 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25643 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25644 return (EFAULT); 25645 } 25646 /* 25647 * Convert the ILP32 uscsi data from the 25648 * application to LP64 for internal use. 
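 * (Editor's note: the conversion is essentially a field-for-field
 * copy; only the 32-bit user buffer pointer needs to be widened to
 * a 64-bit caddr_t.)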
    com = kmem_zalloc(sizeof (*com), KM_SLEEP);
    bzero(cdb, CDB_GROUP5);
    if (un->un_f_mmc_cap == TRUE) {
        cdb[0] = (char)SCMD_READ_CD;
        cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
        cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
        cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
        cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
        cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
        cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
        cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
        cdb[9] = (char)read_flags;
    } else {
        /*
         * Note: A vendor specific command (0xDB) is being used here to
         * request a read of all subcodes.
         */
        cdb[0] = (char)SCMD_READ_CDXA;
        cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
        cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
        cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
        cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
        cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
        cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
        cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
        cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
        cdb[10] = cdxa->cdxa_format;
    }
    com->uscsi_cdb = cdb;
    com->uscsi_cdblen = CDB_GROUP5;
    com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
    com->uscsi_buflen = buflen;
    com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
    rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
        SD_PATH_STANDARD);
    kmem_free(cdxa, sizeof (struct cdrom_cdxa));
    kmem_free(com, sizeof (*com));
    return (rval);
}
25715 */ 25716 cdb[0] = (char)SCMD_READ_CDXA; 25717 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25718 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25719 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25720 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25721 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25722 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25723 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25724 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25725 cdb[10] = cdxa->cdxa_format; 25726 } 25727 com->uscsi_cdb = cdb; 25728 com->uscsi_cdblen = CDB_GROUP5; 25729 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25730 com->uscsi_buflen = buflen; 25731 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25732 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25733 SD_PATH_STANDARD); 25734 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25735 kmem_free(com, sizeof (*com)); 25736 return (rval); 25737 } 25738 25739 25740 /* 25741 * Function: sr_eject() 25742 * 25743 * Description: This routine is the driver entry point for handling CD-ROM 25744 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25745 * 25746 * Arguments: dev - the device 'dev_t' 25747 * 25748 * Return Code: the code returned by sd_send_scsi_cmd() 25749 */ 25750 25751 static int 25752 sr_eject(dev_t dev) 25753 { 25754 struct sd_lun *un; 25755 int rval; 25756 25757 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25758 (un->un_state == SD_STATE_OFFLINE)) { 25759 return (ENXIO); 25760 } 25761 25762 /* 25763 * To prevent race conditions with the eject 25764 * command, keep track of an eject command as 25765 * it progresses. If we are already handling 25766 * an eject command in the driver for the given 25767 * unit and another request to eject is received 25768 * immediately return EAGAIN so we don't lose 25769 * the command if the current eject command fails. 25770 */ 25771 mutex_enter(SD_MUTEX(un)); 25772 if (un->un_f_ejecting == TRUE) { 25773 mutex_exit(SD_MUTEX(un)); 25774 return (EAGAIN); 25775 } 25776 un->un_f_ejecting = TRUE; 25777 mutex_exit(SD_MUTEX(un)); 25778 25779 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25780 SD_PATH_STANDARD)) != 0) { 25781 mutex_enter(SD_MUTEX(un)); 25782 un->un_f_ejecting = FALSE; 25783 mutex_exit(SD_MUTEX(un)); 25784 return (rval); 25785 } 25786 25787 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25788 SD_PATH_STANDARD); 25789 25790 if (rval == 0) { 25791 mutex_enter(SD_MUTEX(un)); 25792 sr_ejected(un); 25793 un->un_mediastate = DKIO_EJECTED; 25794 un->un_f_ejecting = FALSE; 25795 cv_broadcast(&un->un_state_cv); 25796 mutex_exit(SD_MUTEX(un)); 25797 } else { 25798 mutex_enter(SD_MUTEX(un)); 25799 un->un_f_ejecting = FALSE; 25800 mutex_exit(SD_MUTEX(un)); 25801 } 25802 return (rval); 25803 } 25804 25805 25806 /* 25807 * Function: sr_ejected() 25808 * 25809 * Description: This routine updates the soft state structure to invalidate the 25810 * geometry information after the media has been ejected or a 25811 * media eject has been detected. 
25812 * 25813 * Arguments: un - driver soft state (unit) structure 25814 */ 25815 25816 static void 25817 sr_ejected(struct sd_lun *un) 25818 { 25819 struct sd_errstats *stp; 25820 25821 ASSERT(un != NULL); 25822 ASSERT(mutex_owned(SD_MUTEX(un))); 25823 25824 un->un_f_blockcount_is_valid = FALSE; 25825 un->un_f_tgt_blocksize_is_valid = FALSE; 25826 mutex_exit(SD_MUTEX(un)); 25827 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25828 mutex_enter(SD_MUTEX(un)); 25829 25830 if (un->un_errstats != NULL) { 25831 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25832 stp->sd_capacity.value.ui64 = 0; 25833 } 25834 25835 /* remove "capacity-of-device" properties */ 25836 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25837 "device-nblocks"); 25838 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un), 25839 "device-blksize"); 25840 } 25841 25842 25843 /* 25844 * Function: sr_check_wp() 25845 * 25846 * Description: This routine checks the write protection of a removable 25847 * media disk and hotpluggable devices via the write protect bit of 25848 * the Mode Page Header device specific field. Some devices choke 25849 * on unsupported mode page. In order to workaround this issue, 25850 * this routine has been implemented to use 0x3f mode page(request 25851 * for all pages) for all device types. 25852 * 25853 * Arguments: dev - the device 'dev_t' 25854 * 25855 * Return Code: int indicating if the device is write protected (1) or not (0) 25856 * 25857 * Context: Kernel thread. 25858 * 25859 */ 25860 25861 static int 25862 sr_check_wp(dev_t dev) 25863 { 25864 struct sd_lun *un; 25865 uchar_t device_specific; 25866 uchar_t *sense; 25867 int hdrlen; 25868 int rval = FALSE; 25869 25870 /* 25871 * Note: The return codes for this routine should be reworked to 25872 * properly handle the case of a NULL softstate. 25873 */ 25874 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25875 return (FALSE); 25876 } 25877 25878 if (un->un_f_cfg_is_atapi == TRUE) { 25879 /* 25880 * The mode page contents are not required; set the allocation 25881 * length for the mode page header only 25882 */ 25883 hdrlen = MODE_HEADER_LENGTH_GRP2; 25884 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25885 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25886 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25887 goto err_exit; 25888 device_specific = 25889 ((struct mode_header_grp2 *)sense)->device_specific; 25890 } else { 25891 hdrlen = MODE_HEADER_LENGTH; 25892 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25893 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25894 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25895 goto err_exit; 25896 device_specific = 25897 ((struct mode_header *)sense)->device_specific; 25898 } 25899 25900 /* 25901 * Write protect mode sense failed; not all disks 25902 * understand this query. Return FALSE assuming that 25903 * these devices are not writable. 25904 */ 25905 if (device_specific & WRITE_PROTECT) { 25906 rval = TRUE; 25907 } 25908 25909 err_exit: 25910 kmem_free(sense, hdrlen); 25911 return (rval); 25912 } 25913 25914 /* 25915 * Function: sr_volume_ctrl() 25916 * 25917 * Description: This routine is the driver entry point for handling CD-ROM 25918 * audio output volume ioctl requests. (CDROMVOLCTRL) 25919 * 25920 * Arguments: dev - the device 'dev_t' 25921 * data - pointer to user audio volume control structure 25922 * flag - this argument is a pass through to ddi_copyxxx() 25923 * directly from the mode argument of ioctl(). 
25924 * 25925 * Return Code: the code returned by sd_send_scsi_cmd() 25926 * EFAULT if ddi_copyxxx() fails 25927 * ENXIO if fail ddi_get_soft_state 25928 * EINVAL if data pointer is NULL 25929 * 25930 */ 25931 25932 static int 25933 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25934 { 25935 struct sd_lun *un; 25936 struct cdrom_volctrl volume; 25937 struct cdrom_volctrl *vol = &volume; 25938 uchar_t *sense_page; 25939 uchar_t *select_page; 25940 uchar_t *sense; 25941 uchar_t *select; 25942 int sense_buflen; 25943 int select_buflen; 25944 int rval; 25945 25946 if (data == NULL) { 25947 return (EINVAL); 25948 } 25949 25950 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25951 (un->un_state == SD_STATE_OFFLINE)) { 25952 return (ENXIO); 25953 } 25954 25955 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25956 return (EFAULT); 25957 } 25958 25959 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25960 struct mode_header_grp2 *sense_mhp; 25961 struct mode_header_grp2 *select_mhp; 25962 int bd_len; 25963 25964 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25965 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25966 MODEPAGE_AUDIO_CTRL_LEN; 25967 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25968 select = kmem_zalloc(select_buflen, KM_SLEEP); 25969 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25970 sense_buflen, MODEPAGE_AUDIO_CTRL, 25971 SD_PATH_STANDARD)) != 0) { 25972 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25973 "sr_volume_ctrl: Mode Sense Failed\n"); 25974 kmem_free(sense, sense_buflen); 25975 kmem_free(select, select_buflen); 25976 return (rval); 25977 } 25978 sense_mhp = (struct mode_header_grp2 *)sense; 25979 select_mhp = (struct mode_header_grp2 *)select; 25980 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25981 sense_mhp->bdesc_length_lo; 25982 if (bd_len > MODE_BLK_DESC_LENGTH) { 25983 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25984 "sr_volume_ctrl: Mode Sense returned invalid " 25985 "block descriptor length\n"); 25986 kmem_free(sense, sense_buflen); 25987 kmem_free(select, select_buflen); 25988 return (EIO); 25989 } 25990 sense_page = (uchar_t *) 25991 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25992 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25993 select_mhp->length_msb = 0; 25994 select_mhp->length_lsb = 0; 25995 select_mhp->bdesc_length_hi = 0; 25996 select_mhp->bdesc_length_lo = 0; 25997 } else { 25998 struct mode_header *sense_mhp, *select_mhp; 25999 26000 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26001 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26002 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26003 select = kmem_zalloc(select_buflen, KM_SLEEP); 26004 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26005 sense_buflen, MODEPAGE_AUDIO_CTRL, 26006 SD_PATH_STANDARD)) != 0) { 26007 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26008 "sr_volume_ctrl: Mode Sense Failed\n"); 26009 kmem_free(sense, sense_buflen); 26010 kmem_free(select, select_buflen); 26011 return (rval); 26012 } 26013 sense_mhp = (struct mode_header *)sense; 26014 select_mhp = (struct mode_header *)select; 26015 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26016 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26017 "sr_volume_ctrl: Mode Sense returned invalid " 26018 "block descriptor length\n"); 26019 kmem_free(sense, sense_buflen); 26020 kmem_free(select, select_buflen); 26021 return (EIO); 26022 } 26023 sense_page = (uchar_t *) 26024 (sense + MODE_HEADER_LENGTH + 

    /* Build the select data for the user volume data */
    select_page[0] = MODEPAGE_AUDIO_CTRL;
    select_page[1] = 0xE;
    /* Set the immediate bit */
    select_page[2] = 0x04;
    /* Zero out reserved fields */
    select_page[3] = 0x00;
    select_page[4] = 0x00;
    /* Return sense data for fields not to be modified */
    select_page[5] = sense_page[5];
    select_page[6] = sense_page[6];
    select_page[7] = sense_page[7];
    /* Set the user specified volume levels for channel 0 and 1 */
    select_page[8] = 0x01;
    select_page[9] = vol->channel0;
    select_page[10] = 0x02;
    select_page[11] = vol->channel1;
    /* Channel 2 and 3 are currently unsupported so return the sense data */
    select_page[12] = sense_page[12];
    select_page[13] = sense_page[13];
    select_page[14] = sense_page[14];
    select_page[15] = sense_page[15];

    if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
        rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select,
            select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
    } else {
        rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
            select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
    }

    kmem_free(sense, sense_buflen);
    kmem_free(select, select_buflen);
    return (rval);
}
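
/*
 * Illustrative sketch (not part of the driver): setting both supported
 * channels to full volume from user space; the fd is assumed to be an
 * open CD-ROM device.
 *
 *	struct cdrom_volctrl v;
 *
 *	v.channel0 = v.channel1 = 0xff;
 *	if (ioctl(fd, CDROMVOLCTRL, &v) < 0)
 *		perror("CDROMVOLCTRL");
 */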


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
    struct sd_lun *un;
    struct uscsi_cmd *com;
    caddr_t buffer;
    char cdb[CDB_GROUP1];
    int session_offset = 0;
    int rval;

    if (data == NULL) {
        return (EINVAL);
    }

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
        (un->un_state == SD_STATE_OFFLINE)) {
        return (ENXIO);
    }

    buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
    bzero(cdb, CDB_GROUP1);
    cdb[0] = SCMD_READ_TOC;
    /*
     * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
     * (4 byte TOC response header + 8 byte response data)
     */
    cdb[8] = SONY_SESSION_OFFSET_LEN;
    /* Byte 9 is the control byte. A vendor specific value is used */
    cdb[9] = SONY_SESSION_OFFSET_KEY;
    com = kmem_zalloc(sizeof (*com), KM_SLEEP);
    com->uscsi_cdb = cdb;
    com->uscsi_cdblen = CDB_GROUP1;
    com->uscsi_bufaddr = buffer;
    com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
    com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

    rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
        SD_PATH_STANDARD);
    if (rval != 0) {
        kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
        kmem_free(com, sizeof (*com));
        return (rval);
    }
    if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
        session_offset =
            ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
            ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
        /*
         * The device returns the offset in units of the current
         * lbasize blocks. Convert it to 2K blocks before returning
         * it to the user.
         */
        if (un->un_tgt_blocksize == CDROM_BLK_512) {
            session_offset >>= 2;
        } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
            session_offset >>= 1;
        }
    }

    if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
        rval = EFAULT;
    }

    kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
    kmem_free(com, sizeof (*com));
    return (rval);
}
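
/*
 * A worked example for the block-size conversion above: a session offset
 * of 1000 reported by a device with a 512-byte target block size is
 * returned to the user as 1000 >> 2 == 250 2K blocks; with a 1024-byte
 * block size it would be 1000 >> 1 == 500.
 */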


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
    bzero(wm, sizeof (struct sd_w_map));
    cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
    return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be destroyed.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
    cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers, not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *		typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
    struct sd_w_map *wmp = NULL;
    struct sd_w_map *sl_wmp = NULL;
    struct sd_w_map *tmp_wmp;
    wm_state state = SD_WM_CHK_LIST;


    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));

    while (state != SD_WM_DONE) {

        switch (state) {
        case SD_WM_CHK_LIST:
            /*
             * This is the starting state. Check the wmap list
             * to see if the range is currently available.
             */
            if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
                /*
                 * If this is a simple write and no rmw
                 * i/o is pending then try to lock the
                 * range as the range should be available.
                 */
                state = SD_WM_LOCK_RANGE;
            } else {
                tmp_wmp = sd_get_range(un, startb, endb);
                if (tmp_wmp != NULL) {
                    if ((wmp != NULL) && ONLIST(un, wmp)) {
                        /*
                         * We should not keep an onlist
                         * wmp while waiting; this macro
                         * also sets wmp = NULL.
                         */
                        FREE_ONLIST_WMAP(un, wmp);
                    }
                    /*
                     * sl_wmp is the wmap on which the wait
                     * is done. Since tmp_wmp points to the
                     * in-use wmap, set sl_wmp to tmp_wmp
                     * and change the state to sleep.
                     */
                    sl_wmp = tmp_wmp;
                    state = SD_WM_WAIT_MAP;
                } else {
                    state = SD_WM_LOCK_RANGE;
                }

            }
            break;

        case SD_WM_LOCK_RANGE:
            ASSERT(un->un_wm_cache);
            /*
             * The range needs to be locked; try to get a wmap.
             * Attempt the allocation with KM_NOSLEEP first, to
             * avoid a sleep if possible, since we would have to
             * release the sd mutex in order to sleep.
             */
            if (wmp == NULL)
                wmp = kmem_cache_alloc(un->un_wm_cache,
                    KM_NOSLEEP);
            if (wmp == NULL) {
                mutex_exit(SD_MUTEX(un));
                _NOTE(DATA_READABLE_WITHOUT_LOCK
                    (sd_lun::un_wm_cache))
                wmp = kmem_cache_alloc(un->un_wm_cache,
                    KM_SLEEP);
                mutex_enter(SD_MUTEX(un));
                /*
                 * We released the mutex, so recheck by going
                 * back to the check-list state.
                 */
                state = SD_WM_CHK_LIST;
            } else {
                /*
                 * We exit the state machine since we have the
                 * wmap. Do the housekeeping first: place the
                 * wmap on the wmap list if it is not on it
                 * already, and then set the state to done.
                 */
                wmp->wm_start = startb;
                wmp->wm_end = endb;
                wmp->wm_flags = typ | SD_WM_BUSY;
                if (typ & SD_WTYPE_RMW) {
                    un->un_rmw_count++;
                }
                /*
                 * If not already on the list then link
                 */
                if (!ONLIST(un, wmp)) {
                    wmp->wm_next = un->un_wm;
                    wmp->wm_prev = NULL;
                    if (wmp->wm_next)
                        wmp->wm_next->wm_prev = wmp;
                    un->un_wm = wmp;
                }
                state = SD_WM_DONE;
            }
            break;

        case SD_WM_WAIT_MAP:
            ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
            /*
             * Wait is done on sl_wmp, which is set in the
             * check_list state.
             */
            sl_wmp->wm_wanted_count++;
            cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
            sl_wmp->wm_wanted_count--;
            /*
             * We can reuse the memory from the completed sl_wmp
             * lock range for our new lock, but only if no one is
             * waiting for it.
             */
            ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
            if (sl_wmp->wm_wanted_count == 0) {
                if (wmp != NULL)
                    CHK_N_FREEWMP(un, wmp);
                wmp = sl_wmp;
            }
            sl_wmp = NULL;
            /*
             * After waking up, we need to recheck the range
             * for availability.
             */
            state = SD_WM_CHK_LIST;
            break;

        default:
            panic("sd_range_lock: Unknown state %d in sd_range_lock",
                state);
            /*NOTREACHED*/
        } /* switch(state) */

    } /* while(state != SD_WM_DONE) */

    mutex_exit(SD_MUTEX(un));

    ASSERT(wmp != NULL);

    return (wmp);
}
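
/*
 * A descriptive summary of the state machine above (not code):
 *
 *	SD_WM_CHK_LIST   --(range busy)-->      SD_WM_WAIT_MAP
 *	SD_WM_WAIT_MAP   --(woken up)-->        SD_WM_CHK_LIST
 *	SD_WM_CHK_LIST   --(range free)-->      SD_WM_LOCK_RANGE
 *	SD_WM_LOCK_RANGE --(wmap obtained)-->   SD_WM_DONE
 *	SD_WM_LOCK_RANGE --(allocation slept, mutex was dropped)--> SD_WM_CHK_LIST
 */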


/*
 * Function: sd_get_range()
 *
 * Description: Find whether there is any I/O overlapping this one.
 *		Returns the write map of the first such I/O, or NULL
 *		if there is none.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
    struct sd_w_map *wmp;

    ASSERT(un != NULL);

    for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
        if (!(wmp->wm_flags & SD_WM_BUSY)) {
            continue;
        }
        if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
            break;
        }
        if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
            break;
        }
    }

    return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
    ASSERT(un != NULL);

    if (un->un_wm == wmp) {
        un->un_wm = wmp->wm_next;
    } else {
        wmp->wm_prev->wm_next = wmp->wm_next;
    }

    if (wmp->wm_next) {
        wmp->wm_next->wm_prev = wmp->wm_prev;
    }

    wmp->wm_next = wmp->wm_prev = NULL;

    kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wm	- sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
    ASSERT(un != NULL);
    ASSERT(wm != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));

    if (wm->wm_flags & SD_WTYPE_RMW) {
        un->un_rmw_count--;
    }

    if (wm->wm_wanted_count) {
        wm->wm_flags = 0;
        /*
         * Broadcast that the wmap is available now.
         */
        cv_broadcast(&wm->wm_avail);
    } else {
        /*
         * If no one is waiting on the map, it should be freed.
         */
        sd_free_inlist_wmap(un, wm);
    }

    mutex_exit(SD_MUTEX(un));
}
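
/*
 * How the two halves pair up in a caller (a sketch; the block numbers
 * are illustrative):
 *
 *	struct sd_w_map *wm;
 *
 *	wm = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... read, modify, then write the locked range ...
 *	sd_range_unlock(un, wm);
 */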


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg	- a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
    struct sd_mapblocksize_info *bsp;
    struct buf *bp;
    struct sd_xbuf *xp;
    struct sd_lun *un;

    bp = arg;	/* The bp is given in arg */
    ASSERT(bp != NULL);

    /* Get the pointer to the layer-private data struct */
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    bsp = xp->xb_private;
    ASSERT(bsp != NULL);

    un = SD_GET_UN(bp);
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    SD_TRACE(SD_LOG_IO_RMMEDIA, un,
        "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

    /*
     * This is the write phase of a read-modify-write request. It is
     * called in the context of a taskq thread because the read portion
     * of the rmw request completed under interrupt context. The write
     * request must be sent from here down the iostart chain as if it
     * were being sent from sd_mapblocksize_iostart(), so we use the
     * layer index saved in the layer-private data area.
     */
    SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

    SD_TRACE(SD_LOG_IO_RMMEDIA, un,
        "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
    int err;
    int i;
    int rval;
    struct buf *bp;
    struct scsi_pkt *pkt = NULL;
    uint32_t target_blocksize;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    target_blocksize = un->un_tgt_blocksize;

    mutex_exit(SD_MUTEX(un));

    bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
        (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
    if (bp == NULL) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
            "no resources for dumping; giving up");
        err = ENOMEM;
        goto done;
    }

    rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
        blkno, nblk);
    if (rval != 0) {
        scsi_free_consistent_buf(bp);
        scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
            "no resources for dumping; giving up");
        err = ENOMEM;
        goto done;
    }

    pkt->pkt_flags |= FLAG_NOINTR;

    err = EIO;
    for (i = 0; i < SD_NDUMP_RETRIES; i++) {

        /*
         * sd_scsi_poll() returns 0 (success) if the command completes
         * and the status block is STATUS_GOOD. We should only check
         * errors if this condition is not true. Even then we should
         * send our own request sense packet only if we have a check
         * condition and auto request sense has not been performed by
         * the hba.
         */
        SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

        if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
            err = 0;
            break;
        }

        /*
         * Check CMD_DEV_GONE first; give up if the device is gone,
         * no need to read RQS data.
         */
        if (pkt->pkt_reason == CMD_DEV_GONE) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Error while dumping state with rmw..."
                "Device is gone\n");
            break;
        }

        if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
            SD_INFO(SD_LOG_DUMP, un,
                "sddump: read failed with CHECK, try # %d\n", i);
            if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
                (void) sd_send_polled_RQS(un);
            }

            continue;
        }

        if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
            int reset_retval = 0;

            SD_INFO(SD_LOG_DUMP, un,
                "sddump: read failed with BUSY, try # %d\n", i);

            if (un->un_f_lun_reset_enabled == TRUE) {
                reset_retval = scsi_reset(SD_ADDRESS(un),
                    RESET_LUN);
            }
            if (reset_retval == 0) {
                (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
            }
            (void) sd_send_polled_RQS(un);

        } else {
            SD_INFO(SD_LOG_DUMP, un,
                "sddump: read failed with 0x%x, try # %d\n",
                SD_GET_PKT_STATUS(pkt), i);
            mutex_enter(SD_MUTEX(un));
            sd_reset_target(un, pkt);
            mutex_exit(SD_MUTEX(un));
        }

        /*
         * If we are not getting anywhere with lun/target resets,
         * let's reset the bus.
         */
        if (i > SD_NDUMP_RETRIES/2) {
            (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
            (void) sd_send_polled_RQS(un);
        }

    }
    scsi_destroy_pkt(pkt);

    if (err != 0) {
        scsi_free_consistent_buf(bp);
        *bpp = NULL;
    } else {
        *bpp = bp;
    }

done:
    mutex_enter(SD_MUTEX(un));
    return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un	- pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
    struct buf *bp;
    struct buf *next_waitq_bp;
    struct buf *prev_waitq_bp = NULL;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
    ASSERT(un->un_failfast_bp == NULL);

    SD_TRACE(SD_LOG_IO_FAILFAST, un,
        "sd_failfast_flushq: entry: un:0x%p\n", un);

    /*
     * Check if we should flush all bufs when entering failfast state, or
     * just those with B_FAILFAST set.
     */
    if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
        /*
         * Move *all* bp's on the wait queue to the failfast flush
         * queue, including those that do NOT have B_FAILFAST set.
         */
        if (un->un_failfast_headp == NULL) {
            ASSERT(un->un_failfast_tailp == NULL);
            un->un_failfast_headp = un->un_waitq_headp;
        } else {
            ASSERT(un->un_failfast_tailp != NULL);
            un->un_failfast_tailp->av_forw = un->un_waitq_headp;
        }

        un->un_failfast_tailp = un->un_waitq_tailp;

        /* update kstat for each bp moved out of the waitq */
        for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
            SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
        }

        /* empty the waitq */
        un->un_waitq_headp = un->un_waitq_tailp = NULL;

    } else {
        /*
         * Go through the wait queue, pick off all entries with
         * B_FAILFAST set, and move these onto the failfast queue.
         */
        for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
            /*
             * Save the pointer to the next bp on the wait queue,
             * so we get to it on the next iteration of this loop.
             */
            next_waitq_bp = bp->av_forw;

            /*
             * If this bp from the wait queue does NOT have
             * B_FAILFAST set, just move on to the next element
             * in the wait queue. Note, this is the only place
             * where it is correct to set prev_waitq_bp.
             */
            if ((bp->b_flags & B_FAILFAST) == 0) {
                prev_waitq_bp = bp;
                continue;
            }

            /*
             * Remove the bp from the wait queue.
             */
            if (bp == un->un_waitq_headp) {
                /* The bp is the first element of the waitq. */
                un->un_waitq_headp = next_waitq_bp;
                if (un->un_waitq_headp == NULL) {
                    /* The wait queue is now empty */
                    un->un_waitq_tailp = NULL;
                }
            } else {
                /*
                 * The bp is either somewhere in the middle
                 * or at the end of the wait queue.
                 */
                ASSERT(un->un_waitq_headp != NULL);
                ASSERT(prev_waitq_bp != NULL);
                ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
                    == 0);
                if (bp == un->un_waitq_tailp) {
                    /* bp is the last entry on the waitq. */
                    ASSERT(next_waitq_bp == NULL);
                    un->un_waitq_tailp = prev_waitq_bp;
                }
                prev_waitq_bp->av_forw = next_waitq_bp;
            }
            bp->av_forw = NULL;

            /*
             * update kstat since the bp is moved out of
             * the waitq
             */
            SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

            /*
             * Now put the bp onto the failfast queue.
             */
            if (un->un_failfast_headp == NULL) {
                /* failfast queue is currently empty */
                ASSERT(un->un_failfast_tailp == NULL);
                un->un_failfast_headp =
                    un->un_failfast_tailp = bp;
            } else {
                /* Add the bp to the end of the failfast q */
                ASSERT(un->un_failfast_tailp != NULL);
                ASSERT(un->un_failfast_tailp->b_flags &
                    B_FAILFAST);
                un->un_failfast_tailp->av_forw = bp;
                un->un_failfast_tailp = bp;
            }
        }
    }

    /*
     * Now return all bp's on the failfast queue to their owners.
     */
    while ((bp = un->un_failfast_headp) != NULL) {

        un->un_failfast_headp = bp->av_forw;
        if (un->un_failfast_headp == NULL) {
            un->un_failfast_tailp = NULL;
        }

        /*
         * We want to return the bp with a failure error code, but
         * we do not want a call to sd_start_cmds() to occur here,
         * so use sd_return_failed_command_no_restart() instead of
         * sd_return_failed_command().
         */
        sd_return_failed_command_no_restart(un, bp, EIO);
    }

    /* Flush the xbuf queues if required. */
    if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
        ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
    }

    SD_TRACE(SD_LOG_IO_FAILFAST, un,
        "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp	- ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
    /*
     * Return TRUE if (1) we want to flush ALL bufs when the failfast
     * state is entered; OR (2) the given bp has B_FAILFAST set.
     */
    return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
        (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
    ssize_t num_blks_not_xfered;
    daddr_t strt_blk_num;
    ssize_t bytes_not_xfered;
    int rval;

    ASSERT(pkt->pkt_resid == 0);

    /*
     * Calculate next block number and amount to be transferred.
     *
     * How much data has NOT been transferred to the HBA yet.
     */
    bytes_not_xfered = xp->xb_dma_resid;

    /*
     * figure how many blocks have NOT been transferred to the HBA yet.
     */
    num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

    /*
     * set starting block number to the end of what WAS transferred.
     */
    strt_blk_num = xp->xb_blkno +
        SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

    /*
     * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
     * will call scsi_init_pkt with NULL_FUNC so we do not have to
     * release the disk mutex here.
     */
    rval = sd_setup_next_rw_pkt(un, pkt, bp,
        strt_blk_num, num_blks_not_xfered);

    if (rval == 0) {

        /*
         * Success.
         *
         * Adjust things if there are still more blocks to be
         * transferred.
         */
        xp->xb_dma_resid = pkt->pkt_resid;
        pkt->pkt_resid = 0;

        return (1);
    }

    /*
     * There's really only one possible return value from
     * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
     * returns NULL.
     */
    ASSERT(rval == SD_PKT_ALLOC_FAILURE);

    bp->b_resid = bp->b_bcount;
    bp->b_flags |= B_ERROR;

    scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
        "Error setting up next portion of DMA transfer\n");

    return (0);
}
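
/*
 * A worked example for the arithmetic in sd_setup_next_xfer() above
 * (illustrative numbers only): with b_bcount = 1 MB, xb_dma_resid =
 * 256 KB, and a 512-byte target block size, 768 KB have already been
 * transferred, so the next request starts at xb_blkno + 1536 blocks
 * and covers the remaining 512 blocks.
 */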

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un	- pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
    char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN];
    char path_str[MAXPATHLEN];

    (void) snprintf(panic_str, sizeof (panic_str),
        "Reservation Conflict\nDisk: %s",
        ddi_pathname(SD_DEVINFO(un), path_str));

    panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for handling fault injection for error analysis,
 * causing faults in multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd	- the ioctl cmd received
 *		arg	- the arguments from the user, also used to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{

    uint_t i;
    uint_t rval;

    SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

    mutex_enter(SD_MUTEX(un));

    switch (cmd) {
    case SDIOCRUN:
        /* Allow pushed faults to be injected */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Run\n");

        sd_fault_injection_on = 1;

        SD_INFO(SD_LOG_IOERR, un,
            "sd_faultinjection_ioctl: run finished\n");
        break;

    case SDIOCSTART:
        /* Start Injection Session */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Start\n");

        sd_fault_injection_on = 0;
        un->sd_injection_mask = 0xFFFFFFFF;
        for (i = 0; i < SD_FI_MAX_ERROR; i++) {
            un->sd_fi_fifo_pkt[i] = NULL;
            un->sd_fi_fifo_xb[i] = NULL;
            un->sd_fi_fifo_un[i] = NULL;
            un->sd_fi_fifo_arq[i] = NULL;
        }
        un->sd_fi_fifo_start = 0;
        un->sd_fi_fifo_end = 0;

        mutex_enter(&(un->un_fi_mutex));
        un->sd_fi_log[0] = '\0';
        un->sd_fi_buf_len = 0;
        mutex_exit(&(un->un_fi_mutex));

        SD_INFO(SD_LOG_IOERR, un,
            "sd_faultinjection_ioctl: start finished\n");
        break;

    case SDIOCSTOP:
        /* Stop Injection Session */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Stop\n");
        sd_fault_injection_on = 0;
        un->sd_injection_mask = 0x0;

        /* Empty stray or unused structs from fifo */
        for (i = 0; i < SD_FI_MAX_ERROR; i++) {
            if (un->sd_fi_fifo_pkt[i] != NULL) {
                kmem_free(un->sd_fi_fifo_pkt[i],
                    sizeof (struct sd_fi_pkt));
            }
            if (un->sd_fi_fifo_xb[i] != NULL) {
                kmem_free(un->sd_fi_fifo_xb[i],
                    sizeof (struct sd_fi_xb));
            }
            if (un->sd_fi_fifo_un[i] != NULL) {
                kmem_free(un->sd_fi_fifo_un[i],
                    sizeof (struct sd_fi_un));
            }
            if (un->sd_fi_fifo_arq[i] != NULL) {
                kmem_free(un->sd_fi_fifo_arq[i],
                    sizeof (struct sd_fi_arq));
            }
            un->sd_fi_fifo_pkt[i] = NULL;
            un->sd_fi_fifo_un[i] = NULL;
            un->sd_fi_fifo_xb[i] = NULL;
            un->sd_fi_fifo_arq[i] = NULL;
        }
        un->sd_fi_fifo_start = 0;
        un->sd_fi_fifo_end = 0;

        SD_INFO(SD_LOG_IOERR, un,
            "sd_faultinjection_ioctl: stop finished\n");
        break;

    case SDIOCINSERTPKT:
        /* Store a packet struct to be pushed onto fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        /* No more than SD_FI_MAX_ERROR allowed in Queue */
        if (un->sd_fi_fifo_pkt[i] != NULL) {
            kmem_free(un->sd_fi_fifo_pkt[i],
                sizeof (struct sd_fi_pkt));
        }
        if (arg != NULL) {
            un->sd_fi_fifo_pkt[i] =
                kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
            if (un->sd_fi_fifo_pkt[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
                sizeof (struct sd_fi_pkt), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_pkt[i],
                    sizeof (struct sd_fi_pkt));
                un->sd_fi_fifo_pkt[i] = NULL;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: pkt null\n");
        }
        break;

    case SDIOCINSERTXB:
        /* Store an xb struct to be pushed onto fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_xb[i] != NULL) {
            kmem_free(un->sd_fi_fifo_xb[i],
                sizeof (struct sd_fi_xb));
            un->sd_fi_fifo_xb[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_xb[i] =
                kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
            if (un->sd_fi_fifo_xb[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
                sizeof (struct sd_fi_xb), 0);

            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_xb[i],
                    sizeof (struct sd_fi_xb));
                un->sd_fi_fifo_xb[i] = NULL;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: xb null\n");
        }
        break;

    case SDIOCINSERTUN:
        /* Store a un struct to be pushed onto fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_un[i] != NULL) {
            kmem_free(un->sd_fi_fifo_un[i],
                sizeof (struct sd_fi_un));
            un->sd_fi_fifo_un[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_un[i] =
                kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
            if (un->sd_fi_fifo_un[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
                sizeof (struct sd_fi_un), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_un[i],
                    sizeof (struct sd_fi_un));
                un->sd_fi_fifo_un[i] = NULL;
            }

        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: un null\n");
        }

        break;

    case SDIOCINSERTARQ:
        /* Store an arq struct to be pushed onto fifo */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
        i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

        sd_fault_injection_on = 0;

        if (un->sd_fi_fifo_arq[i] != NULL) {
            kmem_free(un->sd_fi_fifo_arq[i],
                sizeof (struct sd_fi_arq));
            un->sd_fi_fifo_arq[i] = NULL;
        }
        if (arg != NULL) {
            un->sd_fi_fifo_arq[i] =
                kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
            if (un->sd_fi_fifo_arq[i] == NULL) {
                /* Alloc failed; don't store anything */
                break;
            }
            rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
                sizeof (struct sd_fi_arq), 0);
            if (rval == -1) {
                kmem_free(un->sd_fi_fifo_arq[i],
                    sizeof (struct sd_fi_arq));
                un->sd_fi_fifo_arq[i] = NULL;
            }

        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: arq null\n");
        }

        break;

    case SDIOCPUSH:
        /* Push stored xb, pkt, un, and arq onto fifo */
        sd_fault_injection_on = 0;

        if (arg != NULL) {
            rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
            if (rval != -1 &&
                un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
                un->sd_fi_fifo_end += i;
            }
        } else {
            SD_INFO(SD_LOG_IOERR, un,
                "sd_faultinjection_ioctl: push arg null\n");
            if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
                un->sd_fi_fifo_end++;
            }
        }
        SD_INFO(SD_LOG_IOERR, un,
            "sd_faultinjection_ioctl: push to end=%d\n",
            un->sd_fi_fifo_end);
        break;

    case SDIOCRETRIEVE:
        /* Return buffer of log from Injection session */
        SD_INFO(SD_LOG_SDTEST, un,
            "sd_faultinjection_ioctl: Injecting Fault Retrieve");

        sd_fault_injection_on = 0;

        mutex_enter(&(un->un_fi_mutex));
        rval = ddi_copyout(un->sd_fi_log, (void *)arg,
            un->sd_fi_buf_len + 1, 0);
        mutex_exit(&(un->un_fi_mutex));

        if (rval == -1) {
            /*
             * arg is possibly invalid; set it to NULL
             * for the return.
             */
            arg = NULL;
        }
        break;
    }

    mutex_exit(SD_MUTEX(un));
    SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}
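
/*
 * Illustrative sketch (not part of the driver): a test harness might
 * stage and arm a fault roughly as follows, using the SDIOC* requests
 * handled above. The fd and the contents of the sd_fi_pkt structure
 * are hypothetical.
 *
 *	struct sd_fi_pkt fi_pkt;		-- fault to inject
 *	uint_t n = 1;
 *
 *	(void) ioctl(fd, SDIOCSTART, NULL);	-- reset the fifo and log
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	-- stage a pkt fault
 *	(void) ioctl(fd, SDIOCPUSH, &n);	-- push staged entries
 *	(void) ioctl(fd, SDIOCRUN, NULL);	-- arm injection
 */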


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 * Arguments: buf	- the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
    uint_t len;

    ASSERT(un != NULL);
    ASSERT(buf != NULL);

    mutex_enter(&(un->un_fi_mutex));

    len = min(strlen(buf), 255);
    /* Add logged value to Injection log to be returned later */
    if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
        uint_t offset = strlen((char *)un->sd_fi_log);
        char *destp = (char *)un->sd_fi_log + offset;
        int i;
        for (i = 0; i < len; i++) {
            *destp++ = *buf++;
        }
        un->sd_fi_buf_len += len;
        un->sd_fi_log[un->sd_fi_buf_len] = '\0';
    }

    mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
    uint_t i;
    struct sd_fi_pkt *fi_pkt;
    struct sd_fi_xb *fi_xb;
    struct sd_fi_un *fi_un;
    struct sd_fi_arq *fi_arq;
    struct buf *bp;
    struct sd_xbuf *xb;
    struct sd_lun *un;

    ASSERT(pktp != NULL);

    /* pull bp, xb and un from pktp */
    bp = (struct buf *)pktp->pkt_private;
    xb = SD_GET_XBUF(bp);
    un = SD_GET_UN(bp);

    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un,
        "sd_faultinjection: entry Injection from sdintr\n");

    /* if injection is off, return */
    if (sd_fault_injection_on == 0 ||
        un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
        mutex_exit(SD_MUTEX(un));
        return;
    }


    /* take next set off fifo */
    i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

    fi_pkt = un->sd_fi_fifo_pkt[i];
    fi_xb = un->sd_fi_fifo_xb[i];
    fi_un = un->sd_fi_fifo_un[i];
    fi_arq = un->sd_fi_fifo_arq[i];


    /* set variables accordingly */
    /* set pkt if it was on fifo */
    if (fi_pkt != NULL) {
        SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
        SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
        SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
        SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
        SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
        SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

    }

    /* set xb if it was on fifo */
    if (fi_xb != NULL) {
        SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
        SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
        SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
        SD_CONDSET(xb, xb, xb_victim_retry_count,
            "xb_victim_retry_count");
        SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
        SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
        SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

        /* copy in block data from sense */
        if (fi_xb->xb_sense_data[0] != -1) {
            bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
                SENSE_LENGTH);
        }

        /* copy in extended sense codes */
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
            "es_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
            "es_key");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
            "es_add_code");
        SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
            es_qual_code, "es_qual_code");
    }

    /* set un if it was on fifo */
    if (fi_un != NULL) {
        SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
        SD_CONDSET(un, un, un_ctype, "un_ctype");
        SD_CONDSET(un, un, un_reset_retry_count,
            "un_reset_retry_count");
        SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
        SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
        SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
        SD_CONDSET(un, un, un_f_allow_bus_device_reset,
            "un_f_allow_bus_device_reset");
        SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

    }

    /* copy in auto request sense if it was on fifo */
    if (fi_arq != NULL) {
        bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
    }

    /* free structs */
    if (un->sd_fi_fifo_pkt[i] != NULL) {
        kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
    }
    if (un->sd_fi_fifo_xb[i] != NULL) {
        kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
    }
    if (un->sd_fi_fifo_un[i] != NULL) {
        kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
    }
    if (un->sd_fi_fifo_arq[i] != NULL) {
        kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
    }

    /*
     * kmem_free() does not guarantee that the pointer is set to NULL.
     * Since we use these pointers to determine whether values were set,
     * make sure they are always NULL after the free.
     */
    un->sd_fi_fifo_pkt[i] = NULL;
    un->sd_fi_fifo_un[i] = NULL;
    un->sd_fi_fifo_xb[i] = NULL;
    un->sd_fi_fifo_arq[i] = NULL;

    un->sd_fi_fifo_start++;

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked from sd_unit_attach(). Before it is called, the
 * properties in the conf file, including the "hotpluggable" property, must
 * already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device which can provide the 0x80 or
 *    0x83 VPD page (refer to the INQUIRY command of the SCSI SPC
 *    specification), a unique device ID is created to identify this device.
 *    For other non-removable media devices, a default device ID is created
 *    only if the device has at least 2 alternate cylinders. Otherwise, the
 *    device has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *         false             false    |     Yes
 *         false             true     |     Yes
 *         true                x      |     No
 *    ------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses, which are needed to access storage spaces
 *    larger than 2TB. Other commands have no such capability. Without
 *    supporting group 4, it is impossible to make full use of the storage
 *    space of a disk with a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *          false          false       false |   1
 *          false          false       true  |   4
 *          false          true        false |   1
 *          false          true        true  |   4
 *          true             x           x   |   5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
 *
 *    --------------------------------------------------------------
 *    Direct-Access   removable media    hotpluggable |  Check Label
 *    -------------------------------------------------------------
 *        false          false           false        |   No
 *        false          false           true         |   No
 *        false          true            false        |   Yes
 *        false          true            true         |   Yes
 *        true            x                x          |   Yes
 *    --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a
 *    VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *    attempt to create a default VTOC for them. Currently sd creates a
 *    default VTOC label for all devices on the x86 platform (VTOC_16),
 *    but only for removable media devices on SPARC (VTOC_8).
 *
 *    -----------------------------------------------------------
 *       removable media   hotpluggable  platform | Default Label
 *    -----------------------------------------------------------
 *             false          false       sparc   |     No
 *             false          false        x86    |     Yes
 *             false          true         x86    |     Yes
 *             false          true        sparc   |     Yes
 *             true             x           x     |     Yes
 *    ----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *    Sd supports non-512-byte blocksizes for removable media devices only.
 *    For other devices, only the 512-byte blocksize is supported. This may
 *    change in the near future because some RAID devices require a
 *    non-512-byte blocksize.
 *
 *    -----------------------------------------------------------
 *    removable media    hotpluggable | non-512-byte blocksize
 *    -----------------------------------------------------------
 *          false          false      |   No
 *          false          true       |   No
 *          true             x        |   Yes
 *    -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is
 *    used to query whether a device is a removable media device. It
 *    returns 1 for removable media devices, and 0 for others.
 *
 *    The automatic mounting subsystem should distinguish between the types
 *    of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *    Fdisk is the traditional partitioning method on the x86 platform. The
 *    sd(7d) driver supports fdisk partitions only on the x86 platform. On
 *    the SPARC platform, sd doesn't support fdisk partitions at all.
 *    Note: pcfs(7fs) can recognize fdisk partitions on both the x86 and
 *    SPARC platforms.
 *
 *    -----------------------------------------------------------
 *       platform   removable media  USB/1394  |  fdisk supported
 *    -----------------------------------------------------------
 *        x86           X               X      |       true
 *    ------------------------------------------------------------
 *        sparc         X               X      |       false
 *    ------------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *    support read/write mboot for removable media devices on the SPARC
 *    platform.
 *
 *
 * 7. fdisk partition management
 *
 * Fdisk is the traditional partitioning method on the x86 platform. The
 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
 * fdisk partitions on both x86 and SPARC platforms.
 *
 * -----------------------------------------------------------
 * platform  removable media  USB/1394  |  fdisk supported
 * -----------------------------------------------------------
 *   x86           X             X      |       true
 * -----------------------------------------------------------
 *   sparc         X             X      |       false
 * -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 * support reading and writing the mboot (master boot record) for
 * removable media devices on SPARC.
 *
 * -----------------------------------------------------------
 * platform  removable media  USB/1394  |  mboot supported
 * -----------------------------------------------------------
 *   x86           X             X      |       true
 * -----------------------------------------------------------
 *   sparc       false         false    |       false
 *   sparc       false         true     |       true
 *   sparc       true          false    |       true
 *   sparc       true          true     |       true
 * -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 * If opening a disk device fails, an errno is returned. For some kinds of
 * errors, the errno returned differs depending on whether the device is a
 * removable media device. This brings USB/1394 hard disks in line with
 * expected hard disk behavior and is not expected to break any
 * application.
 *
 * ------------------------------------------------------
 * removable media  hotpluggable  |  errno
 * ------------------------------------------------------
 *     false           false      |  EIO
 *     false           true       |  EIO
 *     true              x        |  ENXIO
 * ------------------------------------------------------
 *
 *
 * 11. ioctls: DKIOCEJECT, CDROMEJECT
 *
 * These ioctls are applicable only to removable media devices.
 *
 * -----------------------------------------------------------
 * removable media  hotpluggable  |  DKIOCEJECT, CDROMEJECT
 * -----------------------------------------------------------
 *     false           false      |          No
 *     false           true       |          No
 *     true              x        |          Yes
 * -----------------------------------------------------------
 *
 *
 * 12. Kstats for partitions
 *
 * Sd creates partition kstats for non-removable media devices. USB and
 * FireWire hard disks now have partition kstats as well.
 *
 * ------------------------------------------------------
 * removable media  hotpluggable  |  kstat
 * ------------------------------------------------------
 *     false           false      |  Yes
 *     false           true       |  Yes
 *     true              x        |  No
 * ------------------------------------------------------
 *
 *
 * 13. Removable media & hotpluggable properties
 *
 * The sd driver creates a "removable-media" property for removable media
 * devices. Parent nexus drivers create a "hotpluggable" property if they
 * support hotplugging. A sketch of how other code might test for these
 * properties follows the table.
 *
 * ---------------------------------------------------------------------
 * removable media  hotpluggable  |  "removable-media"  "hotpluggable"
 * ---------------------------------------------------------------------
 *     false           false      |        No                 No
 *     false           true       |        No                 Yes
 *     true            false      |        Yes                No
 *     true            true       |        Yes                Yes
 * ---------------------------------------------------------------------
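 *
 * For illustration, kernel code could test for these boolean properties
 * roughly as follows (a sketch; see ddi_prop_exists(9F)):
 *
 *	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "removable-media") != 0)
 *		... the device advertises removable media ...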
 *
 *
 * 14. Power Management
 *
 * Sd only power manages removable media devices or devices that support
 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 * A parent nexus that supports hotplugging can also set "pm-capable" if
 * the disk can be power managed.
 *
 * ------------------------------------------------------------
 * removable media  hotpluggable  pm-capable  |  power manage
 * ------------------------------------------------------------
 *     false           false        false     |      No
 *     false           false        true      |      Yes
 *     false           true         false     |      No
 *     false           true         true      |      Yes
 *     true              x            x       |      Yes
 * ------------------------------------------------------------
 *
 * USB and FireWire hard disks can now be power managed independently of
 * the framebuffer.
 *
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd doesn't permit a fixed disk device with a capacity larger
 * than 1TB to be used in a 32-bit operating system environment. However,
 * sd doesn't apply that restriction to removable media devices; instead,
 * it assumes that removable media devices cannot have a capacity larger
 * than 1TB. Therefore, using such devices on a 32-bit system is only
 * partially supported, which can cause unexpected results.
 *
 * ---------------------------------------------------------------------
 * removable media  USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 * ---------------------------------------------------------------------
 *     false          false   |       true       |         No
 *     false          true    |       true       |         No
 *     true           false   |       true       |         Yes
 *     true           true    |       true       |         Yes
 * ---------------------------------------------------------------------
 *
 *
 * 16. Check write-protection at open time
 *
 * When a removable media device is opened for writing without the NDELAY
 * flag, sd checks whether the device is write-protected; if it is, the
 * open fails.
 *
 * ------------------------------------------------------------
 * removable media  USB/1394  |  WP Check
 * ------------------------------------------------------------
 *     false          false   |    No
 *     false          true    |    No
 *     true           false   |    Yes
 *     true           true    |    Yes
 * ------------------------------------------------------------
 *
 *
 * 17. Syslog when a corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd prints a syslog
 * message only for fixed SCSI disks.
 *
 * ------------------------------------------------------------
 * removable media  USB/1394  |  print syslog
 * ------------------------------------------------------------
 *     false          false   |     Yes
 *     false          true    |     No
 *     true           false   |     No
 *     true           true    |     No
 * ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, so it is
		 * possible to change the medium after the device has
		 * been opened. Thus we must support that operation.
27726 */ 27727 un->un_f_has_removable_media = TRUE; 27728 27729 /* 27730 * support non-512-byte blocksize of removable media devices 27731 */ 27732 un->un_f_non_devbsize_supported = TRUE; 27733 27734 /* 27735 * Assume that all removable media devices support DOOR_LOCK 27736 */ 27737 un->un_f_doorlock_supported = TRUE; 27738 27739 /* 27740 * For a removable media device, it is possible to be opened 27741 * with NDELAY flag when there is no media in drive, in this 27742 * case we don't care if device is writable. But if without 27743 * NDELAY flag, we need to check if media is write-protected. 27744 */ 27745 un->un_f_chk_wp_open = TRUE; 27746 27747 /* 27748 * need to start a SCSI watch thread to monitor media state, 27749 * when media is being inserted or ejected, notify syseventd. 27750 */ 27751 un->un_f_monitor_media_state = TRUE; 27752 27753 /* 27754 * Some devices don't support START_STOP_UNIT command. 27755 * Therefore, we'd better check if a device supports it 27756 * before sending it. 27757 */ 27758 un->un_f_check_start_stop = TRUE; 27759 27760 /* 27761 * support eject media ioctl: 27762 * FDEJECT, DKIOCEJECT, CDROMEJECT 27763 */ 27764 un->un_f_eject_media_supported = TRUE; 27765 27766 /* 27767 * Because many removable-media devices don't support 27768 * LOG_SENSE, we couldn't use this command to check if 27769 * a removable media device support power-management. 27770 * We assume that they support power-management via 27771 * START_STOP_UNIT command and can be spun up and down 27772 * without limitations. 27773 */ 27774 un->un_f_pm_supported = TRUE; 27775 27776 /* 27777 * Need to create a zero length (Boolean) property 27778 * removable-media for the removable media devices. 27779 * Note that the return value of the property is not being 27780 * checked, since if unable to create the property 27781 * then do not want the attach to fail altogether. Consistent 27782 * with other property creation in attach. 27783 */ 27784 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 27785 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 27786 27787 } else { 27788 /* 27789 * create device ID for device 27790 */ 27791 un->un_f_devid_supported = TRUE; 27792 27793 /* 27794 * Spin up non-removable-media devices once it is attached 27795 */ 27796 un->un_f_attach_spinup = TRUE; 27797 27798 /* 27799 * According to SCSI specification, Sense data has two kinds of 27800 * format: fixed format, and descriptor format. At present, we 27801 * don't support descriptor format sense data for removable 27802 * media. 27803 */ 27804 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 27805 un->un_f_descr_format_supported = TRUE; 27806 } 27807 27808 /* 27809 * kstats are created only for non-removable media devices. 27810 * 27811 * Set this in sd.conf to 0 in order to disable kstats. The 27812 * default is 1, so they are enabled by default. 27813 */ 27814 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 27815 SD_DEVINFO(un), DDI_PROP_DONTPASS, 27816 "enable-partition-kstats", 1)); 27817 27818 /* 27819 * Check if HBA has set the "pm-capable" property. 27820 * If "pm-capable" exists and is non-zero then we can 27821 * power manage the device without checking the start/stop 27822 * cycle count log sense page. 27823 * 27824 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 27825 * then we should not power manage the device. 27826 * 27827 * If "pm-capable" doesn't exist then pm_capable_prop will 27828 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
		/*
		 * Check whether the HBA has set the "pm-capable"
		 * property. If "pm-capable" exists and is non-zero, then
		 * we can power manage the device without checking the
		 * start/stop cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist, then pm_capable_prop
		 * will be set to SD_PM_CAPABLE_UNDEFINED (-1). In this
		 * case, sd will check the start/stop cycle count log
		 * sense page and power manage the device if the cycle
		 * count limit has not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable",
		    SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make later checks
			 * easier. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. The start_block
 *	argument is in system block size units; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun	*un;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	char		*dkl = NULL;
	diskaddr_t	real_addr = start_block;
	diskaddr_t	first_byte, end_block;

	size_t		buffer_size = reqlength;
	int		rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * number and save the byte offset to the beginning of
		 * the dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}
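
	/*
	 * A worked example of the adjustment above (illustrative numbers,
	 * not from a real device): with a 2048-byte target blocksize and
	 * 512-byte system blocks, a 512-byte read at start_block 3 gives
	 *
	 *	first_byte  = 3 * 512                    = 1536
	 *	real_addr   = 1536 / 2048                = 0
	 *	end_block   = (1536 + 512 + 2047) / 2048 = 1
	 *	buffer_size = (1 - 0) * 2048             = 2048
	 *
	 * Since first_byte is not target-block aligned, a 2048-byte bounce
	 * buffer (dkl) is allocated and the requested 512 bytes are copied
	 * out of it at byte offset 1536.
	 */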

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata and not data). Depending on the per-Vendor/drive
	 * Sense data, the failed READ can cause many (unnecessary)
	 * retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun	*un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un,
			    (uint64_t *)&cap, &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
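
/*
 * For illustration only: cmlb does not call sd_tg_getinfo() directly; it
 * reaches the routine above through the tg_ops vector (sd_tgops)
 * registered with cmlb at attach time. Conceptually, a capacity query
 * flows as in this sketch (hypothetical caller, not driver code):
 *
 *	diskaddr_t cap;
 *
 *	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
 *	    (void *)SD_PATH_DIRECT) == 0)
 *		... cap now holds the device block count ...
 */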