/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
	ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t	sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t	sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
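
/*
 * Illustrative sketch only: one plausible shape for updating the per-target
 * lun counts in the chain above, in the style of
 * sd_scsi_update_lun_on_target() (declared later in this file). The function
 * name and the SD_EXAMPLE_SKETCH guard are hypothetical, so this is never
 * compiled into the driver.
 */
#ifdef	SD_EXAMPLE_SKETCH
static void
sd_example_update_lun_count(dev_info_t *pdip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	/* Find the node for this controller (parent dev_info). */
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			break;
		}
	}

	/* First lun seen on this controller: add a node to the chain. */
	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);
}
#endif	/* SD_EXAMPLE_SKETCH */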

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
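
/*
 * Illustrative sketch only: the basic idea behind sd_scsi_probe_with_cache()
 * (declared later in this file). A cached SCSIPROBE_NORESP result lets the
 * driver skip re-probing a target that did not respond earlier. The function
 * name and the SD_EXAMPLE_SKETCH guard are hypothetical, so this is never
 * compiled into the driver.
 */
#ifdef	SD_EXAMPLE_SKETCH
static int
sd_example_probe_with_cache(struct sd_scsi_probe_cache *cp, int tgt,
    struct scsi_device *devp, int (*waitfn)())
{
	int	rval;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	if (cp->cache[tgt] == SCSIPROBE_NORESP) {
		/* Target did not respond last time; skip the real probe. */
		mutex_exit(&sd_scsi_probe_cache_mutex);
		return (SCSIPROBE_NORESP);
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);

	rval = scsi_probe(devp, waitfn);

	mutex_enter(&sd_scsi_probe_cache_mutex);
	cp->cache[tgt] = rval;	/* remember the result for next time */
	mutex_exit(&sd_scsi_probe_cache_mutex);

	return (rval);
}
#endif	/* SD_EXAMPLE_SKETCH */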

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};
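
/*
 * For reference, the sd_tunables initializers above and below appear to
 * assign, in order: throttle, ctype, not-ready retry count, busy retry
 * count, reset retry count, reserve/release time, min throttle, the
 * disksort-disabled flag, and the lun-reset-enabled flag; a zero leaves
 * the corresponding default in place. See the sd_tunables definition in
 * sddef.h for the authoritative field list.
 */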

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1726-2xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",          SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300",      SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",                SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
		| SD_CONF_BSET_CTYPE
		| SD_CONF_BSET_NRR_COUNT
		| SD_CONF_BSET_FAB_DEVID
		| SD_CONF_BSET_NOCACHE
		| SD_CONF_BSET_BSY_RETRY_COUNT
		| SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_TRK_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4
		| SD_CONF_BSET_RST_RETRIES
		| SD_CONF_BSET_RSV_REL_TIME
		| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
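
/*
 * Illustrative sketch only: the essence of the table scan performed by
 * sd_process_sdconf_table() (declared later in this file). Each entry is
 * compared only as far as its device_id string is defined; the real code
 * additionally applies the "*"-wildcard and blank-compression rules
 * described in the comment above the table. The function name and the
 * SD_EXAMPLE_SKETCH guard are hypothetical, so this is never compiled
 * into the driver.
 */
#ifdef	SD_EXAMPLE_SKETCH
static sd_disk_config_t *
sd_example_table_lookup(const char *inq_vid_pid)
{
	int	i;

	for (i = 0; i < sd_disk_table_size; i++) {
		if (strncmp(sd_disk_table[i].device_id, inq_vid_pid,
		    strlen(sd_disk_table[i].device_id)) == 0) {
			return (&sd_disk_table[i]);
		}
	}
	return (NULL);
}
#endif	/* SD_EXAMPLE_SKETCH */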


#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)
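
/*
 * Illustrative sketch only: how attach-time code can map the HBA's
 * "interconnect-type" capability onto the values above, falling back to
 * SD_DEFAULT_INTERCONNECT_TYPE when the HBA does not report the property
 * (see the backward-compatibility discussion at the top of this file).
 * The function name and the SD_EXAMPLE_SKETCH guard are hypothetical, so
 * this is never compiled into the driver.
 */
#ifdef	SD_EXAMPLE_SKETCH
static void
sd_example_set_interconnect(struct sd_lun *un, struct scsi_address *ap)
{
	switch (scsi_ifgetcap(ap, "interconnect-type", 1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		break;
	default:
		/* Capability absent (-1) or unsupported (1394, USB, ...) */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		break;
	}
}
#endif	/* SD_EXAMPLE_SKETCH */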

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

/* Columns: CDB length, opcode group code, maximum LBA, maximum transfer len */
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
	ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
	ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
	ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
	ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
	ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
	ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
	ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
	ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
	ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
	ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
	ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
int flag); 1464 static int sd_mhdioc_release(dev_t dev); 1465 static int sd_mhdioc_register_devid(dev_t dev); 1466 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1467 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1468 1469 /* 1470 * SCSI removable prototypes 1471 */ 1472 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1473 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1474 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1475 static int sr_pause_resume(dev_t dev, int mode); 1476 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1477 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1478 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1482 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1487 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1488 static int sr_eject(dev_t dev); 1489 static void sr_ejected(register struct sd_lun *un); 1490 static int sr_check_wp(dev_t dev); 1491 static int sd_check_media(dev_t dev, enum dkio_state state); 1492 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1493 static void sd_delayed_cv_broadcast(void *arg); 1494 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1496 1497 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1498 1499 /* 1500 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1501 */ 1502 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1503 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1504 static void sd_wm_cache_destructor(void *wm, void *un); 1505 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1506 daddr_t endb, ushort_t typ); 1507 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1508 daddr_t endb); 1509 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1510 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1511 static void sd_read_modify_write_task(void * arg); 1512 static int 1513 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1514 struct buf **bpp); 1515 1516 1517 /* 1518 * Function prototypes for failfast support. 
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for x86 support
 */
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp);
#endif


/* Function prototypes for cmlb */
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define SD_FAILFAST_INACTIVE		0
#define SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
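/*
 * Illustrative note (not part of the original source): since
 * sd_failfast_flushctl is a plain driver global, the default flush
 * behavior could be overridden from /etc/system using the standard
 * "set driver:variable" syntax, e.g. to flush ALL bufs on ALL queues:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * where 0x3 is simply the sample value
 * (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES).
 */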
/*
 * SD Testing Fault Injection
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 */
static struct cb_ops sd_cb_ops = {
	sdopen,			/* open */
	sdclose,		/* close */
	sdstrategy,		/* strategy */
	nodev,			/* print */
	sddump,			/* dump */
	sdread,			/* read */
	sdwrite,		/* write */
	sdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	sd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flags */
	CB_REV,			/* cb_rev */
	sdaread,		/* async I/O read entry point */
	sdawrite		/* async I/O write entry point */
};

static struct dev_ops sd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	sdinfo,			/* info */
	nulldev,		/* identify */
	sdprobe,		/* probe */
	sdattach,		/* attach */
	sddetach,		/* detach */
	nodev,			/* reset */
	&sd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	sdpower			/* power */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	SD_MODULE_NAME,		/* Module name. */
	&sd_ops			/* driver ops */
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xB9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */
};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define SD_CHAIN_DISK_IOSTART			0
#define SD_CHAIN_DISK_IOSTART_NO_PM		3
#define SD_CHAIN_RMMEDIA_IOSTART		5
#define SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define SD_CHAIN_CHKSUM_IOSTART			12
#define SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define SD_CHAIN_USCSI_CMD_IOSTART		19
#define SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define SD_CHAIN_DIRECT_CMD_IOSTART		24
#define SD_CHAIN_PRIORITY_CMD_IOSTART		25
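/*
 * Worked example (illustrative): an instance whose buf IO uses the
 * removable-media chain starts at SD_CHAIN_RMMEDIA_IOSTART (index 5),
 * so a buf is processed by walking the table forward to sd_core_iostart():
 *
 *	index 5: sd_mapblockaddr_iostart()
 *	index 6: sd_mapblocksize_iostart()
 *	index 7: sd_pm_iostart()
 *	index 8: sd_core_iostart()
 */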
/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define SD_CHAIN_DISK_IODONE			2
#define SD_CHAIN_DISK_IODONE_NO_PM		4
#define SD_CHAIN_RMMEDIA_IODONE			8
#define SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define SD_CHAIN_CHKSUM_IODONE			15
#define SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define SD_CHAIN_USCSI_CMD_IODONE		20
#define SD_CHAIN_USCSI_CHKSUM_IODONE		22
#define SD_CHAIN_DIRECT_CMD_IODONE		24
#define SD_CHAIN_PRIORITY_CMD_IODONE		25


/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define SD_CHAIN_BUFIO			1	/* regular buf IO */
#define SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define SD_IS_BUFIO(xp) \
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define SD_IS_DIRECT_PRIORITY(xp) \
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without the use of locking, and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;
	int	sci_iodone_index;
};

static struct sd_chain_index	sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define SD_CHAIN_INFO_DISK		0
#define SD_CHAIN_INFO_DISK_NO_PM	1
#define SD_CHAIN_INFO_RMMEDIA		2
#define SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define SD_CHAIN_INFO_CHKSUM		4
#define SD_CHAIN_INFO_CHKSUM_NO_PM	5

/* un->un_uscsi_chain_type must be set to one of these */
#define SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define MAX_INQUIRY_SIZE		0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination, which results in significant performance degradation.
 */
#define SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
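/*
 * Minimal sketch of a pass-through layer built on the macros above
 * (illustrative only, hence the #if 0; the sd_example_* names are
 * hypothetical and not part of the driver). An iostart-side routine
 * does its work and then makes a single tail call via SD_NEXT_IOSTART();
 * its iodone-side counterpart unwinds via SD_NEXT_IODONE() toward
 * sd_buf_iodone() or sd_uscsi_iodone().
 */
#if 0
static void
sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* layer-specific setup on bp would go here */
	SD_NEXT_IOSTART(index, un, bp);		/* single tail call */
}

static void
sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* layer-specific completion processing would go here */
	SD_NEXT_IODONE(index, un, bp);
}
#endif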
/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when driver module loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);

	if (err != 0) {
		return (err);
	}

	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * It's OK to init here even for fibre devices.
	 */
	sd_scsi_probe_cache_init();

	sd_scsi_target_lun_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		sd_scsi_target_lun_fini();

		ddi_soft_state_fini(&sd_state);
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F)
 *
 * Context: Called when driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	sd_scsi_target_lun_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

	ddi_soft_state_fini(&sd_state);

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level-based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */
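/*
 * Example usage (illustrative; the message text is made up): a routine
 * would typically emit a filtered error message via the SD_ERROR macro,
 * e.g.
 *
 *	SD_ERROR(SD_LOG_COMMON, un, "sd_foo: transport failed: %d\n", rval);
 *
 * which reaches sd_log_err() below only when SD_LOG_COMMON is set in
 * sd_component_mask and SD_LOGMASK_ERROR is set in sd_level_mask.
 */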
/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	un - pointer to driver soft state (unit) structure
 *	fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *	un - pointer to driver soft state (unit) structure
 *	fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *	un - pointer to driver soft state (unit) structure
 *	fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *			but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device	*devp;
	int			rval;
	int			instance;

	/*
	 * If it wasn't for pln, sdprobe could actually be nulldev
	 * in the "__fibre" case.
	 */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
		return (DDI_PROBE_DONTCARE);
	}

	devp = ddi_get_driver_private(devi);

	if (devp == NULL) {
		/* Oops... nexus driver is mis-configured... */
		return (DDI_PROBE_FAILURE);
	}

	instance = ddi_get_instance(devi);

	if (ddi_get_soft_state(sd_state, instance) != NULL) {
		return (DDI_PROBE_PARTIAL);
	}

	/*
	 * Call the SCSA utility probe routine to see if we actually
	 * have a target at this SCSI nexus.
	 */
	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
	case SCSIPROBE_EXISTS:
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_RODIRECT:
			/* CDs etc. Can be removable media */
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Rewritable optical driver HP115AA.
			 * Can also be removable media.
			 *
			 * Do not attempt to bind to DTYPE_OPTICAL if
			 * pre-Solaris 9 SPARC sd behavior is required.
			 *
			 * If this is the first time through and
			 * sd_dtype_optical_bind has not been set in
			 * /etc/system, check properties.
			 */
			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}

			if (sd_dtype_optical_bind == 0) {
				rval = DDI_PROBE_FAILURE;
			} else {
				rval = DDI_PROBE_SUCCESS;
			}
			break;

		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
		break;
	default:
		rval = DDI_PROBE_PARTIAL;
		break;
	}

	/*
	 * This routine checks for resource allocation prior to freeing,
	 * so it will take care of the "smart probing" case where a
	 * scsi_probe() may or may not have been issued and will *not*
	 * free previously-freed resources.
	 */
	scsi_unprobe(devp);
	return (rval);
}
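/*
 * Illustrative note (not from the original source): sd_dtype_optical_bind
 * can be forced ahead of the "optical-device-bind" property lookup above
 * via standard /etc/system syntax, e.g. to keep sd from binding to
 * DTYPE_OPTICAL devices:
 *
 *	set sd:sd_dtype_optical_bind = 0
 */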
/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
 *		Given the device number, return the devinfo pointer from
 *		the scsi_device structure or the instance number
 *		associated with the dev_t.
 *
 * Arguments: dip - pointer to device info structure
 *	infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
 *		DDI_INFO_DEVT2INSTANCE)
 *	arg - driver dev_t
 *	result - user buffer for request response
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 */
/* ARGSUSED */
static int
sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct sd_lun	*un;
	dev_t		dev;
	int		instance;
	int		error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		dev = (dev_t)arg;
		instance = SDUNIT(dev);
		if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
			return (DDI_FAILURE);
		}
		*result = (void *) SD_DEVINFO(un);
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = SDUNIT(dev);
		*result = (void *)(uintptr_t)instance;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * Function: sd_prop_op
 *
 * Description: This is the driver prop_op(9e) entry point function.
 *		Return the number of blocks for the partition in question
 *		or forward the request to the property facilities.
 *
 * Arguments: dev - device number
 *	dip - pointer to device info structure
 *	prop_op - property operator
 *	mod_flags - DDI_PROP_DONTPASS, don't pass to parent
 *	name - pointer to property name
 *	valuep - pointer or address of the user buffer
 *	lengthp - property length
 *
 * Return Code: DDI_PROP_SUCCESS
 *		DDI_PROP_NOT_FOUND
 *		DDI_PROP_UNDEFINED
 *		DDI_PROP_NO_MEMORY
 *		DDI_PROP_BUF_TOO_SMALL
 */

static int
sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int		instance = ddi_get_instance(dip);
	struct sd_lun	*un;
	uint64_t	nblocks64;

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid geometry.
	 */
	un = ddi_get_soft_state(sd_state, instance);
	if ((dev == DDI_DEV_T_ANY) || (un == NULL)) {
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	} else if (!SD_IS_VALID_LABEL(un)) {
		(void) cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT);
		if (!SD_IS_VALID_LABEL(un))
			return (ddi_prop_op(dev, dip, prop_op, mod_flags,
			    name, valuep, lengthp));
	}

	/* get nblocks value */
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	(void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev),
	    (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT);

	return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, nblocks64));
}

/*
 * The following functions are for smart probing:
 * sd_scsi_probe_cache_init()
 * sd_scsi_probe_cache_fini()
 * sd_scsi_clear_probe_cache()
 * sd_scsi_probe_with_cache()
 */

/*
 * Function: sd_scsi_probe_cache_init
 *
 * Description: Initializes the probe response cache mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_init(void)
{
	mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
	sd_scsi_probe_cache_head = NULL;
}


/*
 * Function: sd_scsi_probe_cache_fini
 *
 * Description: Frees all resources associated with the probe response cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_probe_cache_fini(void)
{
	struct sd_scsi_probe_cache *cp;
	struct sd_scsi_probe_cache *ncp;

	/* Clean up our smart probing linked list */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
	}
	sd_scsi_probe_cache_head = NULL;
	mutex_destroy(&sd_scsi_probe_cache_mutex);
}


/*
 * Function: sd_scsi_clear_probe_cache
 *
 * Description: This routine clears the probe response cache. This is
 *		done when open() returns ENXIO so that when deferred
 *		attach is attempted (possibly after a device has been
 *		turned on) we will retry the probe. Since we don't know
 *		which target we failed to open, we just clear the
 *		entire cache.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_clear_probe_cache(void)
{
	struct sd_scsi_probe_cache	*cp;
	int				i;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		/*
		 * Reset all entries to SCSIPROBE_EXISTS. This will
		 * force probing to be performed the next time
		 * sd_scsi_probe_with_cache is called.
		 */
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
}


/*
 * Function: sd_scsi_probe_with_cache
 *
 * Description: This routine implements support for a scsi device probe
 *		with cache. The driver maintains a cache of the target
 *		responses to scsi probes. If we get no response from a
 *		target during a probe inquiry, we remember that, and we
 *		avoid additional calls to scsi_probe on non-zero LUNs
 *		on the same target until the cache is cleared. By doing
 *		so we avoid the 1/4 sec selection timeout for nonzero
 *		LUNs. lun0 of a target is always probed.
 *
 * Arguments: devp - Pointer to a scsi_device(9S) structure
 *	waitfunc - indicates what the allocator routines should
 *		do when resources are not available. This value
 *		is passed on to scsi_probe() when that routine
 *		is called.
 *
 * Return Code: SCSIPROBE_NORESP if a NORESP is cached in the probe
 *		response cache; otherwise the value returned by
 *		scsi_probe(9F).
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache	*cp;
	dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
	int		lun, tgt;

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/* Make sure caching enabled and target in range */
	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
		/* do it the old way (no cache) */
		return (scsi_probe(devp, waitfn));
	}

	mutex_enter(&sd_scsi_probe_cache_mutex);

	/* Find the cache for this scsi bus instance */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			break;
		}
	}

	/* If we can't find a cache for this pdip, create one */
	if (cp == NULL) {
		int i;

		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
		    KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_probe_cache_head;
		sd_scsi_probe_cache_head = cp;
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}

	mutex_exit(&sd_scsi_probe_cache_mutex);

	/* Recompute the cache for this target if LUN zero */
	if (lun == 0) {
		cp->cache[tgt] = SCSIPROBE_EXISTS;
	}

	/* Don't probe if cache remembers a NORESP from a previous LUN. */
	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
		return (SCSIPROBE_NORESP);
	}

	/* Do the actual probe; save & return the result */
	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function: sd_scsi_target_lun_init
 *
 * Description: Initializes the attached lun chain mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
	sd_scsi_target_lun_head = NULL;
}


/*
 * Function: sd_scsi_target_lun_fini
 *
 * Description: Frees all resources associated with the attached lun
 *		chain.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
	struct sd_scsi_hba_tgt_lun	*cp;
	struct sd_scsi_hba_tgt_lun	*ncp;

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
	}
	sd_scsi_target_lun_head = NULL;
	mutex_destroy(&sd_scsi_target_lun_mutex);
}


/*
 * Function: sd_scsi_get_target_lun_count
 *
 * Description: This routine will check the attached lun chain to see
 *		how many luns are attached on the required SCSI controller
 *		and target. Currently, some capabilities, such as tagged
 *		queueing, are supported by the HBA on a per-target basis,
 *		so all luns on a target have the same capabilities. Based
 *		on this assumption, sd should only set these capabilities
 *		once per target. This function is called when sd needs to
 *		decide how many luns are already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *	target - The target ID on the controller's SCSI bus.
 *
 * Return Code: The number of luns attached on the required target and
 *		controller.
 *		-1 if target ID is not in parallel SCSI scope or the given
 *		dip is not in the chain.
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	if ((target < 0) || (target >= NTARGETS_WIDE)) {
		return (-1);
	}

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp == NULL) {
		return (-1);
	}

	return (cp->nlun[target]);
}


/*
 * Function: sd_scsi_update_lun_on_target
 *
 * Description: This routine is used to update the attached lun chain when a
 *		lun is attached or detached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *		controller device.
 *	target - The target ID on the controller's SCSI bus.
 *	flag - Indicates whether the lun is being attached or detached.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}
}
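/*
 * Illustrative pairing (a sketch, not driver code): during attach, a
 * caller would consult the count before setting per-target capabilities
 * and then record the new lun, e.g.:
 *
 *	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
 *		... set per-target capabilities once ...
 *	}
 *	sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
 *
 * with the matching detach-flag update at detach time.
 */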
This is necessary for 2964 * non-removable media direct access devices because their INQUIRY data 2965 * may not be fully qualified until they are spun up (perhaps via the 2966 * START command above). Note: This seems to be needed for some 2967 * legacy devices only.) The INQUIRY command should succeed even if a 2968 * Reservation Conflict is present. 2969 */ 2970 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2971 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2972 kmem_free(bufaddr, SUN_INQSIZE); 2973 return (EIO); 2974 } 2975 2976 /* 2977 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2978 * Note that this routine does not return a failure here even if the 2979 * INQUIRY command did not return any data. This is a legacy behavior. 2980 */ 2981 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2982 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2983 } 2984 2985 kmem_free(bufaddr, SUN_INQSIZE); 2986 2987 /* If we hit a reservation conflict above, tell the caller. */ 2988 if (has_conflict == TRUE) { 2989 return (EACCES); 2990 } 2991 2992 return (0); 2993 } 2994 2995 #ifdef _LP64 2996 /* 2997 * Function: sd_enable_descr_sense 2998 * 2999 * Description: This routine attempts to select descriptor sense format 3000 * using the Control mode page. Devices that support 64 bit 3001 * LBAs (for >2TB luns) should also implement descriptor 3002 * sense data so we will call this function whenever we see 3003 * a lun larger than 2TB. If for some reason the device 3004 * supports 64 bit LBAs but doesn't support descriptor sense 3005 * presumably the mode select will fail. Everything will 3006 * continue to work normally except that we will not get 3007 * complete sense data for commands that fail with an LBA 3008 * larger than 32 bits. 3009 * 3010 * Arguments: un - driver soft state (unit) structure 3011 * 3012 * Context: Kernel thread context only 3013 */ 3014 3015 static void 3016 sd_enable_descr_sense(struct sd_lun *un) 3017 { 3018 uchar_t *header; 3019 struct mode_control_scsi3 *ctrl_bufp; 3020 size_t buflen; 3021 size_t bd_len; 3022 3023 /* 3024 * Read MODE SENSE page 0xA, Control Mode Page 3025 */ 3026 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3027 sizeof (struct mode_control_scsi3); 3028 header = kmem_zalloc(buflen, KM_SLEEP); 3029 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3030 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3031 SD_ERROR(SD_LOG_COMMON, un, 3032 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3033 goto eds_exit; 3034 } 3035 3036 /* 3037 * Determine size of Block Descriptors in order to locate 3038 * the mode page data. ATAPI devices return 0, SCSI devices 3039 * should return MODE_BLK_DESC_LENGTH. 3040 */ 3041 bd_len = ((struct mode_header *)header)->bdesc_length; 3042 3043 ctrl_bufp = (struct mode_control_scsi3 *) 3044 (header + MODE_HEADER_LENGTH + bd_len); 3045 3046 /* 3047 * Clear PS bit for MODE SELECT 3048 */ 3049 ctrl_bufp->mode_page.ps = 0; 3050 3051 /* 3052 * Set D_SENSE to enable descriptor sense format. 
3053 */ 3054 ctrl_bufp->d_sense = 1; 3055 3056 /* 3057 * Use MODE SELECT to commit the change to the D_SENSE bit 3058 */ 3059 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3060 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3061 SD_INFO(SD_LOG_COMMON, un, 3062 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3063 goto eds_exit; 3064 } 3065 3066 eds_exit: 3067 kmem_free(header, buflen); 3068 } 3069 3070 /* 3071 * Function: sd_reenable_dsense_task 3072 * 3073 * Description: Re-enable descriptor sense after device or bus reset 3074 * 3075 * Context: Executes in a taskq() thread context 3076 */ 3077 static void 3078 sd_reenable_dsense_task(void *arg) 3079 { 3080 struct sd_lun *un = arg; 3081 3082 ASSERT(un != NULL); 3083 sd_enable_descr_sense(un); 3084 } 3085 #endif /* _LP64 */ 3086 3087 /* 3088 * Function: sd_set_mmc_caps 3089 * 3090 * Description: This routine determines if the device is MMC compliant and if 3091 * the device supports CDDA via a mode sense of the CDVD 3092 * capabilities mode page. Also checks if the device is a 3093 * dvdram writable device. 3094 * 3095 * Arguments: un - driver soft state (unit) structure 3096 * 3097 * Context: Kernel thread context only 3098 */ 3099 3100 static void 3101 sd_set_mmc_caps(struct sd_lun *un) 3102 { 3103 struct mode_header_grp2 *sense_mhp; 3104 uchar_t *sense_page; 3105 caddr_t buf; 3106 int bd_len; 3107 int status; 3108 struct uscsi_cmd com; 3109 int rtn; 3110 uchar_t *out_data_rw, *out_data_hd; 3111 uchar_t *rqbuf_rw, *rqbuf_hd; 3112 3113 ASSERT(un != NULL); 3114 3115 /* 3116 * The flags which will be set in this function are - mmc compliant, 3117 * dvdram writable device, cdda support. Initialize them to FALSE 3118 * and if a capability is detected - it will be set to TRUE. 3119 */ 3120 un->un_f_mmc_cap = FALSE; 3121 un->un_f_dvdram_writable_device = FALSE; 3122 un->un_f_cfg_cdda = FALSE; 3123 3124 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3125 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3126 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3127 3128 if (status != 0) { 3129 /* command failed; just return */ 3130 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3131 return; 3132 } 3133 /* 3134 * If the mode sense request for the CDROM CAPABILITIES 3135 * page (0x2A) succeeds the device is assumed to be MMC. 3136 */ 3137 un->un_f_mmc_cap = TRUE; 3138 3139 /* Get to the page data */ 3140 sense_mhp = (struct mode_header_grp2 *)buf; 3141 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3142 sense_mhp->bdesc_length_lo; 3143 if (bd_len > MODE_BLK_DESC_LENGTH) { 3144 /* 3145 * We did not get back the expected block descriptor 3146 * length so we cannot determine if the device supports 3147 * CDDA. However, we still indicate the device is MMC 3148 * according to the successful response to the page 3149 * 0x2A mode sense request. 3150 */ 3151 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3152 "sd_set_mmc_caps: Mode Sense returned " 3153 "invalid block descriptor length\n"); 3154 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3155 return; 3156 } 3157 3158 /* See if read CDDA is supported */ 3159 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3160 bd_len); 3161 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3162 3163 /* See if writing DVD RAM is supported. */ 3164 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? 
TRUE : FALSE; 3165 if (un->un_f_dvdram_writable_device == TRUE) { 3166 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3167 return; 3168 } 3169 3170 /* 3171 * If the device presents DVD or CD capabilities in the mode 3172 * page, we can return here since a RRD will not have 3173 * these capabilities. 3174 */ 3175 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3176 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3177 return; 3178 } 3179 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3180 3181 /* 3182 * If un->un_f_dvdram_writable_device is still FALSE, 3183 * check for a Removable Rigid Disk (RRD). A RRD 3184 * device is identified by the features RANDOM_WRITABLE and 3185 * HARDWARE_DEFECT_MANAGEMENT. 3186 */ 3187 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3188 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3189 3190 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3191 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3192 RANDOM_WRITABLE, SD_PATH_STANDARD); 3193 if (rtn != 0) { 3194 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3195 kmem_free(rqbuf_rw, SENSE_LENGTH); 3196 return; 3197 } 3198 3199 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3200 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3201 3202 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3203 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3204 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3205 if (rtn == 0) { 3206 /* 3207 * We have good information, check for random writable 3208 * and hardware defect features. 3209 */ 3210 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3211 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3212 un->un_f_dvdram_writable_device = TRUE; 3213 } 3214 } 3215 3216 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3217 kmem_free(rqbuf_rw, SENSE_LENGTH); 3218 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3219 kmem_free(rqbuf_hd, SENSE_LENGTH); 3220 } 3221 3222 /* 3223 * Function: sd_check_for_writable_cd 3224 * 3225 * Description: This routine determines if the media in the device is 3226 * writable or not. It uses the get configuration command (0x46) 3227 * to determine if the media is writable 3228 * 3229 * Arguments: un - driver soft state (unit) structure 3230 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3231 * chain and the normal command waitq, or 3232 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3233 * "direct" chain and bypass the normal command 3234 * waitq. 3235 * 3236 * Context: Never called at interrupt context. 3237 */ 3238 3239 static void 3240 sd_check_for_writable_cd(struct sd_lun *un, int path_flag) 3241 { 3242 struct uscsi_cmd com; 3243 uchar_t *out_data; 3244 uchar_t *rqbuf; 3245 int rtn; 3246 uchar_t *out_data_rw, *out_data_hd; 3247 uchar_t *rqbuf_rw, *rqbuf_hd; 3248 struct mode_header_grp2 *sense_mhp; 3249 uchar_t *sense_page; 3250 caddr_t buf; 3251 int bd_len; 3252 int status; 3253 3254 ASSERT(un != NULL); 3255 ASSERT(mutex_owned(SD_MUTEX(un))); 3256 3257 /* 3258 * Initialize the writable media to false, if configuration info. 3259 * tells us otherwise then only we will set it. 3260 */ 3261 un->un_f_mmc_writable_media = FALSE; 3262 mutex_exit(SD_MUTEX(un)); 3263 3264 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3265 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3266 3267 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3268 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3269 3270 mutex_enter(SD_MUTEX(un)); 3271 if (rtn == 0) { 3272 /* 3273 * We have good information, check for writable DVD. 
3274 */ 3275 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3276 un->un_f_mmc_writable_media = TRUE; 3277 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3278 kmem_free(rqbuf, SENSE_LENGTH); 3279 return; 3280 } 3281 } 3282 3283 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3284 kmem_free(rqbuf, SENSE_LENGTH); 3285 3286 /* 3287 * Determine if this is a RRD type device. 3288 */ 3289 mutex_exit(SD_MUTEX(un)); 3290 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3291 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3292 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3293 mutex_enter(SD_MUTEX(un)); 3294 if (status != 0) { 3295 /* command failed; just return */ 3296 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3297 return; 3298 } 3299 3300 /* Get to the page data */ 3301 sense_mhp = (struct mode_header_grp2 *)buf; 3302 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3303 if (bd_len > MODE_BLK_DESC_LENGTH) { 3304 /* 3305 * We did not get back the expected block descriptor length so 3306 * we cannot check the mode page. 3307 */ 3308 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3309 "sd_check_for_writable_cd: Mode Sense returned " 3310 "invalid block descriptor length\n"); 3311 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3312 return; 3313 } 3314 3315 /* 3316 * If the device presents DVD or CD capabilities in the mode 3317 * page, we can return here since a RRD device will not have 3318 * these capabilities. 3319 */ 3320 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3321 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3322 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3323 return; 3324 } 3325 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3326 3327 /* 3328 * If un->un_f_mmc_writable_media is still FALSE, 3329 * check for RRD type media. A RRD device is identified 3330 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3331 */ 3332 mutex_exit(SD_MUTEX(un)); 3333 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3334 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3335 3336 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3337 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3338 RANDOM_WRITABLE, path_flag); 3339 if (rtn != 0) { 3340 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3341 kmem_free(rqbuf_rw, SENSE_LENGTH); 3342 mutex_enter(SD_MUTEX(un)); 3343 return; 3344 } 3345 3346 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3347 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3348 3349 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3350 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3351 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3352 mutex_enter(SD_MUTEX(un)); 3353 if (rtn == 0) { 3354 /* 3355 * We have good information, check for random writable 3356 * and hardware defect features as current. 3357 */ 3358 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3359 (out_data_rw[10] & 0x1) && 3360 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3361 (out_data_hd[10] & 0x1)) { 3362 un->un_f_mmc_writable_media = TRUE; 3363 } 3364 } 3365 3366 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3367 kmem_free(rqbuf_rw, SENSE_LENGTH); 3368 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3369 kmem_free(rqbuf_hd, SENSE_LENGTH); 3370 } 3371 3372 /* 3373 * Function: sd_read_unit_properties 3374 * 3375 * Description: The following implements a property lookup mechanism. 
 *		Properties for particular disks (keyed on vendor, model
 *		and rev numbers) are sought in the sd.conf file via
 *		sd_process_sdconf_file(), and if not found there, are
 *		looked for in a list hardcoded in this driver via
 *		sd_process_sdconf_table(). Once located, the properties
 *		are used to update the driver unit structure.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_read_unit_properties(struct sd_lun *un)
{
	/*
	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
	 * the "sd-config-list" property (from the sd.conf file) or if
	 * there was not a match for the inquiry vid/pid. If this event
	 * occurs the static driver configuration table is searched for
	 * a match.
	 */
	ASSERT(un != NULL);
	if (sd_process_sdconf_file(un) == SD_FAILURE) {
		sd_process_sdconf_table(un);
	}

	/* check for LSI device */
	sd_is_lsi(un);
}


/*
 * Function: sd_process_sdconf_file
 *
 * Description: Use ddi_getlongprop to obtain the properties from the
 *		driver's config file (i.e., sd.conf) and update the driver
 *		soft state structure accordingly.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_SUCCESS - The properties were successfully set according
 *			to the driver configuration file.
 *		SD_FAILURE - The driver config list was not obtained or
 *			there was no vid/pid match. This indicates that
 *			the static config table should be used.
 *
 * The config file has a property, "sd-config-list", which consists of
 * one or more duplets as follows:
 *
 *	sd-config-list=
 *		<duplet>,
 *		[<duplet>,]
 *		[<duplet>];
 *
 * The structure of each duplet is as follows:
 *
 *	<duplet>:= <vid+pid>,<data-property-name-list>
 *
 * The first entry of the duplet is the device ID string (the concatenated
 * vid & pid; not to be confused with a device_id). This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *	<data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field; a
 * worked example appears with the parsing code in sd_process_sdconf_file()
 * below.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit0 set in the
 * flags, prop1 if bit1 set, etc.
and N = SD_CONF_MAX_ITEMS -1 3453 * 3454 */ 3455 3456 static int 3457 sd_process_sdconf_file(struct sd_lun *un) 3458 { 3459 char *config_list = NULL; 3460 int config_list_len; 3461 int len; 3462 int dupletlen = 0; 3463 char *vidptr; 3464 int vidlen; 3465 char *dnlist_ptr; 3466 char *dataname_ptr; 3467 int dnlist_len; 3468 int dataname_len; 3469 int *data_list; 3470 int data_list_len; 3471 int rval = SD_FAILURE; 3472 int i; 3473 3474 ASSERT(un != NULL); 3475 3476 /* Obtain the configuration list associated with the .conf file */ 3477 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3478 sd_config_list, (caddr_t)&config_list, &config_list_len) 3479 != DDI_PROP_SUCCESS) { 3480 return (SD_FAILURE); 3481 } 3482 3483 /* 3484 * Compare vids in each duplet to the inquiry vid - if a match is 3485 * made, get the data value and update the soft state structure 3486 * accordingly. 3487 * 3488 * Note: This algorithm is complex and difficult to maintain. It should 3489 * be replaced with a more robust implementation. 3490 */ 3491 for (len = config_list_len, vidptr = config_list; len > 0; 3492 vidptr += dupletlen, len -= dupletlen) { 3493 /* 3494 * Note: The assumption here is that each vid entry is on 3495 * a unique line from its associated duplet. 3496 */ 3497 vidlen = dupletlen = (int)strlen(vidptr); 3498 if ((vidlen == 0) || 3499 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3500 dupletlen++; 3501 continue; 3502 } 3503 3504 /* 3505 * dnlist contains 1 or more blank separated 3506 * data-property-name entries 3507 */ 3508 dnlist_ptr = vidptr + vidlen + 1; 3509 dnlist_len = (int)strlen(dnlist_ptr); 3510 dupletlen += dnlist_len + 2; 3511 3512 /* 3513 * Set a pointer for the first data-property-name 3514 * entry in the list 3515 */ 3516 dataname_ptr = dnlist_ptr; 3517 dataname_len = 0; 3518 3519 /* 3520 * Loop through all data-property-name entries in the 3521 * data-property-name-list setting the properties for each. 3522 */ 3523 while (dataname_len < dnlist_len) { 3524 int version; 3525 3526 /* 3527 * Determine the length of the current 3528 * data-property-name entry by indexing until a 3529 * blank or NULL is encountered. When the space is 3530 * encountered reset it to a NULL for compliance 3531 * with ddi_getlongprop(). 
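			 *
			 * For illustration only, a hypothetical sd.conf
			 * fragment of the form described above (the vid/pid
			 * and property name are made up):
			 *
			 *	sd-config-list = "ACME    SUPERDISK", "acme-data";
			 *	acme-data = 1,0x1,32;
			 *
			 * would be walked here as a single duplet whose
			 * data-property-name list holds one entry,
			 * "acme-data"; looking that name up then yields the
			 * int array {1, 0x1, 32}: version 1, a flag word
			 * with (assuming SD_CONF_BSET_THROTTLE is bit 0)
			 * only the throttle bit set, and a throttle of 32.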
			 */
			for (i = 0; ((dataname_ptr[i] != ' ') &&
			    (dataname_ptr[i] != '\0')); i++) {
				;
			}

			dataname_len += i;
			/* If not NULL terminated, make it so */
			if (dataname_ptr[i] == ' ') {
				dataname_ptr[i] = '\0';
			}
			dataname_len++;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_file: disk:%s, data:%s\n",
			    vidptr, dataname_ptr);

			/* Get the data list */
			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
			    != DDI_PROP_SUCCESS) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: data property (%s)"
				    " has no value\n", dataname_ptr);
				dataname_ptr = dnlist_ptr + dataname_len;
				continue;
			}

			version = data_list[0];

			if (version == SD_CONF_VERSION_1) {
				sd_tunables values;

				/* Set the properties */
				if (sd_chk_vers1_data(un, data_list[1],
				    &data_list[2], data_list_len, dataname_ptr)
				    == SD_SUCCESS) {
					sd_get_tunables_from_conf(un,
					    data_list[1], &data_list[2],
					    &values);
					sd_set_vers1_properties(un,
					    data_list[1], &values);
					rval = SD_SUCCESS;
				} else {
					rval = SD_FAILURE;
				}
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "data property %s version 0x%x is invalid.",
				    dataname_ptr, version);
				rval = SD_FAILURE;
			}
			kmem_free(data_list, data_list_len);
			dataname_ptr = dnlist_ptr + dataname_len;
		}
	}

	/* free up the memory allocated by ddi_getlongprop */
	if (config_list) {
		kmem_free(config_list, config_list_len);
	}

	return (rval);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 * This function reads the data list from the sd.conf file and pulls
 * the values that can have numeric values as arguments, placing them
 * in the appropriate sd_tunables member. Since the order of the data
 * list members varies across platforms, this function reads them from
 * the data list in a platform-specific order and places them into the
 * correct sd_tunables member, which is consistent across all platforms.
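 *
 * For example (bit positions assumed for illustration): given a conf
 * entry of 1,0x5,32,0,12, the caller hands this routine flags = 0x5
 * and the three trailing words; if SD_CONF_BSET_THROTTLE is bit 0 and
 * SD_CONF_BSET_NRR_COUNT is bit 2, sdt_throttle is set from slot 0
 * (32) and sdt_not_rdy_retries from slot 2 (12), while slot 1 is
 * skipped but must still be present as a placeholder.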
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	bzero(values, sizeof (sd_tunables));

	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
			    "\n", values->sdt_lun_reset_enable);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
 *
 *		The form of a configuration table entry is:
 *		  <vid+pid>,<flags>,<property-data>
 *		  "SEAGATE ST42400N",1,63,0,0			(Fibre)
 *		  "SEAGATE ST42400N",1,63,0,0,0,0		(Sparc)
 *		  "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0	(Intel)
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		/*
		 * The static configuration table currently does not
		 * implement version 1 properties. Additionally,
		 * multiple data-property-name entries are not
		 * implemented in the static configuration table.
		 */
		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_table: disk %s\n", id);
			sd_set_vers1_properties(un,
			    sd_disk_table[table_index].flags,
			    sd_disk_table[table_index].properties);
			break;
		}
	}
}


/*
 * Function: sd_sdconf_id_match
 *
 * Description: This local function implements a case insensitive vid/pid
 *		comparison as well as the boundary cases of wild card and
 *		multiple blanks.
 *
 *		Note: An implicit assumption made here is that the scsi
 *		inquiry structure will always keep the vid, pid and
 *		revision strings in consecutive sequence, so they can be
 *		read as a single string. If this assumption is not the
 *		case, a separate string, to be used for the check, needs
 *		to be built with these strings concatenated.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      id - table or config file vid/pid
 *	      idlen - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
			 */
			if ((id[0] == '*') && (id[idlen - 1] == '*')) {
				char	*pidptr = &id[1];
				int	i;
				int	j;
				int	pidstrlen = idlen - 2;
				j = sizeof (SD_INQUIRY(un)->inq_pid) -
				    pidstrlen;

				if (j < 0) {
					return (SD_FAILURE);
				}
				for (i = 0; i < j; i++) {
					if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
					    pidptr, pidstrlen) == 0) {
						rval = SD_SUCCESS;
						break;
					}
				}
			}
		}
	}
	return (rval);
}


/*
 * Function: sd_blank_cmp
 *
 * Description: If the id string starts and ends with a space, treat
 *		multiple consecutive spaces as equivalent to a single
 *		space. For example, this causes a sd_disk_table entry
 *		of " NEC CDROM " to match a device's id string of
 *		"NEC CDROM".
 *
 *		Note: The success exit condition for this routine is
 *		reached when the pointer into the table entry reaches
 *		its terminating '\0' and cnt, the count of remaining
 *		inquiry-string bytes, is zero.
This will happen if the inquiry 3826 * string returned by the device is padded with spaces to be 3827 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3828 * SCSI spec states that the inquiry string is to be padded with 3829 * spaces. 3830 * 3831 * Arguments: un - driver soft state (unit) structure 3832 * id - table or config file vid/pid 3833 * idlen - length of the vid/pid (bytes) 3834 * 3835 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3836 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3837 */ 3838 3839 static int 3840 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3841 { 3842 char *p1; 3843 char *p2; 3844 int cnt; 3845 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3846 sizeof (SD_INQUIRY(un)->inq_pid); 3847 3848 ASSERT(un != NULL); 3849 p2 = un->un_sd->sd_inq->inq_vid; 3850 ASSERT(id != NULL); 3851 p1 = id; 3852 3853 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3854 /* 3855 * Note: string p1 is terminated by a NUL but string p2 3856 * isn't. The end of p2 is determined by cnt. 3857 */ 3858 for (;;) { 3859 /* skip over any extra blanks in both strings */ 3860 while ((*p1 != '\0') && (*p1 == ' ')) { 3861 p1++; 3862 } 3863 while ((cnt != 0) && (*p2 == ' ')) { 3864 p2++; 3865 cnt--; 3866 } 3867 3868 /* compare the two strings */ 3869 if ((cnt == 0) || 3870 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3871 break; 3872 } 3873 while ((cnt > 0) && 3874 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3875 p1++; 3876 p2++; 3877 cnt--; 3878 } 3879 } 3880 } 3881 3882 /* return SD_SUCCESS if both strings match */ 3883 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3884 } 3885 3886 3887 /* 3888 * Function: sd_chk_vers1_data 3889 * 3890 * Description: Verify the version 1 device properties provided by the 3891 * user via the configuration file 3892 * 3893 * Arguments: un - driver soft state (unit) structure 3894 * flags - integer mask indicating properties to be set 3895 * prop_list - integer list of property values 3896 * list_len - length of user provided data 3897 * 3898 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3899 * SD_FAILURE - Indicates the user provided data is invalid 3900 */ 3901 3902 static int 3903 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3904 int list_len, char *dataname_ptr) 3905 { 3906 int i; 3907 int mask = 1; 3908 int index = 0; 3909 3910 ASSERT(un != NULL); 3911 3912 /* Check for a NULL property name and list */ 3913 if (dataname_ptr == NULL) { 3914 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3915 "sd_chk_vers1_data: NULL data property name."); 3916 return (SD_FAILURE); 3917 } 3918 if (prop_list == NULL) { 3919 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3920 "sd_chk_vers1_data: %s NULL data property list.", 3921 dataname_ptr); 3922 return (SD_FAILURE); 3923 } 3924 3925 /* Display a warning if undefined bits are set in the flags */ 3926 if (flags & ~SD_CONF_BIT_MASK) { 3927 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3928 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3929 "Properties not set.", 3930 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3931 return (SD_FAILURE); 3932 } 3933 3934 /* 3935 * Verify the length of the list by identifying the highest bit set 3936 * in the flags and validating that the property list has a length 3937 * up to the index of this bit. 
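	 *
	 * For example, if the highest bit set in <flags> is bit 2, the
	 * list must supply at least five words: <version>, <flags>, and
	 * property slots 0 through 2, even when the lower bits are clear.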
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		mask = 1 << i;
		if (flags & mask) {
			/* count property slots through the highest bit set */
			index = i + 1;
		}
	}
	if ((list_len / sizeof (int)) < (index + 2)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: "
		    "Data property list %s size is incorrect. "
		    "Properties not set.", dataname_ptr);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
		return (SD_FAILURE);
	}
	return (SD_SUCCESS);
}


/*
 * Function: sd_set_vers1_properties
 *
 * Description: Set version 1 device properties based on a property list
 *		retrieved from the driver configuration file or static
 *		configuration table. Version 1 properties have the format:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 *		where the prop0 value will be used to set prop0 if bit0
 *		is set in the flags
 *
 * Arguments: un - driver soft state (unit) structure
 *	      flags - integer mask indicating properties to be set
 *	      prop_list - integer list of property values
 */

static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/*
	 * Set the flag to indicate cache is to be disabled. An attempt
	 * to disable the cache via sd_cache_control() will be made
	 * later during attach once the basic initialization is complete.
	 */
	if (flags & SD_CONF_BSET_NOCACHE) {
		un->un_f_opt_disable_cache = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: caching disabled flag set\n");
	}

	/* CD-specific configuration parameters */
	if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
		un->un_f_cfg_playmsf_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: playmsf_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READSUB_BCD) {
		un->un_f_cfg_readsub_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: readsub_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
		un->un_f_cfg_read_toc_trk_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_trk_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
		un->un_f_cfg_read_toc_addr_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_addr_bcd set\n");
	}
	if (flags & SD_CONF_BSET_NO_READ_HEADER) {
		un->un_f_cfg_no_read_header = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: no_read_header set\n");
	}
	if (flags & SD_CONF_BSET_READ_CD_XD4) {
		un->un_f_cfg_read_cd_xd4 = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_cd_xd4 set\n");
	}

	/* Support for devices which do not have valid/unique serial numbers */
	if (flags & SD_CONF_BSET_FAB_DEVID) {
		un->un_f_opt_fab_devid = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: fab_devid bit set\n");
	}

	/* Support for user throttle configuration */
	if (flags & SD_CONF_BSET_THROTTLE) {
		ASSERT(prop_list != NULL);
		un->un_saved_throttle = un->un_throttle =
		    prop_list->sdt_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: throttle set to %d\n",
		    prop_list->sdt_throttle);
	}

	/* Set the per disk retry count
according to the conf file or table. */ 4041 if (flags & SD_CONF_BSET_NRR_COUNT) { 4042 ASSERT(prop_list != NULL); 4043 if (prop_list->sdt_not_rdy_retries) { 4044 un->un_notready_retry_count = 4045 prop_list->sdt_not_rdy_retries; 4046 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4047 "sd_set_vers1_properties: not ready retry count" 4048 " set to %d\n", un->un_notready_retry_count); 4049 } 4050 } 4051 4052 /* The controller type is reported for generic disk driver ioctls */ 4053 if (flags & SD_CONF_BSET_CTYPE) { 4054 ASSERT(prop_list != NULL); 4055 switch (prop_list->sdt_ctype) { 4056 case CTYPE_CDROM: 4057 un->un_ctype = prop_list->sdt_ctype; 4058 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4059 "sd_set_vers1_properties: ctype set to " 4060 "CTYPE_CDROM\n"); 4061 break; 4062 case CTYPE_CCS: 4063 un->un_ctype = prop_list->sdt_ctype; 4064 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4065 "sd_set_vers1_properties: ctype set to " 4066 "CTYPE_CCS\n"); 4067 break; 4068 case CTYPE_ROD: /* RW optical */ 4069 un->un_ctype = prop_list->sdt_ctype; 4070 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4071 "sd_set_vers1_properties: ctype set to " 4072 "CTYPE_ROD\n"); 4073 break; 4074 default: 4075 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4076 "sd_set_vers1_properties: Could not set " 4077 "invalid ctype value (%d)", 4078 prop_list->sdt_ctype); 4079 } 4080 } 4081 4082 /* Purple failover timeout */ 4083 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4084 ASSERT(prop_list != NULL); 4085 un->un_busy_retry_count = 4086 prop_list->sdt_busy_retries; 4087 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4088 "sd_set_vers1_properties: " 4089 "busy retry count set to %d\n", 4090 un->un_busy_retry_count); 4091 } 4092 4093 /* Purple reset retry count */ 4094 if (flags & SD_CONF_BSET_RST_RETRIES) { 4095 ASSERT(prop_list != NULL); 4096 un->un_reset_retry_count = 4097 prop_list->sdt_reset_retries; 4098 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4099 "sd_set_vers1_properties: " 4100 "reset retry count set to %d\n", 4101 un->un_reset_retry_count); 4102 } 4103 4104 /* Purple reservation release timeout */ 4105 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4106 ASSERT(prop_list != NULL); 4107 un->un_reserve_release_time = 4108 prop_list->sdt_reserv_rel_time; 4109 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4110 "sd_set_vers1_properties: " 4111 "reservation release timeout set to %d\n", 4112 un->un_reserve_release_time); 4113 } 4114 4115 /* 4116 * Driver flag telling the driver to verify that no commands are pending 4117 * for a device before issuing a Test Unit Ready. This is a workaround 4118 * for a firmware bug in some Seagate eliteI drives. 4119 */ 4120 if (flags & SD_CONF_BSET_TUR_CHECK) { 4121 un->un_f_cfg_tur_check = TRUE; 4122 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4123 "sd_set_vers1_properties: tur queue check set\n"); 4124 } 4125 4126 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4127 un->un_min_throttle = prop_list->sdt_min_throttle; 4128 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4129 "sd_set_vers1_properties: min throttle set to %d\n", 4130 un->un_min_throttle); 4131 } 4132 4133 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4134 un->un_f_disksort_disabled = 4135 (prop_list->sdt_disk_sort_dis != 0) ? 4136 TRUE : FALSE; 4137 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4138 "sd_set_vers1_properties: disksort disabled " 4139 "flag set to %d\n", 4140 prop_list->sdt_disk_sort_dis); 4141 } 4142 4143 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4144 un->un_f_lun_reset_enabled = 4145 (prop_list->sdt_lun_reset_enable != 0) ? 
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: lun reset enabled "
		    "flag set to %d\n",
		    prop_list->sdt_lun_reset_enable);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
}

/*
 * Function: sd_is_lsi()
 *
 * Description: Check for LSI devices, stepping through the static device
 *		table to match the vid/pid.
 *
 * Args: un - ptr to sd_lun
 *
 * Notes: When a new LSI property is created, it must be added to this
 *	  function.
 */
static void
sd_is_lsi(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;
	void	*prop;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			prop = sd_disk_table[table_index].properties;
			if (prop == &lsi_properties ||
			    prop == &lsi_oem_properties ||
			    prop == &lsi_properties_scsi ||
			    prop == &symbios_properties) {
				un->un_f_cfg_is_lsi = TRUE;
			}
			break;
		}
	}
}

/*
 * Function: sd_get_physical_geometry
 *
 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
 *		MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
 *		target, and use this information to initialize the physical
 *		geometry cache specified by pgeom_p.
 *
 *		MODE SENSE is an optional command, so failure in this case
 *		does not necessarily denote an error. We want to use the
 *		MODE SENSE commands to derive the physical geometry of the
 *		device, but if either command fails, the logical geometry is
 *		used as the fallback for disk label geometry in cmlb.
 *
 *		This requires that un->un_blockcount and un->un_tgt_blocksize
 *		have already been initialized for the current target and
 *		that the current values be passed as args so that we don't
 *		end up ever trying to use -1 as a valid value. This could
 *		happen if either value is reset while we're not holding
 *		the mutex.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Context: Kernel thread only (can sleep).
4234 */ 4235 4236 static int 4237 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4238 diskaddr_t capacity, int lbasize, int path_flag) 4239 { 4240 struct mode_format *page3p; 4241 struct mode_geometry *page4p; 4242 struct mode_header *headerp; 4243 int sector_size; 4244 int nsect; 4245 int nhead; 4246 int ncyl; 4247 int intrlv; 4248 int spc; 4249 diskaddr_t modesense_capacity; 4250 int rpm; 4251 int bd_len; 4252 int mode_header_length; 4253 uchar_t *p3bufp; 4254 uchar_t *p4bufp; 4255 int cdbsize; 4256 int ret = EIO; 4257 4258 ASSERT(un != NULL); 4259 4260 if (lbasize == 0) { 4261 if (ISCD(un)) { 4262 lbasize = 2048; 4263 } else { 4264 lbasize = un->un_sys_blocksize; 4265 } 4266 } 4267 pgeom_p->g_secsize = (unsigned short)lbasize; 4268 4269 /* 4270 * If the unit is a cd/dvd drive MODE SENSE page three 4271 * and MODE SENSE page four are reserved (see SBC spec 4272 * and MMC spec). To prevent soft errors just return 4273 * using the default LBA size. 4274 */ 4275 if (ISCD(un)) 4276 return (ret); 4277 4278 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4279 4280 /* 4281 * Retrieve MODE SENSE page 3 - Format Device Page 4282 */ 4283 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4284 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4285 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4286 != 0) { 4287 SD_ERROR(SD_LOG_COMMON, un, 4288 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4289 goto page3_exit; 4290 } 4291 4292 /* 4293 * Determine size of Block Descriptors in order to locate the mode 4294 * page data. ATAPI devices return 0, SCSI devices should return 4295 * MODE_BLK_DESC_LENGTH. 4296 */ 4297 headerp = (struct mode_header *)p3bufp; 4298 if (un->un_f_cfg_is_atapi == TRUE) { 4299 struct mode_header_grp2 *mhp = 4300 (struct mode_header_grp2 *)headerp; 4301 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4302 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4303 } else { 4304 mode_header_length = MODE_HEADER_LENGTH; 4305 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4306 } 4307 4308 if (bd_len > MODE_BLK_DESC_LENGTH) { 4309 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4310 "received unexpected bd_len of %d, page3\n", bd_len); 4311 goto page3_exit; 4312 } 4313 4314 page3p = (struct mode_format *) 4315 ((caddr_t)headerp + mode_header_length + bd_len); 4316 4317 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4318 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4319 "mode sense pg3 code mismatch %d\n", 4320 page3p->mode_page.code); 4321 goto page3_exit; 4322 } 4323 4324 /* 4325 * Use this physical geometry data only if BOTH MODE SENSE commands 4326 * complete successfully; otherwise, revert to the logical geometry. 4327 * So, we need to save everything in temporary variables. 
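	 *
	 * (The multi-byte fields in these mode pages arrive big-endian,
	 * as SCSI data generally does, hence the BE_16() conversions
	 * applied below.)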
4328 */ 4329 sector_size = BE_16(page3p->data_bytes_sect); 4330 4331 /* 4332 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4333 */ 4334 if (sector_size == 0) { 4335 sector_size = un->un_sys_blocksize; 4336 } else { 4337 sector_size &= ~(un->un_sys_blocksize - 1); 4338 } 4339 4340 nsect = BE_16(page3p->sect_track); 4341 intrlv = BE_16(page3p->interleave); 4342 4343 SD_INFO(SD_LOG_COMMON, un, 4344 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4345 SD_INFO(SD_LOG_COMMON, un, 4346 " mode page: %d; nsect: %d; sector size: %d;\n", 4347 page3p->mode_page.code, nsect, sector_size); 4348 SD_INFO(SD_LOG_COMMON, un, 4349 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4350 BE_16(page3p->track_skew), 4351 BE_16(page3p->cylinder_skew)); 4352 4353 4354 /* 4355 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4356 */ 4357 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4358 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4359 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4360 != 0) { 4361 SD_ERROR(SD_LOG_COMMON, un, 4362 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4363 goto page4_exit; 4364 } 4365 4366 /* 4367 * Determine size of Block Descriptors in order to locate the mode 4368 * page data. ATAPI devices return 0, SCSI devices should return 4369 * MODE_BLK_DESC_LENGTH. 4370 */ 4371 headerp = (struct mode_header *)p4bufp; 4372 if (un->un_f_cfg_is_atapi == TRUE) { 4373 struct mode_header_grp2 *mhp = 4374 (struct mode_header_grp2 *)headerp; 4375 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4376 } else { 4377 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4378 } 4379 4380 if (bd_len > MODE_BLK_DESC_LENGTH) { 4381 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4382 "received unexpected bd_len of %d, page4\n", bd_len); 4383 goto page4_exit; 4384 } 4385 4386 page4p = (struct mode_geometry *) 4387 ((caddr_t)headerp + mode_header_length + bd_len); 4388 4389 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4390 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4391 "mode sense pg4 code mismatch %d\n", 4392 page4p->mode_page.code); 4393 goto page4_exit; 4394 } 4395 4396 /* 4397 * Stash the data now, after we know that both commands completed. 4398 */ 4399 4400 4401 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4402 spc = nhead * nsect; 4403 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4404 rpm = BE_16(page4p->rpm); 4405 4406 modesense_capacity = spc * ncyl; 4407 4408 SD_INFO(SD_LOG_COMMON, un, 4409 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4410 SD_INFO(SD_LOG_COMMON, un, 4411 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4412 SD_INFO(SD_LOG_COMMON, un, 4413 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4414 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4415 (void *)pgeom_p, capacity); 4416 4417 /* 4418 * Compensate if the drive's geometry is not rectangular, i.e., 4419 * the product of C * H * S returned by MODE SENSE >= that returned 4420 * by read capacity. This is an idiosyncrasy of the original x86 4421 * disk subsystem. 
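	 *
	 * As a worked example: if MODE SENSE reports 16 heads, 63 sectors
	 * and 4000 cylinders (spc = 1008, modesense_capacity = 4032000)
	 * while READ CAPACITY reports 4000000 blocks, the 32000 surplus
	 * blocks become g_acyl = (32000 + 1007) / 1008 = 32 alternate
	 * cylinders, leaving g_ncyl = 4000 - 32 = 3968.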
4422 */ 4423 if (modesense_capacity >= capacity) { 4424 SD_INFO(SD_LOG_COMMON, un, 4425 "sd_get_physical_geometry: adjusting acyl; " 4426 "old: %d; new: %d\n", pgeom_p->g_acyl, 4427 (modesense_capacity - capacity + spc - 1) / spc); 4428 if (sector_size != 0) { 4429 /* 1243403: NEC D38x7 drives don't support sec size */ 4430 pgeom_p->g_secsize = (unsigned short)sector_size; 4431 } 4432 pgeom_p->g_nsect = (unsigned short)nsect; 4433 pgeom_p->g_nhead = (unsigned short)nhead; 4434 pgeom_p->g_capacity = capacity; 4435 pgeom_p->g_acyl = 4436 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4437 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4438 } 4439 4440 pgeom_p->g_rpm = (unsigned short)rpm; 4441 pgeom_p->g_intrlv = (unsigned short)intrlv; 4442 ret = 0; 4443 4444 SD_INFO(SD_LOG_COMMON, un, 4445 "sd_get_physical_geometry: mode sense geometry:\n"); 4446 SD_INFO(SD_LOG_COMMON, un, 4447 " nsect: %d; sector size: %d; interlv: %d\n", 4448 nsect, sector_size, intrlv); 4449 SD_INFO(SD_LOG_COMMON, un, 4450 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4451 nhead, ncyl, rpm, modesense_capacity); 4452 SD_INFO(SD_LOG_COMMON, un, 4453 "sd_get_physical_geometry: (cached)\n"); 4454 SD_INFO(SD_LOG_COMMON, un, 4455 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4456 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4457 pgeom_p->g_nhead, pgeom_p->g_nsect); 4458 SD_INFO(SD_LOG_COMMON, un, 4459 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4460 pgeom_p->g_secsize, pgeom_p->g_capacity, 4461 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4462 4463 page4_exit: 4464 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4465 page3_exit: 4466 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4467 4468 return (ret); 4469 } 4470 4471 /* 4472 * Function: sd_get_virtual_geometry 4473 * 4474 * Description: Ask the controller to tell us about the target device. 4475 * 4476 * Arguments: un - pointer to softstate 4477 * capacity - disk capacity in #blocks 4478 * lbasize - disk block size in bytes 4479 * 4480 * Context: Kernel thread only 4481 */ 4482 4483 static int 4484 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4485 diskaddr_t capacity, int lbasize) 4486 { 4487 uint_t geombuf; 4488 int spc; 4489 4490 ASSERT(un != NULL); 4491 4492 /* Set sector size, and total number of sectors */ 4493 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4494 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4495 4496 /* Let the HBA tell us its geometry */ 4497 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4498 4499 /* A value of -1 indicates an undefined "geometry" property */ 4500 if (geombuf == (-1)) { 4501 return (EINVAL); 4502 } 4503 4504 /* Initialize the logical geometry cache. */ 4505 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4506 lgeom_p->g_nsect = geombuf & 0xffff; 4507 lgeom_p->g_secsize = un->un_sys_blocksize; 4508 4509 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4510 4511 /* 4512 * Note: The driver originally converted the capacity value from 4513 * target blocks to system blocks. However, the capacity value passed 4514 * to this routine is already in terms of system blocks (this scaling 4515 * is done when the READ CAPACITY command is issued and processed). 4516 * This 'error' may have gone undetected because the usage of g_ncyl 4517 * (which is based upon g_capacity) is very limited within the driver 4518 */ 4519 lgeom_p->g_capacity = capacity; 4520 4521 /* 4522 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4523 * hba may return zero values if the device has been removed. 4524 */ 4525 if (spc == 0) { 4526 lgeom_p->g_ncyl = 0; 4527 } else { 4528 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4529 } 4530 lgeom_p->g_acyl = 0; 4531 4532 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4533 return (0); 4534 4535 } 4536 /* 4537 * Function: sd_update_block_info 4538 * 4539 * Description: Calculate a byte count to sector count bitshift value 4540 * from sector size. 4541 * 4542 * Arguments: un: unit struct. 4543 * lbasize: new target sector size 4544 * capacity: new target capacity, ie. block count 4545 * 4546 * Context: Kernel thread context 4547 */ 4548 4549 static void 4550 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4551 { 4552 if (lbasize != 0) { 4553 un->un_tgt_blocksize = lbasize; 4554 un->un_f_tgt_blocksize_is_valid = TRUE; 4555 } 4556 4557 if (capacity != 0) { 4558 un->un_blockcount = capacity; 4559 un->un_f_blockcount_is_valid = TRUE; 4560 } 4561 } 4562 4563 4564 /* 4565 * Function: sd_register_devid 4566 * 4567 * Description: This routine will obtain the device id information from the 4568 * target, obtain the serial number, and register the device 4569 * id with the ddi framework. 4570 * 4571 * Arguments: devi - the system's dev_info_t for the device. 4572 * un - driver soft state (unit) structure 4573 * reservation_flag - indicates if a reservation conflict 4574 * occurred during attach 4575 * 4576 * Context: Kernel Thread 4577 */ 4578 static void 4579 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4580 { 4581 int rval = 0; 4582 uchar_t *inq80 = NULL; 4583 size_t inq80_len = MAX_INQUIRY_SIZE; 4584 size_t inq80_resid = 0; 4585 uchar_t *inq83 = NULL; 4586 size_t inq83_len = MAX_INQUIRY_SIZE; 4587 size_t inq83_resid = 0; 4588 4589 ASSERT(un != NULL); 4590 ASSERT(mutex_owned(SD_MUTEX(un))); 4591 ASSERT((SD_DEVINFO(un)) == devi); 4592 4593 /* 4594 * This is the case of antiquated Sun disk drives that have the 4595 * FAB_DEVID property set in the disk_table. These drives 4596 * manage the devid's by storing them in last 2 available sectors 4597 * on the drive and have them fabricated by the ddi layer by calling 4598 * ddi_devid_init and passing the DEVID_FAB flag. 4599 */ 4600 if (un->un_f_opt_fab_devid == TRUE) { 4601 /* 4602 * Depending on EINVAL isn't reliable, since a reserved disk 4603 * may result in invalid geometry, so check to make sure a 4604 * reservation conflict did not occur during attach. 4605 */ 4606 if ((sd_get_devid(un) == EINVAL) && 4607 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4608 /* 4609 * The devid is invalid AND there is no reservation 4610 * conflict. Fabricate a new devid. 4611 */ 4612 (void) sd_create_devid(un); 4613 } 4614 4615 /* Register the devid if it exists */ 4616 if (un->un_devid != NULL) { 4617 (void) ddi_devid_register(SD_DEVINFO(un), 4618 un->un_devid); 4619 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4620 "sd_register_devid: Devid Fabricated\n"); 4621 } 4622 return; 4623 } 4624 4625 /* 4626 * We check the availibility of the World Wide Name (0x83) and Unit 4627 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4628 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4629 * 0x83 is availible, that is the best choice. Our next choice is 4630 * 0x80. If neither are availible, we munge the devid from the device 4631 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4632 * to fabricate a devid for non-Sun qualified disks. 
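	 *
	 * (Page 0x83 is preferred because it typically carries one or
	 * more persistent designators, e.g. an NAA or EUI-64 identifier,
	 * whereas page 0x80 holds only the ASCII unit serial number.)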
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage the devids by storing them in
		 * the last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricated devid only if one does
		 * not already exist.
		 */
		if (sd_get_devid(un) == EINVAL) {
			(void) sd_create_devid(un);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}



/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
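 *
 *		The on-disk object read here is a struct dk_devid
 *		block: a revision pair, the encoded devid itself, and
 *		a trailing XOR checksum word computed over the other
 *		32-bit words of the sector, which is recomputed and
 *		compared below before the contents are trusted.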
4725 * 4726 * Arguments: un - driver soft state (unit) structure 4727 * 4728 * Return Code: 0 if we successfully get the device id 4729 * 4730 * Context: Kernel Thread 4731 */ 4732 4733 static int 4734 sd_get_devid(struct sd_lun *un) 4735 { 4736 struct dk_devid *dkdevid; 4737 ddi_devid_t tmpid; 4738 uint_t *ip; 4739 size_t sz; 4740 diskaddr_t blk; 4741 int status; 4742 int chksum; 4743 int i; 4744 size_t buffer_size; 4745 4746 ASSERT(un != NULL); 4747 ASSERT(mutex_owned(SD_MUTEX(un))); 4748 4749 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4750 un); 4751 4752 if (un->un_devid != NULL) { 4753 return (0); 4754 } 4755 4756 mutex_exit(SD_MUTEX(un)); 4757 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4758 (void *)SD_PATH_DIRECT) != 0) { 4759 mutex_enter(SD_MUTEX(un)); 4760 return (EINVAL); 4761 } 4762 4763 /* 4764 * Read and verify device id, stored in the reserved cylinders at the 4765 * end of the disk. Backup label is on the odd sectors of the last 4766 * track of the last cylinder. Device id will be on track of the next 4767 * to last cylinder. 4768 */ 4769 mutex_enter(SD_MUTEX(un)); 4770 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4771 mutex_exit(SD_MUTEX(un)); 4772 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4773 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4774 SD_PATH_DIRECT); 4775 if (status != 0) { 4776 goto error; 4777 } 4778 4779 /* Validate the revision */ 4780 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4781 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4782 status = EINVAL; 4783 goto error; 4784 } 4785 4786 /* Calculate the checksum */ 4787 chksum = 0; 4788 ip = (uint_t *)dkdevid; 4789 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4790 i++) { 4791 chksum ^= ip[i]; 4792 } 4793 4794 /* Compare the checksums */ 4795 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4796 status = EINVAL; 4797 goto error; 4798 } 4799 4800 /* Validate the device id */ 4801 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4802 status = EINVAL; 4803 goto error; 4804 } 4805 4806 /* 4807 * Store the device id in the driver soft state 4808 */ 4809 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4810 tmpid = kmem_alloc(sz, KM_SLEEP); 4811 4812 mutex_enter(SD_MUTEX(un)); 4813 4814 un->un_devid = tmpid; 4815 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4816 4817 kmem_free(dkdevid, buffer_size); 4818 4819 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4820 4821 return (status); 4822 error: 4823 mutex_enter(SD_MUTEX(un)); 4824 kmem_free(dkdevid, buffer_size); 4825 return (status); 4826 } 4827 4828 4829 /* 4830 * Function: sd_create_devid 4831 * 4832 * Description: This routine will fabricate the device id and write it 4833 * to the disk. 
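 *
 *		(A DEVID_FAB devid is fabricated by the DDI framework,
 *		typically from the host id and a timestamp rather than
 *		from device data, which is why it must be persisted to
 *		the reserved sector to stay stable across reboots.)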
4834 * 4835 * Arguments: un - driver soft state (unit) structure 4836 * 4837 * Return Code: value of the fabricated device id 4838 * 4839 * Context: Kernel Thread 4840 */ 4841 4842 static ddi_devid_t 4843 sd_create_devid(struct sd_lun *un) 4844 { 4845 ASSERT(un != NULL); 4846 4847 /* Fabricate the devid */ 4848 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4849 == DDI_FAILURE) { 4850 return (NULL); 4851 } 4852 4853 /* Write the devid to disk */ 4854 if (sd_write_deviceid(un) != 0) { 4855 ddi_devid_free(un->un_devid); 4856 un->un_devid = NULL; 4857 } 4858 4859 return (un->un_devid); 4860 } 4861 4862 4863 /* 4864 * Function: sd_write_deviceid 4865 * 4866 * Description: This routine will write the device id to the disk 4867 * reserved sector. 4868 * 4869 * Arguments: un - driver soft state (unit) structure 4870 * 4871 * Return Code: EINVAL 4872 * value returned by sd_send_scsi_cmd 4873 * 4874 * Context: Kernel Thread 4875 */ 4876 4877 static int 4878 sd_write_deviceid(struct sd_lun *un) 4879 { 4880 struct dk_devid *dkdevid; 4881 diskaddr_t blk; 4882 uint_t *ip, chksum; 4883 int status; 4884 int i; 4885 4886 ASSERT(mutex_owned(SD_MUTEX(un))); 4887 4888 mutex_exit(SD_MUTEX(un)); 4889 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4890 (void *)SD_PATH_DIRECT) != 0) { 4891 mutex_enter(SD_MUTEX(un)); 4892 return (-1); 4893 } 4894 4895 4896 /* Allocate the buffer */ 4897 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4898 4899 /* Fill in the revision */ 4900 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4901 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4902 4903 /* Copy in the device id */ 4904 mutex_enter(SD_MUTEX(un)); 4905 bcopy(un->un_devid, &dkdevid->dkd_devid, 4906 ddi_devid_sizeof(un->un_devid)); 4907 mutex_exit(SD_MUTEX(un)); 4908 4909 /* Calculate the checksum */ 4910 chksum = 0; 4911 ip = (uint_t *)dkdevid; 4912 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4913 i++) { 4914 chksum ^= ip[i]; 4915 } 4916 4917 /* Fill-in checksum */ 4918 DKD_FORMCHKSUM(chksum, dkdevid); 4919 4920 /* Write the reserved sector */ 4921 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4922 SD_PATH_DIRECT); 4923 4924 kmem_free(dkdevid, un->un_sys_blocksize); 4925 4926 mutex_enter(SD_MUTEX(un)); 4927 return (status); 4928 } 4929 4930 4931 /* 4932 * Function: sd_check_vpd_page_support 4933 * 4934 * Description: This routine sends an inquiry command with the EVPD bit set and 4935 * a page code of 0x00 to the device. It is used to determine which 4936 * vital product pages are availible to find the devid. We are 4937 * looking for pages 0x83 or 0x80. If we return a negative 1, the 4938 * device does not support that command. 4939 * 4940 * Arguments: un - driver soft state (unit) structure 4941 * 4942 * Return Code: 0 - success 4943 * 1 - check condition 4944 * 4945 * Context: This routine can sleep. 4946 */ 4947 4948 static int 4949 sd_check_vpd_page_support(struct sd_lun *un) 4950 { 4951 uchar_t *page_list = NULL; 4952 uchar_t page_length = 0xff; /* Use max possible length */ 4953 uchar_t evpd = 0x01; /* Set the EVPD bit */ 4954 uchar_t page_code = 0x00; /* Supported VPD Pages */ 4955 int rval = 0; 4956 int counter; 4957 4958 ASSERT(un != NULL); 4959 ASSERT(mutex_owned(SD_MUTEX(un))); 4960 4961 mutex_exit(SD_MUTEX(un)); 4962 4963 /* 4964 * We'll set the page length to the maximum to save figuring it out 4965 * with an additional call. 
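	 *
	 * (For reference, a typical page-0x00 response is a four byte
	 * header whose last byte gives the page length, followed from
	 * byte 4 onward by the supported page codes in ascending order,
	 * e.g. 0x00 0x80 0x83; the scan below relies on that ordering.)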
4966 */ 4967 page_list = kmem_zalloc(page_length, KM_SLEEP); 4968 4969 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 4970 page_code, NULL); 4971 4972 mutex_enter(SD_MUTEX(un)); 4973 4974 /* 4975 * Now we must validate that the device accepted the command, as some 4976 * drives do not support it. If the drive does support it, we will 4977 * return 0, and the supported pages will be in un_vpd_page_mask. If 4978 * not, we return -1. 4979 */ 4980 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 4981 /* Loop to find one of the 2 pages we need */ 4982 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 4983 4984 /* 4985 * Pages are returned in ascending order, and 0x83 is what we 4986 * are hoping for. 4987 */ 4988 while ((page_list[counter] <= 0x83) && 4989 (counter <= (page_list[VPD_PAGE_LENGTH] + 4990 VPD_HEAD_OFFSET))) { 4991 /* 4992 * Add 3 because page_list[3] is the number of 4993 * pages minus 3 4994 */ 4995 4996 switch (page_list[counter]) { 4997 case 0x00: 4998 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 4999 break; 5000 case 0x80: 5001 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5002 break; 5003 case 0x81: 5004 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5005 break; 5006 case 0x82: 5007 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5008 break; 5009 case 0x83: 5010 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5011 break; 5012 } 5013 counter++; 5014 } 5015 5016 } else { 5017 rval = -1; 5018 5019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5020 "sd_check_vpd_page_support: This drive does not implement " 5021 "VPD pages.\n"); 5022 } 5023 5024 kmem_free(page_list, page_length); 5025 5026 return (rval); 5027 } 5028 5029 5030 /* 5031 * Function: sd_setup_pm 5032 * 5033 * Description: Initialize Power Management on the device 5034 * 5035 * Context: Kernel Thread 5036 */ 5037 5038 static void 5039 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5040 { 5041 uint_t log_page_size; 5042 uchar_t *log_page_data; 5043 int rval; 5044 5045 /* 5046 * Since we are called from attach, holding a mutex for 5047 * un is unnecessary. Because some of the routines called 5048 * from here require SD_MUTEX to not be held, assert this 5049 * right up front. 5050 */ 5051 ASSERT(!mutex_owned(SD_MUTEX(un))); 5052 /* 5053 * Since the sd device does not have the 'reg' property, 5054 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5055 * The following code is to tell cpr that this device 5056 * DOES need to be suspended and resumed. 5057 */ 5058 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5059 "pm-hardware-state", "needs-suspend-resume"); 5060 5061 /* 5062 * This complies with the new power management framework 5063 * for certain desktop machines. Create the pm_components 5064 * property as a string array property. 5065 */ 5066 if (un->un_f_pm_supported) { 5067 /* 5068 * not all devices have a motor, try it first. 5069 * some devices may return ILLEGAL REQUEST, some 5070 * will hang 5071 * The following START_STOP_UNIT is used to check if target 5072 * device has a motor. 
		 */
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * create the pm properties anyway; otherwise the parent
		 * can't go to sleep
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
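		 * The three strings published just above describe a single
		 * PM component, "spindle-motor", with power level 0 = off
		 * and 1 = on; the pm_raise_power() call below requests
		 * level SD_SPINDLE_ON for component 0.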
5200 * Note: the return value of pm_raise_power can't be used 5201 * for determining if PM should be enabled for this device. 5202 * Even if you check the return values and remove this 5203 * property created above, the PM framework will not honor the 5204 * change after the first call to pm_raise_power. Hence, 5205 * removal of that property does not help if pm_raise_power 5206 * fails. In the case of removable media, the start/stop 5207 * will fail if the media is not present. 5208 */ 5209 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5210 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5211 mutex_enter(SD_MUTEX(un)); 5212 un->un_power_level = SD_SPINDLE_ON; 5213 mutex_enter(&un->un_pm_mutex); 5214 /* Set to on and not busy. */ 5215 un->un_pm_count = 0; 5216 } else { 5217 mutex_enter(SD_MUTEX(un)); 5218 un->un_power_level = SD_SPINDLE_OFF; 5219 mutex_enter(&un->un_pm_mutex); 5220 /* Set to off. */ 5221 un->un_pm_count = -1; 5222 } 5223 mutex_exit(&un->un_pm_mutex); 5224 mutex_exit(SD_MUTEX(un)); 5225 } else { 5226 un->un_power_level = SD_SPINDLE_ON; 5227 un->un_f_pm_is_enabled = FALSE; 5228 } 5229 } 5230 5231 5232 /* 5233 * Function: sd_ddi_suspend 5234 * 5235 * Description: Performs system power-down operations. This includes 5236 * setting the drive state to indicate its suspended so 5237 * that no new commands will be accepted. Also, wait for 5238 * all commands that are in transport or queued to a timer 5239 * for retry to complete. All timeout threads are cancelled. 5240 * 5241 * Return Code: DDI_FAILURE or DDI_SUCCESS 5242 * 5243 * Context: Kernel thread context 5244 */ 5245 5246 static int 5247 sd_ddi_suspend(dev_info_t *devi) 5248 { 5249 struct sd_lun *un; 5250 clock_t wait_cmds_complete; 5251 5252 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5253 if (un == NULL) { 5254 return (DDI_FAILURE); 5255 } 5256 5257 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5258 5259 mutex_enter(SD_MUTEX(un)); 5260 5261 /* Return success if the device is already suspended. */ 5262 if (un->un_state == SD_STATE_SUSPENDED) { 5263 mutex_exit(SD_MUTEX(un)); 5264 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5265 "device already suspended, exiting\n"); 5266 return (DDI_SUCCESS); 5267 } 5268 5269 /* Return failure if the device is being used by HA */ 5270 if (un->un_resvd_status & 5271 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5272 mutex_exit(SD_MUTEX(un)); 5273 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5274 "device in use by HA, exiting\n"); 5275 return (DDI_FAILURE); 5276 } 5277 5278 /* 5279 * Return failure if the device is in a resource wait 5280 * or power changing state. 5281 */ 5282 if ((un->un_state == SD_STATE_RWAIT) || 5283 (un->un_state == SD_STATE_PM_CHANGING)) { 5284 mutex_exit(SD_MUTEX(un)); 5285 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5286 "device in resource wait state, exiting\n"); 5287 return (DDI_FAILURE); 5288 } 5289 5290 5291 un->un_save_state = un->un_last_state; 5292 New_state(un, SD_STATE_SUSPENDED); 5293 5294 /* 5295 * Wait for all commands that are in transport or queued to a timer 5296 * for retry to complete. 5297 * 5298 * While waiting, no new commands will be accepted or sent because of 5299 * the new state we set above. 5300 * 5301 * Wait till current operation has completed. If we are in the resource 5302 * wait state (with an intr outstanding) then we need to wait till the 5303 * intr completes and starts the next cmd. We want to wait for 5304 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
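 * (sd_wait_cmds_complete is expressed in seconds; the code below
 * converts it to lbolt ticks with drv_usectohz().)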
5305 */ 5306 wait_cmds_complete = ddi_get_lbolt() + 5307 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5308 5309 while (un->un_ncmds_in_transport != 0) { 5310 /* 5311 * Fail if commands do not finish in the specified time. 5312 */ 5313 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5314 wait_cmds_complete) == -1) { 5315 /* 5316 * Undo the state changes made above. Everything 5317 * must go back to it's original value. 5318 */ 5319 Restore_state(un); 5320 un->un_last_state = un->un_save_state; 5321 /* Wake up any threads that might be waiting. */ 5322 cv_broadcast(&un->un_suspend_cv); 5323 mutex_exit(SD_MUTEX(un)); 5324 SD_ERROR(SD_LOG_IO_PM, un, 5325 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5326 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5327 return (DDI_FAILURE); 5328 } 5329 } 5330 5331 /* 5332 * Cancel SCSI watch thread and timeouts, if any are active 5333 */ 5334 5335 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5336 opaque_t temp_token = un->un_swr_token; 5337 mutex_exit(SD_MUTEX(un)); 5338 scsi_watch_suspend(temp_token); 5339 mutex_enter(SD_MUTEX(un)); 5340 } 5341 5342 if (un->un_reset_throttle_timeid != NULL) { 5343 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5344 un->un_reset_throttle_timeid = NULL; 5345 mutex_exit(SD_MUTEX(un)); 5346 (void) untimeout(temp_id); 5347 mutex_enter(SD_MUTEX(un)); 5348 } 5349 5350 if (un->un_dcvb_timeid != NULL) { 5351 timeout_id_t temp_id = un->un_dcvb_timeid; 5352 un->un_dcvb_timeid = NULL; 5353 mutex_exit(SD_MUTEX(un)); 5354 (void) untimeout(temp_id); 5355 mutex_enter(SD_MUTEX(un)); 5356 } 5357 5358 mutex_enter(&un->un_pm_mutex); 5359 if (un->un_pm_timeid != NULL) { 5360 timeout_id_t temp_id = un->un_pm_timeid; 5361 un->un_pm_timeid = NULL; 5362 mutex_exit(&un->un_pm_mutex); 5363 mutex_exit(SD_MUTEX(un)); 5364 (void) untimeout(temp_id); 5365 mutex_enter(SD_MUTEX(un)); 5366 } else { 5367 mutex_exit(&un->un_pm_mutex); 5368 } 5369 5370 if (un->un_retry_timeid != NULL) { 5371 timeout_id_t temp_id = un->un_retry_timeid; 5372 un->un_retry_timeid = NULL; 5373 mutex_exit(SD_MUTEX(un)); 5374 (void) untimeout(temp_id); 5375 mutex_enter(SD_MUTEX(un)); 5376 } 5377 5378 if (un->un_direct_priority_timeid != NULL) { 5379 timeout_id_t temp_id = un->un_direct_priority_timeid; 5380 un->un_direct_priority_timeid = NULL; 5381 mutex_exit(SD_MUTEX(un)); 5382 (void) untimeout(temp_id); 5383 mutex_enter(SD_MUTEX(un)); 5384 } 5385 5386 if (un->un_f_is_fibre == TRUE) { 5387 /* 5388 * Remove callbacks for insert and remove events 5389 */ 5390 if (un->un_insert_event != NULL) { 5391 mutex_exit(SD_MUTEX(un)); 5392 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5393 mutex_enter(SD_MUTEX(un)); 5394 un->un_insert_event = NULL; 5395 } 5396 5397 if (un->un_remove_event != NULL) { 5398 mutex_exit(SD_MUTEX(un)); 5399 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5400 mutex_enter(SD_MUTEX(un)); 5401 un->un_remove_event = NULL; 5402 } 5403 } 5404 5405 mutex_exit(SD_MUTEX(un)); 5406 5407 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5408 5409 return (DDI_SUCCESS); 5410 } 5411 5412 5413 /* 5414 * Function: sd_ddi_pm_suspend 5415 * 5416 * Description: Set the drive state to low power. 5417 * Someone else is required to actually change the drive 5418 * power level. 
5419 * 5420 * Arguments: un - driver soft state (unit) structure 5421 * 5422 * Return Code: DDI_FAILURE or DDI_SUCCESS 5423 * 5424 * Context: Kernel thread context 5425 */ 5426 5427 static int 5428 sd_ddi_pm_suspend(struct sd_lun *un) 5429 { 5430 ASSERT(un != NULL); 5431 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5432 5433 ASSERT(!mutex_owned(SD_MUTEX(un))); 5434 mutex_enter(SD_MUTEX(un)); 5435 5436 /* 5437 * Exit if power management is not enabled for this device, or if 5438 * the device is being used by HA. 5439 */ 5440 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5441 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5442 mutex_exit(SD_MUTEX(un)); 5443 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5444 return (DDI_SUCCESS); 5445 } 5446 5447 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5448 un->un_ncmds_in_driver); 5449 5450 /* 5451 * See if the device is not busy, ie.: 5452 * - we have no commands in the driver for this device 5453 * - not waiting for resources 5454 */ 5455 if ((un->un_ncmds_in_driver == 0) && 5456 (un->un_state != SD_STATE_RWAIT)) { 5457 /* 5458 * The device is not busy, so it is OK to go to low power state. 5459 * Indicate low power, but rely on someone else to actually 5460 * change it. 5461 */ 5462 mutex_enter(&un->un_pm_mutex); 5463 un->un_pm_count = -1; 5464 mutex_exit(&un->un_pm_mutex); 5465 un->un_power_level = SD_SPINDLE_OFF; 5466 } 5467 5468 mutex_exit(SD_MUTEX(un)); 5469 5470 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5471 5472 return (DDI_SUCCESS); 5473 } 5474 5475 5476 /* 5477 * Function: sd_ddi_resume 5478 * 5479 * Description: Performs system power-up operations.. 5480 * 5481 * Return Code: DDI_SUCCESS 5482 * DDI_FAILURE 5483 * 5484 * Context: Kernel thread context 5485 */ 5486 5487 static int 5488 sd_ddi_resume(dev_info_t *devi) 5489 { 5490 struct sd_lun *un; 5491 5492 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5493 if (un == NULL) { 5494 return (DDI_FAILURE); 5495 } 5496 5497 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 5498 5499 mutex_enter(SD_MUTEX(un)); 5500 Restore_state(un); 5501 5502 /* 5503 * Restore the state which was saved to give the 5504 * the right state in un_last_state 5505 */ 5506 un->un_last_state = un->un_save_state; 5507 /* 5508 * Note: throttle comes back at full. 5509 * Also note: this MUST be done before calling pm_raise_power 5510 * otherwise the system can get hung in biowait. The scenario where 5511 * this'll happen is under cpr suspend. Writing of the system 5512 * state goes through sddump, which writes 0 to un_throttle. If 5513 * writing the system state then fails, example if the partition is 5514 * too small, then cpr attempts a resume. If throttle isn't restored 5515 * from the saved value until after calling pm_raise_power then 5516 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 5517 * in biowait. 5518 */ 5519 un->un_throttle = un->un_saved_throttle; 5520 5521 /* 5522 * The chance of failure is very rare as the only command done in power 5523 * entry point is START command when you transition from 0->1 or 5524 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 5525 * which suspend was done. Ignore the return value as the resume should 5526 * not be failed. In the case of removable media the media need not be 5527 * inserted and hence there is a chance that raise power will fail with 5528 * media not present. 
5529 */ 5530 if (un->un_f_attach_spinup) { 5531 mutex_exit(SD_MUTEX(un)); 5532 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5533 mutex_enter(SD_MUTEX(un)); 5534 } 5535 5536 /* 5537 * Don't broadcast to the suspend cv and therefore possibly 5538 * start I/O until after power has been restored. 5539 */ 5540 cv_broadcast(&un->un_suspend_cv); 5541 cv_broadcast(&un->un_state_cv); 5542 5543 /* restart thread */ 5544 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5545 scsi_watch_resume(un->un_swr_token); 5546 } 5547 5548 #if (defined(__fibre)) 5549 if (un->un_f_is_fibre == TRUE) { 5550 /* 5551 * Add callbacks for insert and remove events 5552 */ 5553 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5554 sd_init_event_callbacks(un); 5555 } 5556 } 5557 #endif 5558 5559 /* 5560 * Transport any pending commands to the target. 5561 * 5562 * If this is a low-activity device commands in queue will have to wait 5563 * until new commands come in, which may take awhile. Also, we 5564 * specifically don't check un_ncmds_in_transport because we know that 5565 * there really are no commands in progress after the unit was 5566 * suspended and we could have reached the throttle level, been 5567 * suspended, and have no new commands coming in for awhile. Highly 5568 * unlikely, but so is the low-activity disk scenario. 5569 */ 5570 ddi_xbuf_dispatch(un->un_xbuf_attr); 5571 5572 sd_start_cmds(un, NULL); 5573 mutex_exit(SD_MUTEX(un)); 5574 5575 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5576 5577 return (DDI_SUCCESS); 5578 } 5579 5580 5581 /* 5582 * Function: sd_ddi_pm_resume 5583 * 5584 * Description: Set the drive state to powered on. 5585 * Someone else is required to actually change the drive 5586 * power level. 5587 * 5588 * Arguments: un - driver soft state (unit) structure 5589 * 5590 * Return Code: DDI_SUCCESS 5591 * 5592 * Context: Kernel thread context 5593 */ 5594 5595 static int 5596 sd_ddi_pm_resume(struct sd_lun *un) 5597 { 5598 ASSERT(un != NULL); 5599 5600 ASSERT(!mutex_owned(SD_MUTEX(un))); 5601 mutex_enter(SD_MUTEX(un)); 5602 un->un_power_level = SD_SPINDLE_ON; 5603 5604 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5605 mutex_enter(&un->un_pm_mutex); 5606 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5607 un->un_pm_count++; 5608 ASSERT(un->un_pm_count == 0); 5609 /* 5610 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5611 * un_suspend_cv is for a system resume, not a power management 5612 * device resume. (4297749) 5613 * cv_broadcast(&un->un_suspend_cv); 5614 */ 5615 } 5616 mutex_exit(&un->un_pm_mutex); 5617 mutex_exit(SD_MUTEX(un)); 5618 5619 return (DDI_SUCCESS); 5620 } 5621 5622 5623 /* 5624 * Function: sd_pm_idletimeout_handler 5625 * 5626 * Description: A timer routine that's active only while a device is busy. 5627 * The purpose is to extend slightly the pm framework's busy 5628 * view of the device to prevent busy/idle thrashing for 5629 * back-to-back commands. Do this by comparing the current time 5630 * to the time at which the last command completed and when the 5631 * difference is greater than sd_pm_idletime, call 5632 * pm_idle_component. In addition to indicating idle to the pm 5633 * framework, update the chain type to again use the internal pm 5634 * layers of the driver. 
5635 * 5636 * Arguments: arg - driver soft state (unit) structure 5637 * 5638 * Context: Executes in a timeout(9F) thread context 5639 */ 5640 5641 static void 5642 sd_pm_idletimeout_handler(void *arg) 5643 { 5644 struct sd_lun *un = arg; 5645 5646 time_t now; 5647 5648 mutex_enter(&sd_detach_mutex); 5649 if (un->un_detach_count != 0) { 5650 /* Abort if the instance is detaching */ 5651 mutex_exit(&sd_detach_mutex); 5652 return; 5653 } 5654 mutex_exit(&sd_detach_mutex); 5655 5656 now = ddi_get_time(); 5657 /* 5658 * Grab both mutexes, in the proper order, since we're accessing 5659 * both PM and softstate variables. 5660 */ 5661 mutex_enter(SD_MUTEX(un)); 5662 mutex_enter(&un->un_pm_mutex); 5663 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5664 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5665 /* 5666 * Update the chain types. 5667 * This takes affect on the next new command received. 5668 */ 5669 if (un->un_f_non_devbsize_supported) { 5670 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5671 } else { 5672 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5673 } 5674 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 5675 5676 SD_TRACE(SD_LOG_IO_PM, un, 5677 "sd_pm_idletimeout_handler: idling device\n"); 5678 (void) pm_idle_component(SD_DEVINFO(un), 0); 5679 un->un_pm_idle_timeid = NULL; 5680 } else { 5681 un->un_pm_idle_timeid = 5682 timeout(sd_pm_idletimeout_handler, un, 5683 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 5684 } 5685 mutex_exit(&un->un_pm_mutex); 5686 mutex_exit(SD_MUTEX(un)); 5687 } 5688 5689 5690 /* 5691 * Function: sd_pm_timeout_handler 5692 * 5693 * Description: Callback to tell framework we are idle. 5694 * 5695 * Context: timeout(9f) thread context. 5696 */ 5697 5698 static void 5699 sd_pm_timeout_handler(void *arg) 5700 { 5701 struct sd_lun *un = arg; 5702 5703 (void) pm_idle_component(SD_DEVINFO(un), 0); 5704 mutex_enter(&un->un_pm_mutex); 5705 un->un_pm_timeid = NULL; 5706 mutex_exit(&un->un_pm_mutex); 5707 } 5708 5709 5710 /* 5711 * Function: sdpower 5712 * 5713 * Description: PM entry point. 5714 * 5715 * Return Code: DDI_SUCCESS 5716 * DDI_FAILURE 5717 * 5718 * Context: Kernel thread context 5719 */ 5720 5721 static int 5722 sdpower(dev_info_t *devi, int component, int level) 5723 { 5724 struct sd_lun *un; 5725 int instance; 5726 int rval = DDI_SUCCESS; 5727 uint_t i, log_page_size, maxcycles, ncycles; 5728 uchar_t *log_page_data; 5729 int log_sense_page; 5730 int medium_present; 5731 time_t intvlp; 5732 dev_t dev; 5733 struct pm_trans_data sd_pm_tran_data; 5734 uchar_t save_state; 5735 int sval; 5736 uchar_t state_before_pm; 5737 int got_semaphore_here; 5738 5739 instance = ddi_get_instance(devi); 5740 5741 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 5742 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 5743 component != 0) { 5744 return (DDI_FAILURE); 5745 } 5746 5747 dev = sd_make_device(SD_DEVINFO(un)); 5748 5749 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 5750 5751 /* 5752 * Must synchronize power down with close. 5753 * Attempt to decrement/acquire the open/close semaphore, 5754 * but do NOT wait on it. If it's not greater than zero, 5755 * ie. it can't be decremented without waiting, then 5756 * someone else, either open or close, already has it 5757 * and the try returns 0. Use that knowledge here to determine 5758 * if it's OK to change the device power level. 5759 * Also, only increment it on exit if it was decremented, ie. gotten, 5760 * here. 
5761 */ 5762 got_semaphore_here = sema_tryp(&un->un_semoclose); 5763 5764 mutex_enter(SD_MUTEX(un)); 5765 5766 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 5767 un->un_ncmds_in_driver); 5768 5769 /* 5770 * If un_ncmds_in_driver is non-zero it indicates commands are 5771 * already being processed in the driver, or if the semaphore was 5772 * not gotten here it indicates an open or close is being processed. 5773 * At the same time somebody is requesting to go low power which 5774 * can't happen, therefore we need to return failure. 5775 */ 5776 if ((level == SD_SPINDLE_OFF) && 5777 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 5778 mutex_exit(SD_MUTEX(un)); 5779 5780 if (got_semaphore_here != 0) { 5781 sema_v(&un->un_semoclose); 5782 } 5783 SD_TRACE(SD_LOG_IO_PM, un, 5784 "sdpower: exit, device has queued cmds.\n"); 5785 return (DDI_FAILURE); 5786 } 5787 5788 /* 5789 * if it is OFFLINE that means the disk is completely dead 5790 * in our case we have to put the disk in on or off by sending commands 5791 * Of course that will fail anyway so return back here. 5792 * 5793 * Power changes to a device that's OFFLINE or SUSPENDED 5794 * are not allowed. 5795 */ 5796 if ((un->un_state == SD_STATE_OFFLINE) || 5797 (un->un_state == SD_STATE_SUSPENDED)) { 5798 mutex_exit(SD_MUTEX(un)); 5799 5800 if (got_semaphore_here != 0) { 5801 sema_v(&un->un_semoclose); 5802 } 5803 SD_TRACE(SD_LOG_IO_PM, un, 5804 "sdpower: exit, device is off-line.\n"); 5805 return (DDI_FAILURE); 5806 } 5807 5808 /* 5809 * Change the device's state to indicate it's power level 5810 * is being changed. Do this to prevent a power off in the 5811 * middle of commands, which is especially bad on devices 5812 * that are really powered off instead of just spun down. 5813 */ 5814 state_before_pm = un->un_state; 5815 un->un_state = SD_STATE_PM_CHANGING; 5816 5817 mutex_exit(SD_MUTEX(un)); 5818 5819 /* 5820 * If "pm-capable" property is set to TRUE by HBA drivers, 5821 * bypass the following checking, otherwise, check the log 5822 * sense information for this device 5823 */ 5824 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 5825 /* 5826 * Get the log sense information to understand whether the 5827 * the powercycle counts have gone beyond the threshhold. 5828 */ 5829 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5830 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5831 5832 mutex_enter(SD_MUTEX(un)); 5833 log_sense_page = un->un_start_stop_cycle_page; 5834 mutex_exit(SD_MUTEX(un)); 5835 5836 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5837 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 5838 #ifdef SDDEBUG 5839 if (sd_force_pm_supported) { 5840 /* Force a successful result */ 5841 rval = 0; 5842 } 5843 #endif 5844 if (rval != 0) { 5845 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5846 "Log Sense Failed\n"); 5847 kmem_free(log_page_data, log_page_size); 5848 /* Cannot support power management on those drives */ 5849 5850 if (got_semaphore_here != 0) { 5851 sema_v(&un->un_semoclose); 5852 } 5853 /* 5854 * On exit put the state back to it's original value 5855 * and broadcast to anyone waiting for the power 5856 * change completion. 
5857 */ 5858 mutex_enter(SD_MUTEX(un)); 5859 un->un_state = state_before_pm; 5860 cv_broadcast(&un->un_suspend_cv); 5861 mutex_exit(SD_MUTEX(un)); 5862 SD_TRACE(SD_LOG_IO_PM, un, 5863 "sdpower: exit, Log Sense Failed.\n"); 5864 return (DDI_FAILURE); 5865 } 5866 5867 /* 5868 * From the page data - Convert the essential information to 5869 * pm_trans_data 5870 */ 5871 maxcycles = 5872 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 5873 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 5874 5875 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 5876 5877 ncycles = 5878 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 5879 (log_page_data[0x26] << 8) | log_page_data[0x27]; 5880 5881 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 5882 5883 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 5884 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 5885 log_page_data[8+i]; 5886 } 5887 5888 kmem_free(log_page_data, log_page_size); 5889 5890 /* 5891 * Call pm_trans_check routine to get the Ok from 5892 * the global policy 5893 */ 5894 5895 sd_pm_tran_data.format = DC_SCSI_FORMAT; 5896 sd_pm_tran_data.un.scsi_cycles.flag = 0; 5897 5898 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 5899 #ifdef SDDEBUG 5900 if (sd_force_pm_supported) { 5901 /* Force a successful result */ 5902 rval = 1; 5903 } 5904 #endif 5905 switch (rval) { 5906 case 0: 5907 /* 5908 * Not Ok to Power cycle or error in parameters passed 5909 * Would have given the advised time to consider power 5910 * cycle. Based on the new intvlp parameter we are 5911 * supposed to pretend we are busy so that pm framework 5912 * will never call our power entry point. Because of 5913 * that install a timeout handler and wait for the 5914 * recommended time to elapse so that power management 5915 * can be effective again. 5916 * 5917 * To effect this behavior, call pm_busy_component to 5918 * indicate to the framework this device is busy. 5919 * By not adjusting un_pm_count the rest of PM in 5920 * the driver will function normally, and independant 5921 * of this but because the framework is told the device 5922 * is busy it won't attempt powering down until it gets 5923 * a matching idle. The timeout handler sends this. 5924 * Note: sd_pm_entry can't be called here to do this 5925 * because sdpower may have been called as a result 5926 * of a call to pm_raise_power from within sd_pm_entry. 5927 * 5928 * If a timeout handler is already active then 5929 * don't install another. 5930 */ 5931 mutex_enter(&un->un_pm_mutex); 5932 if (un->un_pm_timeid == NULL) { 5933 un->un_pm_timeid = 5934 timeout(sd_pm_timeout_handler, 5935 un, intvlp * drv_usectohz(1000000)); 5936 mutex_exit(&un->un_pm_mutex); 5937 (void) pm_busy_component(SD_DEVINFO(un), 0); 5938 } else { 5939 mutex_exit(&un->un_pm_mutex); 5940 } 5941 if (got_semaphore_here != 0) { 5942 sema_v(&un->un_semoclose); 5943 } 5944 /* 5945 * On exit put the state back to it's original value 5946 * and broadcast to anyone waiting for the power 5947 * change completion. 5948 */ 5949 mutex_enter(SD_MUTEX(un)); 5950 un->un_state = state_before_pm; 5951 cv_broadcast(&un->un_suspend_cv); 5952 mutex_exit(SD_MUTEX(un)); 5953 5954 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 5955 "trans check Failed, not ok to power cycle.\n"); 5956 return (DDI_FAILURE); 5957 5958 case -1: 5959 if (got_semaphore_here != 0) { 5960 sema_v(&un->un_semoclose); 5961 } 5962 /* 5963 * On exit put the state back to it's original value 5964 * and broadcast to anyone waiting for the power 5965 * change completion. 
5966 */ 5967 mutex_enter(SD_MUTEX(un)); 5968 un->un_state = state_before_pm; 5969 cv_broadcast(&un->un_suspend_cv); 5970 mutex_exit(SD_MUTEX(un)); 5971 SD_TRACE(SD_LOG_IO_PM, un, 5972 "sdpower: exit, trans check command Failed.\n"); 5973 return (DDI_FAILURE); 5974 } 5975 } 5976 5977 if (level == SD_SPINDLE_OFF) { 5978 /* 5979 * Save the last state... if the STOP FAILS we need it 5980 * for restoring 5981 */ 5982 mutex_enter(SD_MUTEX(un)); 5983 save_state = un->un_last_state; 5984 /* 5985 * There must not be any cmds. getting processed 5986 * in the driver when we get here. Power to the 5987 * device is potentially going off. 5988 */ 5989 ASSERT(un->un_ncmds_in_driver == 0); 5990 mutex_exit(SD_MUTEX(un)); 5991 5992 /* 5993 * For now suspend the device completely before spindle is 5994 * turned off 5995 */ 5996 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 5997 if (got_semaphore_here != 0) { 5998 sema_v(&un->un_semoclose); 5999 } 6000 /* 6001 * On exit put the state back to it's original value 6002 * and broadcast to anyone waiting for the power 6003 * change completion. 6004 */ 6005 mutex_enter(SD_MUTEX(un)); 6006 un->un_state = state_before_pm; 6007 cv_broadcast(&un->un_suspend_cv); 6008 mutex_exit(SD_MUTEX(un)); 6009 SD_TRACE(SD_LOG_IO_PM, un, 6010 "sdpower: exit, PM suspend Failed.\n"); 6011 return (DDI_FAILURE); 6012 } 6013 } 6014 6015 /* 6016 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6017 * close, or strategy. Dump no long uses this routine, it uses it's 6018 * own code so it can be done in polled mode. 6019 */ 6020 6021 medium_present = TRUE; 6022 6023 /* 6024 * When powering up, issue a TUR in case the device is at unit 6025 * attention. Don't do retries. Bypass the PM layer, otherwise 6026 * a deadlock on un_pm_busy_cv will occur. 6027 */ 6028 if (level == SD_SPINDLE_ON) { 6029 (void) sd_send_scsi_TEST_UNIT_READY(un, 6030 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6031 } 6032 6033 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6034 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6035 6036 sval = sd_send_scsi_START_STOP_UNIT(un, 6037 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6038 SD_PATH_DIRECT); 6039 /* Command failed, check for media present. */ 6040 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6041 medium_present = FALSE; 6042 } 6043 6044 /* 6045 * The conditions of interest here are: 6046 * if a spindle off with media present fails, 6047 * then restore the state and return an error. 6048 * else if a spindle on fails, 6049 * then return an error (there's no state to restore). 6050 * In all other cases we setup for the new state 6051 * and return success. 6052 */ 6053 switch (level) { 6054 case SD_SPINDLE_OFF: 6055 if ((medium_present == TRUE) && (sval != 0)) { 6056 /* The stop command from above failed */ 6057 rval = DDI_FAILURE; 6058 /* 6059 * The stop command failed, and we have media 6060 * present. Put the level back by calling the 6061 * sd_pm_resume() and set the state back to 6062 * it's previous value. 6063 */ 6064 (void) sd_ddi_pm_resume(un); 6065 mutex_enter(SD_MUTEX(un)); 6066 un->un_last_state = save_state; 6067 mutex_exit(SD_MUTEX(un)); 6068 break; 6069 } 6070 /* 6071 * The stop command from above succeeded. 6072 */ 6073 if (un->un_f_monitor_media_state) { 6074 /* 6075 * Terminate watch thread in case of removable media 6076 * devices going into low power state. 
This is as per 6077 * the requirements of pm framework, otherwise commands 6078 * will be generated for the device (through watch 6079 * thread), even when the device is in low power state. 6080 */ 6081 mutex_enter(SD_MUTEX(un)); 6082 un->un_f_watcht_stopped = FALSE; 6083 if (un->un_swr_token != NULL) { 6084 opaque_t temp_token = un->un_swr_token; 6085 un->un_f_watcht_stopped = TRUE; 6086 un->un_swr_token = NULL; 6087 mutex_exit(SD_MUTEX(un)); 6088 (void) scsi_watch_request_terminate(temp_token, 6089 SCSI_WATCH_TERMINATE_WAIT); 6090 } else { 6091 mutex_exit(SD_MUTEX(un)); 6092 } 6093 } 6094 break; 6095 6096 default: /* The level requested is spindle on... */ 6097 /* 6098 * Legacy behavior: return success on a failed spinup 6099 * if there is no media in the drive. 6100 * Do this by looking at medium_present here. 6101 */ 6102 if ((sval != 0) && medium_present) { 6103 /* The start command from above failed */ 6104 rval = DDI_FAILURE; 6105 break; 6106 } 6107 /* 6108 * The start command from above succeeded 6109 * Resume the devices now that we have 6110 * started the disks 6111 */ 6112 (void) sd_ddi_pm_resume(un); 6113 6114 /* 6115 * Resume the watch thread since it was suspended 6116 * when the device went into low power mode. 6117 */ 6118 if (un->un_f_monitor_media_state) { 6119 mutex_enter(SD_MUTEX(un)); 6120 if (un->un_f_watcht_stopped == TRUE) { 6121 opaque_t temp_token; 6122 6123 un->un_f_watcht_stopped = FALSE; 6124 mutex_exit(SD_MUTEX(un)); 6125 temp_token = scsi_watch_request_submit( 6126 SD_SCSI_DEVP(un), 6127 sd_check_media_time, 6128 SENSE_LENGTH, sd_media_watch_cb, 6129 (caddr_t)dev); 6130 mutex_enter(SD_MUTEX(un)); 6131 un->un_swr_token = temp_token; 6132 } 6133 mutex_exit(SD_MUTEX(un)); 6134 } 6135 } 6136 if (got_semaphore_here != 0) { 6137 sema_v(&un->un_semoclose); 6138 } 6139 /* 6140 * On exit put the state back to it's original value 6141 * and broadcast to anyone waiting for the power 6142 * change completion. 6143 */ 6144 mutex_enter(SD_MUTEX(un)); 6145 un->un_state = state_before_pm; 6146 cv_broadcast(&un->un_suspend_cv); 6147 mutex_exit(SD_MUTEX(un)); 6148 6149 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6150 6151 return (rval); 6152 } 6153 6154 6155 6156 /* 6157 * Function: sdattach 6158 * 6159 * Description: Driver's attach(9e) entry point function. 6160 * 6161 * Arguments: devi - opaque device info handle 6162 * cmd - attach type 6163 * 6164 * Return Code: DDI_SUCCESS 6165 * DDI_FAILURE 6166 * 6167 * Context: Kernel thread context 6168 */ 6169 6170 static int 6171 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6172 { 6173 switch (cmd) { 6174 case DDI_ATTACH: 6175 return (sd_unit_attach(devi)); 6176 case DDI_RESUME: 6177 return (sd_ddi_resume(devi)); 6178 default: 6179 break; 6180 } 6181 return (DDI_FAILURE); 6182 } 6183 6184 6185 /* 6186 * Function: sddetach 6187 * 6188 * Description: Driver's detach(9E) entry point function. 
6189 * 6190 * Arguments: devi - opaque device info handle 6191 * cmd - detach type 6192 * 6193 * Return Code: DDI_SUCCESS 6194 * DDI_FAILURE 6195 * 6196 * Context: Kernel thread context 6197 */ 6198 6199 static int 6200 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6201 { 6202 switch (cmd) { 6203 case DDI_DETACH: 6204 return (sd_unit_detach(devi)); 6205 case DDI_SUSPEND: 6206 return (sd_ddi_suspend(devi)); 6207 default: 6208 break; 6209 } 6210 return (DDI_FAILURE); 6211 } 6212 6213 6214 /* 6215 * Function: sd_sync_with_callback 6216 * 6217 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6218 * state while the callback routine is active. 6219 * 6220 * Arguments: un: softstate structure for the instance 6221 * 6222 * Context: Kernel thread context 6223 */ 6224 6225 static void 6226 sd_sync_with_callback(struct sd_lun *un) 6227 { 6228 ASSERT(un != NULL); 6229 6230 mutex_enter(SD_MUTEX(un)); 6231 6232 ASSERT(un->un_in_callback >= 0); 6233 6234 while (un->un_in_callback > 0) { 6235 mutex_exit(SD_MUTEX(un)); 6236 delay(2); 6237 mutex_enter(SD_MUTEX(un)); 6238 } 6239 6240 mutex_exit(SD_MUTEX(un)); 6241 } 6242 6243 /* 6244 * Function: sd_unit_attach 6245 * 6246 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6247 * the soft state structure for the device and performs 6248 * all necessary structure and device initializations. 6249 * 6250 * Arguments: devi: the system's dev_info_t for the device. 6251 * 6252 * Return Code: DDI_SUCCESS if attach is successful. 6253 * DDI_FAILURE if any part of the attach fails. 6254 * 6255 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6256 * Kernel thread context only. Can sleep. 6257 */ 6258 6259 static int 6260 sd_unit_attach(dev_info_t *devi) 6261 { 6262 struct scsi_device *devp; 6263 struct sd_lun *un; 6264 char *variantp; 6265 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6266 int instance; 6267 int rval; 6268 int wc_enabled; 6269 int tgt; 6270 uint64_t capacity; 6271 uint_t lbasize = 0; 6272 dev_info_t *pdip = ddi_get_parent(devi); 6273 int offbyone = 0; 6274 int geom_label_valid = 0; 6275 6276 /* 6277 * Retrieve the target driver's private data area. This was set 6278 * up by the HBA. 6279 */ 6280 devp = ddi_get_driver_private(devi); 6281 6282 /* 6283 * Retrieve the target ID of the device. 6284 */ 6285 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6286 SCSI_ADDR_PROP_TARGET, -1); 6287 6288 /* 6289 * Since we have no idea what state things were left in by the last 6290 * user of the device, set up some 'default' settings, ie. turn 'em 6291 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6292 * Do this before the scsi_probe, which sends an inquiry. 6293 * This is a fix for bug (4430280). 6294 * Of special importance is wide-xfer. The drive could have been left 6295 * in wide transfer mode by the last driver to communicate with it, 6296 * this includes us. If that's the case, and if the following is not 6297 * setup properly or we don't re-negotiate with the drive prior to 6298 * transferring data to/from the drive, it causes bus parity errors, 6299 * data overruns, and unexpected interrupts. This first occurred when 6300 * the fix for bug (4378686) was made. 
6301 */ 6302 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6303 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6304 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6305 6306 /* 6307 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6308 * on a target. Setting it per lun instance actually sets the 6309 * capability of this target, which affects those luns already 6310 * attached on the same target. So during attach, we can only disable 6311 * this capability only when no other lun has been attached on this 6312 * target. By doing this, we assume a target has the same tagged-qing 6313 * capability for every lun. The condition can be removed when HBA 6314 * is changed to support per lun based tagged-qing capability. 6315 */ 6316 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6317 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6318 } 6319 6320 /* 6321 * Use scsi_probe() to issue an INQUIRY command to the device. 6322 * This call will allocate and fill in the scsi_inquiry structure 6323 * and point the sd_inq member of the scsi_device structure to it. 6324 * If the attach succeeds, then this memory will not be de-allocated 6325 * (via scsi_unprobe()) until the instance is detached. 6326 */ 6327 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6328 goto probe_failed; 6329 } 6330 6331 /* 6332 * Check the device type as specified in the inquiry data and 6333 * claim it if it is of a type that we support. 6334 */ 6335 switch (devp->sd_inq->inq_dtype) { 6336 case DTYPE_DIRECT: 6337 break; 6338 case DTYPE_RODIRECT: 6339 break; 6340 case DTYPE_OPTICAL: 6341 break; 6342 case DTYPE_NOTPRESENT: 6343 default: 6344 /* Unsupported device type; fail the attach. */ 6345 goto probe_failed; 6346 } 6347 6348 /* 6349 * Allocate the soft state structure for this unit. 6350 * 6351 * We rely upon this memory being set to all zeroes by 6352 * ddi_soft_state_zalloc(). We assume that any member of the 6353 * soft state structure that is not explicitly initialized by 6354 * this routine will have a value of zero. 6355 */ 6356 instance = ddi_get_instance(devp->sd_dev); 6357 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6358 goto probe_failed; 6359 } 6360 6361 /* 6362 * Retrieve a pointer to the newly-allocated soft state. 6363 * 6364 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6365 * was successful, unless something has gone horribly wrong and the 6366 * ddi's soft state internals are corrupt (in which case it is 6367 * probably better to halt here than just fail the attach....) 6368 */ 6369 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6370 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6371 instance); 6372 /*NOTREACHED*/ 6373 } 6374 6375 /* 6376 * Link the back ptr of the driver soft state to the scsi_device 6377 * struct for this lun. 6378 * Save a pointer to the softstate in the driver-private area of 6379 * the scsi_device struct. 6380 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6381 * we first set un->un_sd below. 6382 */ 6383 un->un_sd = devp; 6384 devp->sd_private = (opaque_t)un; 6385 6386 /* 6387 * The following must be after devp is stored in the soft state struct. 6388 */ 6389 #ifdef SDDEBUG 6390 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6391 "%s_unit_attach: un:0x%p instance:%d\n", 6392 ddi_driver_name(devi), un, instance); 6393 #endif 6394 6395 /* 6396 * Set up the device type and node type (for the minor nodes). 
6397 * By default we assume that the device can at least support the
6398 * Common Command Set. Call it a CD-ROM if it reports itself
6399 * as a RODIRECT device.
6400 */
6401 switch (devp->sd_inq->inq_dtype) {
6402 case DTYPE_RODIRECT:
6403 un->un_node_type = DDI_NT_CD_CHAN;
6404 un->un_ctype = CTYPE_CDROM;
6405 break;
6406 case DTYPE_OPTICAL:
6407 un->un_node_type = DDI_NT_BLOCK_CHAN;
6408 un->un_ctype = CTYPE_ROD;
6409 break;
6410 default:
6411 un->un_node_type = DDI_NT_BLOCK_CHAN;
6412 un->un_ctype = CTYPE_CCS;
6413 break;
6414 }
6415
6416 /*
6417 * Try to read the interconnect type from the HBA.
6418 *
6419 * Note: This driver is currently compiled as two binaries, a parallel
6420 * scsi version (sd) and a fibre channel version (ssd). All functional
6421 * differences are determined at compile time. In the future a single
6422 * binary will be provided and the interconnect type will be used to
6423 * differentiate between fibre and parallel scsi behaviors. At that time
6424 * it will be necessary for all fibre channel HBAs to support this
6425 * property.
6426 *
6427 * Set un_f_is_fibre to TRUE (default fibre).
6428 */
6429 un->un_f_is_fibre = TRUE;
6430 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
6431 case INTERCONNECT_SSA:
6432 un->un_interconnect_type = SD_INTERCONNECT_SSA;
6433 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6434 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6435 break;
6436 case INTERCONNECT_PARALLEL:
6437 un->un_f_is_fibre = FALSE;
6438 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6439 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6440 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6441 break;
6442 case INTERCONNECT_SATA:
6443 un->un_f_is_fibre = FALSE;
6444 un->un_interconnect_type = SD_INTERCONNECT_SATA;
6445 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6446 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6447 break;
6448 case INTERCONNECT_FIBRE:
6449 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
6450 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6451 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6452 break;
6453 case INTERCONNECT_FABRIC:
6454 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
6455 un->un_node_type = DDI_NT_BLOCK_FABRIC;
6456 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6457 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6458 break;
6459 default:
6460 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
6461 /*
6462 * The HBA does not support the "interconnect-type" property
6463 * (or did not provide a recognized type).
6464 *
6465 * Note: This will be obsoleted when a single fibre channel
6466 * and parallel scsi driver is delivered. In the meantime the
6467 * interconnect type will be set to the platform default. If that
6468 * type is not parallel SCSI, it means that we should be
6469 * assuming "ssd" semantics. However, here this also means that
6470 * the FC HBA is not supporting the "interconnect-type" property
6471 * like we expect it to, so log this occurrence.
6472 */
6473 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
6474 if (!SD_IS_PARALLEL_SCSI(un)) {
6475 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6476 "sd_unit_attach: un:0x%p Assuming "
6477 "INTERCONNECT_FIBRE\n", un);
6478 } else {
6479 SD_INFO(SD_LOG_ATTACH_DETACH, un,
6480 "sd_unit_attach: un:0x%p Assuming "
6481 "INTERCONNECT_PARALLEL\n", un);
6482 un->un_f_is_fibre = FALSE;
6483 }
6484 #else
6485 /*
6486 * Note: This source will be implemented when a single fibre
6487 * channel and parallel scsi driver is delivered. The default
6488 * will be to assume that if a device does not support the
6489 * "interconnect-type" property it is a parallel SCSI HBA and
6490 * we will set the interconnect type for parallel scsi.
6491 */
6492 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6493 un->un_f_is_fibre = FALSE;
6494 #endif
6495 break;
6496 }
6497
6498 if (un->un_f_is_fibre == TRUE) {
6499 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6500 SCSI_VERSION_3) {
6501 switch (un->un_interconnect_type) {
6502 case SD_INTERCONNECT_FIBRE:
6503 case SD_INTERCONNECT_SSA:
6504 un->un_node_type = DDI_NT_BLOCK_WWN;
6505 break;
6506 default:
6507 break;
6508 }
6509 }
6510 }
6511
6512 /*
6513 * Initialize the Request Sense command for the target
6514 */
6515 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6516 goto alloc_rqs_failed;
6517 }
6518
6519 /*
6520 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC,
6521 * which has separate binaries for sd and ssd.
6522 *
6523 * x86 has one binary, and un_retry_count is set based on the
6524 * connection type. The hardcoded values will go away when SPARC
6525 * uses one binary for sd and ssd. These hardcoded values need to
6526 * match SD_RETRY_COUNT in sddef.h.
6527 * The value used is based on the interconnect type:
6528 * fibre = 3, parallel = 5.
6529 */
6530 #if defined(__i386) || defined(__amd64)
6531 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6532 #else
6533 un->un_retry_count = SD_RETRY_COUNT;
6534 #endif
6535
6536 /*
6537 * Set the per-disk retry count to the default number of retries
6538 * for disks and CDROMs. This value can be overridden by the
6539 * disk property list or an entry in sd.conf.
6540 */
6541 un->un_notready_retry_count =
6542 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6543 : DISK_NOT_READY_RETRY_COUNT(un);
6544
6545 /*
6546 * Set the busy retry count to the default value of un_retry_count.
6547 * This can be overridden by entries in sd.conf or the device
6548 * config table.
6549 */
6550 un->un_busy_retry_count = un->un_retry_count;
6551
6552 /*
6553 * Init the reset threshold for retries. This number determines
6554 * how many retries must be performed before a reset can be issued
6555 * (for certain error conditions). This can be overridden by entries
6556 * in sd.conf or the device config table.
6557 */
6558 un->un_reset_retry_count = (un->un_retry_count / 2);
6559
6560 /*
6561 * Set the victim_retry_count to twice the default un_retry_count.
6562 */
6563 un->un_victim_retry_count = (2 * un->un_retry_count);
6564
6565 /*
6566 * Set the reservation release timeout to the default value of
6567 * 5 seconds. This can be overridden by entries in ssd.conf or the
6568 * device config table.
6569 */
6570 un->un_reserve_release_time = 5;
6571
6572 /*
6573 * Set up the default maximum transfer size. Note that this may
6574 * get updated later in the attach, when setting up default wide
6575 * operations for disks.
6576 */
6577 #if defined(__i386) || defined(__amd64)
6578 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6579 #else
6580 un->un_max_xfer_size = (uint_t)maxphys;
6581 #endif
6582
6583 /*
6584 * Get the "allow bus device reset" property (defaults to "enabled"
6585 * if the property was not defined). This is to disable bus resets
6586 * for certain kinds of error recovery. Note: In the future, when a
6587 * run-time fibre check is available, the soft state flag should
6588 * default to enabled.
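 * (The default of 1 passed to ddi_getprop() below means that when
 * the "allow-bus-device-reset" property is absent, bus device
 * reset is treated as enabled.)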
6589 */ 6590 if (un->un_f_is_fibre == TRUE) { 6591 un->un_f_allow_bus_device_reset = TRUE; 6592 } else { 6593 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6594 "allow-bus-device-reset", 1) != 0) { 6595 un->un_f_allow_bus_device_reset = TRUE; 6596 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6597 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 6598 un); 6599 } else { 6600 un->un_f_allow_bus_device_reset = FALSE; 6601 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6602 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 6603 un); 6604 } 6605 } 6606 6607 /* 6608 * Check if this is an ATAPI device. ATAPI devices use Group 1 6609 * Read/Write commands and Group 2 Mode Sense/Select commands. 6610 * 6611 * Note: The "obsolete" way of doing this is to check for the "atapi" 6612 * property. The new "variant" property with a value of "atapi" has been 6613 * introduced so that future 'variants' of standard SCSI behavior (like 6614 * atapi) could be specified by the underlying HBA drivers by supplying 6615 * a new value for the "variant" property, instead of having to define a 6616 * new property. 6617 */ 6618 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6619 un->un_f_cfg_is_atapi = TRUE; 6620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6621 "sd_unit_attach: un:0x%p Atapi device\n", un); 6622 } 6623 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6624 &variantp) == DDI_PROP_SUCCESS) { 6625 if (strcmp(variantp, "atapi") == 0) { 6626 un->un_f_cfg_is_atapi = TRUE; 6627 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6628 "sd_unit_attach: un:0x%p Atapi device\n", un); 6629 } 6630 ddi_prop_free(variantp); 6631 } 6632 6633 un->un_cmd_timeout = SD_IO_TIME; 6634 6635 /* Info on current states, statuses, etc. (Updated frequently) */ 6636 un->un_state = SD_STATE_NORMAL; 6637 un->un_last_state = SD_STATE_NORMAL; 6638 6639 /* Control & status info for command throttling */ 6640 un->un_throttle = sd_max_throttle; 6641 un->un_saved_throttle = sd_max_throttle; 6642 un->un_min_throttle = sd_min_throttle; 6643 6644 if (un->un_f_is_fibre == TRUE) { 6645 un->un_f_use_adaptive_throttle = TRUE; 6646 } else { 6647 un->un_f_use_adaptive_throttle = FALSE; 6648 } 6649 6650 /* Removable media support. */ 6651 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6652 un->un_mediastate = DKIO_NONE; 6653 un->un_specified_mediastate = DKIO_NONE; 6654 6655 /* CVs for suspend/resume (PM or DR) */ 6656 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6657 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6658 6659 /* Power management support. */ 6660 un->un_power_level = SD_SPINDLE_UNINIT; 6661 6662 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6663 un->un_f_wcc_inprog = 0; 6664 6665 /* 6666 * The open/close semaphore is used to serialize threads executing 6667 * in the driver's open & close entry point routines for a given 6668 * instance. 6669 */ 6670 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6671 6672 /* 6673 * The conf file entry and softstate variable is a forceful override, 6674 * meaning a non-zero value must be entered to change the default. 6675 */ 6676 un->un_f_disksort_disabled = FALSE; 6677 6678 /* 6679 * Retrieve the properties from the static driver table or the driver 6680 * configuration file (.conf) for this unit and update the soft state 6681 * for the device as needed for the indicated properties. 
6682 * Note: the property configuration needs to occur here as some of the
6683 * following routines may have dependencies on soft state flags set
6684 * as part of the driver property configuration.
6685 */
6686 sd_read_unit_properties(un);
6687 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
6688 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
6689
6690 /*
6691 * A device is treated as hotpluggable only if it has the
6692 * "hotpluggable" property. Otherwise, it is regarded as
6693 * non-hotpluggable.
6694 */
6695 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
6696 -1) != -1) {
6697 un->un_f_is_hotpluggable = TRUE;
6698 }
6699
6700 /*
6701 * Set the unit's attributes (flags) according to "hotpluggable"
6702 * and the RMB bit in the INQUIRY data.
6703 */
6704 sd_set_unit_attributes(un, devi);
6705
6706 /*
6707 * By default, we mark the capacity, lbasize, and geometry
6708 * as invalid. Only if we successfully read a valid capacity
6709 * will we update the un_blockcount and un_tgt_blocksize with the
6710 * valid values (the geometry will be validated later).
6711 */
6712 un->un_f_blockcount_is_valid = FALSE;
6713 un->un_f_tgt_blocksize_is_valid = FALSE;
6714
6715 /*
6716 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
6717 * otherwise.
6718 */
6719 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
6720 un->un_blockcount = 0;
6721
6722 /*
6723 * Set up the per-instance info needed to determine the correct
6724 * CDBs and other info for issuing commands to the target.
6725 */
6726 sd_init_cdb_limits(un);
6727
6728 /*
6729 * Set up the IO chains to use, based upon the target type.
6730 */
6731 if (un->un_f_non_devbsize_supported) {
6732 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6733 } else {
6734 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6735 }
6736 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6737 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
6738 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
6739
6740 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
6741 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
6742 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
6743 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
6744
6745
6746 if (ISCD(un)) {
6747 un->un_additional_codes = sd_additional_codes;
6748 } else {
6749 un->un_additional_codes = NULL;
6750 }
6751
6752 /*
6753 * Create the kstats here so they can be available for attach-time
6754 * routines that send commands to the unit (either polled or via
6755 * sd_send_scsi_cmd).
6756 *
6757 * Note: This is a critical sequence that needs to be maintained:
6758 * 1) Instantiate the kstats here, before any routines using the
6759 * iopath (i.e. sd_send_scsi_cmd).
6760 * 2) Instantiate and initialize the partition stats
6761 * (sd_set_pstats).
6762 * 3) Initialize the error stats (sd_set_errstats), following
6763 * sd_validate_geometry(), sd_register_devid(),
6764 * and sd_cache_control().
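 * Step 1 is performed immediately below; steps 2 and 3 take place
 * later in the attach sequence.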
6765 */ 6766 6767 un->un_stats = kstat_create(sd_label, instance, 6768 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6769 if (un->un_stats != NULL) { 6770 un->un_stats->ks_lock = SD_MUTEX(un); 6771 kstat_install(un->un_stats); 6772 } 6773 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6774 "sd_unit_attach: un:0x%p un_stats created\n", un); 6775 6776 sd_create_errstats(un, instance); 6777 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6778 "sd_unit_attach: un:0x%p errstats created\n", un); 6779 6780 /* 6781 * The following if/else code was relocated here from below as part 6782 * of the fix for bug (4430280). However with the default setup added 6783 * on entry to this routine, it's no longer absolutely necessary for 6784 * this to be before the call to sd_spin_up_unit. 6785 */ 6786 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6787 /* 6788 * If SCSI-2 tagged queueing is supported by the target 6789 * and by the host adapter then we will enable it. 6790 */ 6791 un->un_tagflags = 0; 6792 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6793 (devp->sd_inq->inq_cmdque) && 6794 (un->un_f_arq_enabled == TRUE)) { 6795 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6796 1, 1) == 1) { 6797 un->un_tagflags = FLAG_STAG; 6798 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6799 "sd_unit_attach: un:0x%p tag queueing " 6800 "enabled\n", un); 6801 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6802 "untagged-qing", 0) == 1) { 6803 un->un_f_opt_queueing = TRUE; 6804 un->un_saved_throttle = un->un_throttle = 6805 min(un->un_throttle, 3); 6806 } else { 6807 un->un_f_opt_queueing = FALSE; 6808 un->un_saved_throttle = un->un_throttle = 1; 6809 } 6810 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6811 == 1) && (un->un_f_arq_enabled == TRUE)) { 6812 /* The Host Adapter supports internal queueing. */ 6813 un->un_f_opt_queueing = TRUE; 6814 un->un_saved_throttle = un->un_throttle = 6815 min(un->un_throttle, 3); 6816 } else { 6817 un->un_f_opt_queueing = FALSE; 6818 un->un_saved_throttle = un->un_throttle = 1; 6819 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6820 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6821 } 6822 6823 /* 6824 * Enable large transfers for SATA/SAS drives 6825 */ 6826 if (SD_IS_SERIAL(un)) { 6827 un->un_max_xfer_size = 6828 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6829 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6830 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6831 "sd_unit_attach: un:0x%p max transfer " 6832 "size=0x%x\n", un, un->un_max_xfer_size); 6833 6834 } 6835 6836 /* Setup or tear down default wide operations for disks */ 6837 6838 /* 6839 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6840 * and "ssd_max_xfer_size" to exist simultaneously on the same 6841 * system and be set to different values. In the future this 6842 * code may need to be updated when the ssd module is 6843 * obsoleted and removed from the system. 
(4299588) 6844 */ 6845 if (SD_IS_PARALLEL_SCSI(un) && 6846 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6847 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6848 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6849 1, 1) == 1) { 6850 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6851 "sd_unit_attach: un:0x%p Wide Transfer " 6852 "enabled\n", un); 6853 } 6854 6855 /* 6856 * If tagged queuing has also been enabled, then 6857 * enable large xfers 6858 */ 6859 if (un->un_saved_throttle == sd_max_throttle) { 6860 un->un_max_xfer_size = 6861 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6862 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6863 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6864 "sd_unit_attach: un:0x%p max transfer " 6865 "size=0x%x\n", un, un->un_max_xfer_size); 6866 } 6867 } else { 6868 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6869 0, 1) == 1) { 6870 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6871 "sd_unit_attach: un:0x%p " 6872 "Wide Transfer disabled\n", un); 6873 } 6874 } 6875 } else { 6876 un->un_tagflags = FLAG_STAG; 6877 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6878 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6879 } 6880 6881 /* 6882 * If this target supports LUN reset, try to enable it. 6883 */ 6884 if (un->un_f_lun_reset_enabled) { 6885 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6886 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6887 "un:0x%p lun_reset capability set\n", un); 6888 } else { 6889 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6890 "un:0x%p lun-reset capability not set\n", un); 6891 } 6892 } 6893 6894 /* 6895 * At this point in the attach, we have enough info in the 6896 * soft state to be able to issue commands to the target. 6897 * 6898 * All command paths used below MUST issue their commands as 6899 * SD_PATH_DIRECT. This is important as intermediate layers 6900 * are not all initialized yet (such as PM). 6901 */ 6902 6903 /* 6904 * Send a TEST UNIT READY command to the device. This should clear 6905 * any outstanding UNIT ATTENTION that may be present. 6906 * 6907 * Note: Don't check for success, just track if there is a reservation, 6908 * this is a throw away command to clear any unit attentions. 6909 * 6910 * Note: This MUST be the first command issued to the target during 6911 * attach to ensure power on UNIT ATTENTIONS are cleared. 6912 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6913 * with attempts at spinning up a device with no media. 6914 */ 6915 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6916 reservation_flag = SD_TARGET_IS_RESERVED; 6917 } 6918 6919 /* 6920 * If the device is NOT a removable media device, attempt to spin 6921 * it up (using the START_STOP_UNIT command) and read its capacity 6922 * (using the READ CAPACITY command). Note, however, that either 6923 * of these could fail and in some cases we would continue with 6924 * the attach despite the failure (see below). 6925 */ 6926 if (un->un_f_descr_format_supported) { 6927 switch (sd_spin_up_unit(un)) { 6928 case 0: 6929 /* 6930 * Spin-up was successful; now try to read the 6931 * capacity. If successful then save the results 6932 * and mark the capacity & lbasize as valid. 
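 * (sd_send_scsi_READ_CAPACITY() never returns 0 for the capacity
 * or lbasize it reports; the sd_update_block_info() call further
 * below relies on that.)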
6933 */ 6934 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6935 "sd_unit_attach: un:0x%p spin-up successful\n", un); 6936 6937 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 6938 &lbasize, SD_PATH_DIRECT)) { 6939 case 0: { 6940 if (capacity > DK_MAX_BLOCKS) { 6941 #ifdef _LP64 6942 if (capacity + 1 > 6943 SD_GROUP1_MAX_ADDRESS) { 6944 /* 6945 * Enable descriptor format 6946 * sense data so that we can 6947 * get 64 bit sense data 6948 * fields. 6949 */ 6950 sd_enable_descr_sense(un); 6951 } 6952 #else 6953 /* 32-bit kernels can't handle this */ 6954 scsi_log(SD_DEVINFO(un), 6955 sd_label, CE_WARN, 6956 "disk has %llu blocks, which " 6957 "is too large for a 32-bit " 6958 "kernel", capacity); 6959 6960 #if defined(__i386) || defined(__amd64) 6961 /* 6962 * 1TB disk was treated as (1T - 512)B 6963 * in the past, so that it might have 6964 * valid VTOC and solaris partitions, 6965 * we have to allow it to continue to 6966 * work. 6967 */ 6968 if (capacity -1 > DK_MAX_BLOCKS) 6969 #endif 6970 goto spinup_failed; 6971 #endif 6972 } 6973 6974 /* 6975 * Here it's not necessary to check the case: 6976 * the capacity of the device is bigger than 6977 * what the max hba cdb can support. Because 6978 * sd_send_scsi_READ_CAPACITY will retrieve 6979 * the capacity by sending USCSI command, which 6980 * is constrained by the max hba cdb. Actually, 6981 * sd_send_scsi_READ_CAPACITY will return 6982 * EINVAL when using bigger cdb than required 6983 * cdb length. Will handle this case in 6984 * "case EINVAL". 6985 */ 6986 6987 /* 6988 * The following relies on 6989 * sd_send_scsi_READ_CAPACITY never 6990 * returning 0 for capacity and/or lbasize. 6991 */ 6992 sd_update_block_info(un, lbasize, capacity); 6993 6994 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6995 "sd_unit_attach: un:0x%p capacity = %ld " 6996 "blocks; lbasize= %ld.\n", un, 6997 un->un_blockcount, un->un_tgt_blocksize); 6998 6999 break; 7000 } 7001 case EINVAL: 7002 /* 7003 * In the case where the max-cdb-length property 7004 * is smaller than the required CDB length for 7005 * a SCSI device, a target driver can fail to 7006 * attach to that device. 7007 */ 7008 scsi_log(SD_DEVINFO(un), 7009 sd_label, CE_WARN, 7010 "disk capacity is too large " 7011 "for current cdb length"); 7012 goto spinup_failed; 7013 case EACCES: 7014 /* 7015 * Should never get here if the spin-up 7016 * succeeded, but code it in anyway. 7017 * From here, just continue with the attach... 7018 */ 7019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7020 "sd_unit_attach: un:0x%p " 7021 "sd_send_scsi_READ_CAPACITY " 7022 "returned reservation conflict\n", un); 7023 reservation_flag = SD_TARGET_IS_RESERVED; 7024 break; 7025 default: 7026 /* 7027 * Likewise, should never get here if the 7028 * spin-up succeeded. Just continue with 7029 * the attach... 7030 */ 7031 break; 7032 } 7033 break; 7034 case EACCES: 7035 /* 7036 * Device is reserved by another host. In this case 7037 * we could not spin it up or read the capacity, but 7038 * we continue with the attach anyway. 7039 */ 7040 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7041 "sd_unit_attach: un:0x%p spin-up reservation " 7042 "conflict.\n", un); 7043 reservation_flag = SD_TARGET_IS_RESERVED; 7044 break; 7045 default: 7046 /* Fail the attach if the spin-up failed. 
*/ 7047 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7048 "sd_unit_attach: un:0x%p spin-up failed.", un); 7049 goto spinup_failed; 7050 } 7051 } 7052 7053 /* 7054 * Check to see if this is an MMC drive 7055 */ 7056 if (ISCD(un)) { 7057 sd_set_mmc_caps(un); 7058 } 7059 7060 7061 /* 7062 * Add a zero-length attribute to tell the world we support 7063 * kernel ioctls (for layered drivers) 7064 */ 7065 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7066 DDI_KERNEL_IOCTL, NULL, 0); 7067 7068 /* 7069 * Add a boolean property to tell the world we support 7070 * the B_FAILFAST flag (for layered drivers) 7071 */ 7072 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7073 "ddi-failfast-supported", NULL, 0); 7074 7075 /* 7076 * Initialize power management 7077 */ 7078 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7079 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7080 sd_setup_pm(un, devi); 7081 if (un->un_f_pm_is_enabled == FALSE) { 7082 /* 7083 * For performance, point to a jump table that does 7084 * not include pm. 7085 * The direct and priority chains don't change with PM. 7086 * 7087 * Note: this is currently done based on individual device 7088 * capabilities. When an interface for determining system 7089 * power enabled state becomes available, or when additional 7090 * layers are added to the command chain, these values will 7091 * have to be re-evaluated for correctness. 7092 */ 7093 if (un->un_f_non_devbsize_supported) { 7094 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7095 } else { 7096 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7097 } 7098 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7099 } 7100 7101 /* 7102 * This property is set to 0 by HA software to avoid retries 7103 * on a reserved disk. (The preferred property name is 7104 * "retry-on-reservation-conflict") (1189689) 7105 * 7106 * Note: The use of a global here can have unintended consequences. A 7107 * per-instance variable is preferable to match the capabilities of 7108 * different underlying HBAs (4402600) 7109 */ 7110 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7111 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7112 sd_retry_on_reservation_conflict); 7113 if (sd_retry_on_reservation_conflict != 0) { 7114 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7115 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7116 sd_retry_on_reservation_conflict); 7117 } 7118 7119 /* Set up options for QFULL handling. */ 7120 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7121 "qfull-retries", -1)) != -1) { 7122 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7123 rval, 1); 7124 } 7125 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7126 "qfull-retry-interval", -1)) != -1) { 7127 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7128 rval, 1); 7129 } 7130 7131 /* 7132 * This just prints a message that announces the existence of the 7133 * device. The message is always printed in the system logfile, but 7134 * only appears on the console if the system is booted with the 7135 * -v (verbose) argument. 
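 *
 * (For example, with boot -v the announcement appears along
 * these lines; the path shown is illustrative only:
 *
 *	sd1 is /pci@0,0/pci-ide@1f,1/ide@0/sd@1,0
 * )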
7136 */ 7137 ddi_report_dev(devi); 7138 7139 un->un_mediastate = DKIO_NONE; 7140 7141 cmlb_alloc_handle(&un->un_cmlbhandle); 7142 7143 #if defined(__i386) || defined(__amd64) 7144 /* 7145 * On x86, compensate for off-by-1 legacy error 7146 */ 7147 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7148 (lbasize == un->un_sys_blocksize)) 7149 offbyone = CMLB_OFF_BY_ONE; 7150 #endif 7151 7152 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7153 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7154 un->un_node_type, offbyone, un->un_cmlbhandle, 7155 (void *)SD_PATH_DIRECT) != 0) { 7156 goto cmlb_attach_failed; 7157 } 7158 7159 7160 /* 7161 * Read and validate the device's geometry (i.e., disk label). 7162 * A new unformatted drive will not have a valid geometry, but 7163 * the driver needs to successfully attach to this device so 7164 * the drive can be formatted via ioctls. 7165 */ 7166 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7167 (void *)SD_PATH_DIRECT) == 0) ? 1 : 0; 7168 7169 mutex_enter(SD_MUTEX(un)); 7170 7171 /* 7172 * Read and initialize the devid for the unit. 7173 */ 7174 ASSERT(un->un_errstats != NULL); 7175 if (un->un_f_devid_supported) { 7176 sd_register_devid(un, devi, reservation_flag); 7177 } 7178 mutex_exit(SD_MUTEX(un)); 7179 7180 #if (defined(__fibre)) 7181 /* 7182 * Register callbacks for fibre only. You can't do this solely 7183 * on the basis of the devid_type because this is HBA specific. 7184 * We need to query our HBA capabilities to find out whether to 7185 * register or not. 7186 */ 7187 if (un->un_f_is_fibre) { 7188 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7189 sd_init_event_callbacks(un); 7190 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7191 "sd_unit_attach: un:0x%p event callbacks inserted", un); 7192 } 7193 } 7194 #endif 7195 7196 if (un->un_f_opt_disable_cache == TRUE) { 7197 /* 7198 * Disable both read cache and write cache. This is 7199 * the historic behavior of the keywords in the config file. 7200 */ 7201 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7202 0) { 7203 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7204 "sd_unit_attach: un:0x%p Could not disable " 7205 "caching", un); 7206 goto devid_failed; 7207 } 7208 } 7209 7210 /* 7211 * Check the value of the WCE bit now and 7212 * set un_f_write_cache_enabled accordingly. 7213 */ 7214 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7215 mutex_enter(SD_MUTEX(un)); 7216 un->un_f_write_cache_enabled = (wc_enabled != 0); 7217 mutex_exit(SD_MUTEX(un)); 7218 7219 /* 7220 * Find out what type of reservation this disk supports. 7221 */ 7222 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7223 case 0: 7224 /* 7225 * SCSI-3 reservations are supported. 7226 */ 7227 un->un_reservation_type = SD_SCSI3_RESERVATION; 7228 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7229 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7230 break; 7231 case ENOTSUP: 7232 /* 7233 * The PERSISTENT RESERVE IN command would not be recognized by 7234 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
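 *
 * (For example, an older SCSI-2-only drive typically rejects the
 * PERSISTENT RESERVE IN CDB as an Illegal Request, which is
 * surfaced here as ENOTSUP. A sketch of how a consumer of the
 * result would branch; the exact callers vary:
 *
 *	if (un->un_reservation_type == SD_SCSI3_RESERVATION)
 *		(use PERSISTENT RESERVE IN/OUT services)
 *	else
 *		(fall back to SCSI-2 RESERVE/RELEASE, as in
 *		sd_reserve_release())
 * )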
7235 */ 7236 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7237 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7238 un->un_reservation_type = SD_SCSI2_RESERVATION; 7239 break; 7240 default: 7241 /* 7242 * default to SCSI-3 reservations 7243 */ 7244 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7245 "sd_unit_attach: un:0x%p default SCSI-3 reservations\n", un); 7246 un->un_reservation_type = SD_SCSI3_RESERVATION; 7247 break; 7248 } 7249 7250 /* 7251 * Set the pstat and error stat values here, so that data obtained 7252 * by the previous attach-time routines is available. 7253 * 7254 * Note: This is a critical sequence that needs to be maintained: 7255 * 1) Instantiate the kstats before any routines using the iopath 7256 * (i.e., sd_send_scsi_cmd). 7257 * 2) Initialize the error stats (sd_set_errstats) and partition 7258 * stats (sd_set_pstats) here, following 7259 * cmlb_validate_geometry(), sd_register_devid(), and 7260 * sd_cache_control(). 7261 */ 7262 7263 if (un->un_f_pkstats_enabled && geom_label_valid) { 7264 sd_set_pstats(un); 7265 SD_TRACE(SD_LOG_IO_PARTITION, un, 7266 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7267 } 7268 7269 sd_set_errstats(un); 7270 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7271 "sd_unit_attach: un:0x%p errstats set\n", un); 7272 7273 7274 /* 7275 * After successfully attaching an instance, we record the information 7276 * of how many luns have been attached on the corresponding target and 7277 * controller for parallel SCSI. This information is used when sd tries 7278 * to set the tagged queuing capability in the HBA. 7279 */ 7280 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7281 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7282 } 7283 7284 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7285 "sd_unit_attach: un:0x%p exit success\n", un); 7286 7287 return (DDI_SUCCESS); 7288 7289 /* 7290 * An error occurred during the attach; clean up & return failure. 7291 */ 7292 7293 devid_failed: 7294 7295 setup_pm_failed: 7296 ddi_remove_minor_node(devi, NULL); 7297 7298 cmlb_attach_failed: 7299 /* 7300 * Cleanup from the scsi_ifsetcap() calls (437868) 7301 */ 7302 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7303 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7304 7305 /* 7306 * Refer to the comments on setting tagged-qing at the beginning of 7307 * sd_unit_attach. We can only disable tagged queuing when there is 7308 * no lun attached on the target. 7309 */ 7310 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7311 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7312 } 7313 7314 if (un->un_f_is_fibre == FALSE) { 7315 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7316 } 7317 7318 spinup_failed: 7319 7320 mutex_enter(SD_MUTEX(un)); 7321 7322 /* Cancel any pending SD_PATH_DIRECT_PRIORITY command 
restart */ 7323 if (un->un_direct_priority_timeid != NULL) { 7324 timeout_id_t temp_id = un->un_direct_priority_timeid; 7325 un->un_direct_priority_timeid = NULL; 7326 mutex_exit(SD_MUTEX(un)); 7327 (void) untimeout(temp_id); 7328 mutex_enter(SD_MUTEX(un)); 7329 } 7330 7331 /* Cancel any pending start/stop timeouts */ 7332 if (un->un_startstop_timeid != NULL) { 7333 timeout_id_t temp_id = un->un_startstop_timeid; 7334 un->un_startstop_timeid = NULL; 7335 mutex_exit(SD_MUTEX(un)); 7336 (void) untimeout(temp_id); 7337 mutex_enter(SD_MUTEX(un)); 7338 } 7339 7340 /* Cancel any pending reset-throttle timeouts */ 7341 if (un->un_reset_throttle_timeid != NULL) { 7342 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7343 un->un_reset_throttle_timeid = NULL; 7344 mutex_exit(SD_MUTEX(un)); 7345 (void) untimeout(temp_id); 7346 mutex_enter(SD_MUTEX(un)); 7347 } 7348 7349 /* Cancel any pending retry timeouts */ 7350 if (un->un_retry_timeid != NULL) { 7351 timeout_id_t temp_id = un->un_retry_timeid; 7352 un->un_retry_timeid = NULL; 7353 mutex_exit(SD_MUTEX(un)); 7354 (void) untimeout(temp_id); 7355 mutex_enter(SD_MUTEX(un)); 7356 } 7357 7358 /* Cancel any pending delayed cv broadcast timeouts */ 7359 if (un->un_dcvb_timeid != NULL) { 7360 timeout_id_t temp_id = un->un_dcvb_timeid; 7361 un->un_dcvb_timeid = NULL; 7362 mutex_exit(SD_MUTEX(un)); 7363 (void) untimeout(temp_id); 7364 mutex_enter(SD_MUTEX(un)); 7365 } 7366 7367 mutex_exit(SD_MUTEX(un)); 7368 7369 /* There should not be any in-progress I/O so ASSERT this check */ 7370 ASSERT(un->un_ncmds_in_transport == 0); 7371 ASSERT(un->un_ncmds_in_driver == 0); 7372 7373 /* Do not free the softstate if the callback routine is active */ 7374 sd_sync_with_callback(un); 7375 7376 /* 7377 * Partition stats apparently are not used with removables. These would 7378 * not have been created during attach, so no need to clean them up... 7379 */ 7380 if (un->un_stats != NULL) { 7381 kstat_delete(un->un_stats); 7382 un->un_stats = NULL; 7383 } 7384 if (un->un_errstats != NULL) { 7385 kstat_delete(un->un_errstats); 7386 un->un_errstats = NULL; 7387 } 7388 7389 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7390 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7391 7392 ddi_prop_remove_all(devi); 7393 sema_destroy(&un->un_semoclose); 7394 cv_destroy(&un->un_state_cv); 7395 7396 getrbuf_failed: 7397 7398 sd_free_rqs(un); 7399 7400 alloc_rqs_failed: 7401 7402 devp->sd_private = NULL; 7403 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7404 7405 get_softstate_failed: 7406 /* 7407 * Note: the man pages are unclear as to whether or not doing a 7408 * ddi_soft_state_free(sd_state, instance) is the right way to 7409 * clean up after the ddi_soft_state_zalloc() if the subsequent 7410 * ddi_get_soft_state() fails. The implication seems to be 7411 * that the get_soft_state cannot fail if the zalloc succeeds. 7412 */ 7413 ddi_soft_state_free(sd_state, instance); 7414 7415 probe_failed: 7416 scsi_unprobe(devp); 7417 #ifdef SDDEBUG 7418 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7419 (sd_level_mask & SD_LOGMASK_TRACE)) { 7420 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7421 (void *)un); 7422 } 7423 #endif 7424 return (DDI_FAILURE); 7425 } 7426 7427 7428 /* 7429 * Function: sd_unit_detach 7430 * 7431 * Description: Performs DDI_DETACH processing for sddetach(). 
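 *
 * (Illustrative failure case, assuming a hypothetical layered
 * consumer of this instance: an ldi_open_by_name(9F) of one of
 * the instance's minors counts as an OTYP_LYR open, keeping
 * un_layer_count non-zero, so this routine returns DDI_FAILURE
 * until the consumer calls ldi_close(9F).)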
7432 * 7433 * Return Code: DDI_SUCCESS 7434 * DDI_FAILURE 7435 * 7436 * Context: Kernel thread context 7437 */ 7438 7439 static int 7440 sd_unit_detach(dev_info_t *devi) 7441 { 7442 struct scsi_device *devp; 7443 struct sd_lun *un; 7444 int i; 7445 int tgt; 7446 dev_t dev; 7447 dev_info_t *pdip = ddi_get_parent(devi); 7448 int instance = ddi_get_instance(devi); 7449 7450 mutex_enter(&sd_detach_mutex); 7451 7452 /* 7453 * Fail the detach for any of the following: 7454 * - Unable to get the sd_lun struct for the instance 7455 * - A layered driver has an outstanding open on the instance 7456 * - Another thread is already detaching this instance 7457 * - Another thread is currently performing an open 7458 */ 7459 devp = ddi_get_driver_private(devi); 7460 if ((devp == NULL) || 7461 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7462 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7463 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7464 mutex_exit(&sd_detach_mutex); 7465 return (DDI_FAILURE); 7466 } 7467 7468 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7469 7470 /* 7471 * Mark this instance as currently in a detach, to inhibit any 7472 * opens from a layered driver. 7473 */ 7474 un->un_detach_count++; 7475 mutex_exit(&sd_detach_mutex); 7476 7477 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7478 SCSI_ADDR_PROP_TARGET, -1); 7479 7480 dev = sd_make_device(SD_DEVINFO(un)); 7481 7482 #ifndef lint 7483 _NOTE(COMPETING_THREADS_NOW); 7484 #endif 7485 7486 mutex_enter(SD_MUTEX(un)); 7487 7488 /* 7489 * Fail the detach if there are any outstanding layered 7490 * opens on this device. 7491 */ 7492 for (i = 0; i < NDKMAP; i++) { 7493 if (un->un_ocmap.lyropen[i] != 0) { 7494 goto err_notclosed; 7495 } 7496 } 7497 7498 /* 7499 * Verify there are NO outstanding commands issued to this device. 7500 * ie, un_ncmds_in_transport == 0. 7501 * It's possible to have outstanding commands through the physio 7502 * code path, even though everything's closed. 7503 */ 7504 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7505 (un->un_direct_priority_timeid != NULL) || 7506 (un->un_state == SD_STATE_RWAIT)) { 7507 mutex_exit(SD_MUTEX(un)); 7508 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7509 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7510 goto err_stillbusy; 7511 } 7512 7513 /* 7514 * If we have the device reserved, release the reservation. 7515 */ 7516 if ((un->un_resvd_status & SD_RESERVE) && 7517 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7518 mutex_exit(SD_MUTEX(un)); 7519 /* 7520 * Note: sd_reserve_release sends a command to the device 7521 * via the sd_ioctlcmd() path, and can sleep. 7522 */ 7523 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7524 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7525 "sd_dr_detach: Cannot release reservation \n"); 7526 } 7527 } else { 7528 mutex_exit(SD_MUTEX(un)); 7529 } 7530 7531 /* 7532 * Untimeout any reserve recover, throttle reset, restart unit 7533 * and delayed broadcast timeout threads. Protect the timeout pointer 7534 * from getting nulled by their callback functions. 
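 *
 * The canonical cancellation pattern, repeated below for each
 * timeout id (a sketch; un_foo_timeid is a placeholder name):
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 *
 * Copying the id and nulling the field under SD_MUTEX, then
 * dropping the mutex across untimeout(9F), avoids deadlock with
 * a handler that may itself be blocked on SD_MUTEX.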
7535 */ 7536 mutex_enter(SD_MUTEX(un)); 7537 if (un->un_resvd_timeid != NULL) { 7538 timeout_id_t temp_id = un->un_resvd_timeid; 7539 un->un_resvd_timeid = NULL; 7540 mutex_exit(SD_MUTEX(un)); 7541 (void) untimeout(temp_id); 7542 mutex_enter(SD_MUTEX(un)); 7543 } 7544 7545 if (un->un_reset_throttle_timeid != NULL) { 7546 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7547 un->un_reset_throttle_timeid = NULL; 7548 mutex_exit(SD_MUTEX(un)); 7549 (void) untimeout(temp_id); 7550 mutex_enter(SD_MUTEX(un)); 7551 } 7552 7553 if (un->un_startstop_timeid != NULL) { 7554 timeout_id_t temp_id = un->un_startstop_timeid; 7555 un->un_startstop_timeid = NULL; 7556 mutex_exit(SD_MUTEX(un)); 7557 (void) untimeout(temp_id); 7558 mutex_enter(SD_MUTEX(un)); 7559 } 7560 7561 if (un->un_dcvb_timeid != NULL) { 7562 timeout_id_t temp_id = un->un_dcvb_timeid; 7563 un->un_dcvb_timeid = NULL; 7564 mutex_exit(SD_MUTEX(un)); 7565 (void) untimeout(temp_id); 7566 } else { 7567 mutex_exit(SD_MUTEX(un)); 7568 } 7569 7570 /* Remove any pending reservation reclaim requests for this device */ 7571 sd_rmv_resv_reclaim_req(dev); 7572 7573 mutex_enter(SD_MUTEX(un)); 7574 7575 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7576 if (un->un_direct_priority_timeid != NULL) { 7577 timeout_id_t temp_id = un->un_direct_priority_timeid; 7578 un->un_direct_priority_timeid = NULL; 7579 mutex_exit(SD_MUTEX(un)); 7580 (void) untimeout(temp_id); 7581 mutex_enter(SD_MUTEX(un)); 7582 } 7583 7584 /* Cancel any active multi-host disk watch thread requests */ 7585 if (un->un_mhd_token != NULL) { 7586 mutex_exit(SD_MUTEX(un)); 7587 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7588 if (scsi_watch_request_terminate(un->un_mhd_token, 7589 SCSI_WATCH_TERMINATE_NOWAIT)) { 7590 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7591 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7592 /* 7593 * Note: We are returning here after having removed 7594 * some driver timeouts above. This is consistent with 7595 * the legacy implementation but perhaps the watch 7596 * terminate call should be made with the wait flag set. 7597 */ 7598 goto err_stillbusy; 7599 } 7600 mutex_enter(SD_MUTEX(un)); 7601 un->un_mhd_token = NULL; 7602 } 7603 7604 if (un->un_swr_token != NULL) { 7605 mutex_exit(SD_MUTEX(un)); 7606 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7607 if (scsi_watch_request_terminate(un->un_swr_token, 7608 SCSI_WATCH_TERMINATE_NOWAIT)) { 7609 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7610 "sd_dr_detach: Cannot cancel swr watch request\n"); 7611 /* 7612 * Note: We are returning here after having removed 7613 * some driver timeouts above. This is consistent with 7614 * the legacy implementation but perhaps the watch 7615 * terminate call should be made with the wait flag set. 7616 */ 7617 goto err_stillbusy; 7618 } 7619 mutex_enter(SD_MUTEX(un)); 7620 un->un_swr_token = NULL; 7621 } 7622 7623 mutex_exit(SD_MUTEX(un)); 7624 7625 /* 7626 * Clear any scsi_reset_notifies. We clear the reset notifies 7627 * if we have not registered one. 7628 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7629 */ 7630 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7631 sd_mhd_reset_notify_cb, (caddr_t)un); 7632 7633 /* 7634 * protect the timeout pointers from getting nulled by 7635 * their callback functions during the cancellation process. 7636 * In such a scenario untimeout can be invoked with a null value. 
7637 */ 7638 _NOTE(NO_COMPETING_THREADS_NOW); 7639 7640 mutex_enter(&un->un_pm_mutex); 7641 if (un->un_pm_idle_timeid != NULL) { 7642 timeout_id_t temp_id = un->un_pm_idle_timeid; 7643 un->un_pm_idle_timeid = NULL; 7644 mutex_exit(&un->un_pm_mutex); 7645 7646 /* 7647 * Timeout is active; cancel it. 7648 * Note that it'll never be active on a device 7649 * that does not support PM; therefore we don't 7650 * have to check before calling pm_idle_component. 7651 */ 7652 (void) untimeout(temp_id); 7653 (void) pm_idle_component(SD_DEVINFO(un), 0); 7654 mutex_enter(&un->un_pm_mutex); 7655 } 7656 7657 /* 7658 * Check whether there is already a timeout scheduled for power 7659 * management. If so, don't lower the power here; that's 7660 * the timeout handler's job. 7661 */ 7662 if (un->un_pm_timeid != NULL) { 7663 timeout_id_t temp_id = un->un_pm_timeid; 7664 un->un_pm_timeid = NULL; 7665 mutex_exit(&un->un_pm_mutex); 7666 /* 7667 * Timeout is active; cancel it. 7668 * Note that it'll never be active on a device 7669 * that does not support PM; therefore we don't 7670 * have to check before calling pm_idle_component. 7671 */ 7672 (void) untimeout(temp_id); 7673 (void) pm_idle_component(SD_DEVINFO(un), 0); 7674 7675 } else { 7676 mutex_exit(&un->un_pm_mutex); 7677 if ((un->un_f_pm_is_enabled == TRUE) && 7678 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 7679 DDI_SUCCESS)) { 7680 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7681 "sd_dr_detach: Lower power request failed, ignoring.\n"); 7682 /* 7683 * Fix for bug: 4297749, item # 13 7684 * The above test now includes a check to see if PM is 7685 * supported by this device before calling 7686 * pm_lower_power(). 7687 * Note, the following is not dead code. The call to 7688 * pm_lower_power above will generate a call back into 7689 * our sdpower routine which might result in a timeout 7690 * handler getting activated. Therefore the following 7691 * code is valid and necessary. 7692 */ 7693 mutex_enter(&un->un_pm_mutex); 7694 if (un->un_pm_timeid != NULL) { 7695 timeout_id_t temp_id = un->un_pm_timeid; 7696 un->un_pm_timeid = NULL; 7697 mutex_exit(&un->un_pm_mutex); 7698 (void) untimeout(temp_id); 7699 (void) pm_idle_component(SD_DEVINFO(un), 0); 7700 } else { 7701 mutex_exit(&un->un_pm_mutex); 7702 } 7703 } 7704 } 7705 7706 /* 7707 * Cleanup from the scsi_ifsetcap() calls (437868) 7708 * Relocated here from above to be after the call to 7709 * pm_lower_power, which was getting errors. 7710 */ 7711 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7712 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7713 7714 /* 7715 * Currently, tagged queuing is supported per target by the HBA. 7716 * Setting it for one LUN instance actually sets the capability of the 7717 * whole target in the HBA, which affects the LUNs already attached on 7718 * the same target. So during detach, we can disable this capability 7719 * only when this is the last LUN left on the target. (For example, with 7720 * LUNs 0 and 1 on one target, detaching LUN 1 must leave tagged queuing 7721 * enabled for LUN 0.) By doing this, we assume a target has the same 7722 * tagged queuing capability for every LUN; the condition can be removed when HBAs support a per-LUN tagged queuing capability. 
7723 */ 7724 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7725 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7726 } 7727 7728 if (un->un_f_is_fibre == FALSE) { 7729 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7730 } 7731 7732 /* 7733 * Remove any event callbacks, fibre only 7734 */ 7735 if (un->un_f_is_fibre == TRUE) { 7736 if ((un->un_insert_event != NULL) && 7737 (ddi_remove_event_handler(un->un_insert_cb_id) != 7738 DDI_SUCCESS)) { 7739 /* 7740 * Note: We are returning here after having done 7741 * substantial cleanup above. This is consistent 7742 * with the legacy implementation but this may not 7743 * be the right thing to do. 7744 */ 7745 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7746 "sd_dr_detach: Cannot cancel insert event\n"); 7747 goto err_remove_event; 7748 } 7749 un->un_insert_event = NULL; 7750 7751 if ((un->un_remove_event != NULL) && 7752 (ddi_remove_event_handler(un->un_remove_cb_id) != 7753 DDI_SUCCESS)) { 7754 /* 7755 * Note: We are returning here after having done 7756 * substantial cleanup above. This is consistent 7757 * with the legacy implementation but this may not 7758 * be the right thing to do. 7759 */ 7760 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7761 "sd_dr_detach: Cannot cancel remove event\n"); 7762 goto err_remove_event; 7763 } 7764 un->un_remove_event = NULL; 7765 } 7766 7767 /* Do not free the softstate if the callback routine is active */ 7768 sd_sync_with_callback(un); 7769 7770 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7771 cmlb_free_handle(&un->un_cmlbhandle); 7772 7773 /* 7774 * Hold the detach mutex here, to make sure that no other threads ever 7775 * can access a (partially) freed soft state structure. 7776 */ 7777 mutex_enter(&sd_detach_mutex); 7778 7779 /* 7780 * Clean up the soft state struct. 7781 * Cleanup is done in reverse order of allocs/inits. 7782 * At this point there should be no competing threads anymore. 7783 */ 7784 7785 /* Unregister and free device id. */ 7786 ddi_devid_unregister(devi); 7787 if (un->un_devid) { 7788 ddi_devid_free(un->un_devid); 7789 un->un_devid = NULL; 7790 } 7791 7792 /* 7793 * Destroy wmap cache if it exists. 7794 */ 7795 if (un->un_wm_cache != NULL) { 7796 kmem_cache_destroy(un->un_wm_cache); 7797 un->un_wm_cache = NULL; 7798 } 7799 7800 /* 7801 * kstat cleanup is done in detach for all device types (4363169). 7802 * We do not want to fail detach if the device kstats are not deleted 7803 * since there is a confusion about the devo_refcnt for the device. 7804 * We just delete the kstats and let detach complete successfully. 7805 */ 7806 if (un->un_stats != NULL) { 7807 kstat_delete(un->un_stats); 7808 un->un_stats = NULL; 7809 } 7810 if (un->un_errstats != NULL) { 7811 kstat_delete(un->un_errstats); 7812 un->un_errstats = NULL; 7813 } 7814 7815 /* Remove partition stats */ 7816 if (un->un_f_pkstats_enabled) { 7817 for (i = 0; i < NSDMAP; i++) { 7818 if (un->un_pstats[i] != NULL) { 7819 kstat_delete(un->un_pstats[i]); 7820 un->un_pstats[i] = NULL; 7821 } 7822 } 7823 } 7824 7825 /* Remove xbuf registration */ 7826 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7827 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7828 7829 /* Remove driver properties */ 7830 ddi_prop_remove_all(devi); 7831 7832 mutex_destroy(&un->un_pm_mutex); 7833 cv_destroy(&un->un_pm_busy_cv); 7834 7835 cv_destroy(&un->un_wcc_cv); 7836 7837 /* Open/close semaphore */ 7838 sema_destroy(&un->un_semoclose); 7839 7840 /* Removable media condvar. 
*/ 7841 cv_destroy(&un->un_state_cv); 7842 7843 /* Suspend/resume condvar. */ 7844 cv_destroy(&un->un_suspend_cv); 7845 cv_destroy(&un->un_disk_busy_cv); 7846 7847 sd_free_rqs(un); 7848 7849 /* Free up soft state */ 7850 devp->sd_private = NULL; 7851 7852 bzero(un, sizeof (struct sd_lun)); 7853 ddi_soft_state_free(sd_state, instance); 7854 7855 mutex_exit(&sd_detach_mutex); 7856 7857 /* This frees up the INQUIRY data associated with the device. */ 7858 scsi_unprobe(devp); 7859 7860 /* 7861 * After successfully detaching an instance, we update the information 7862 * of how many luns have been attached on the corresponding target and 7863 * controller for parallel SCSI. This information is used when sd tries 7864 * to set the tagged queuing capability in the HBA. 7865 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7866 * check if the device is parallel SCSI. However, we don't need to 7867 * check here because we've already checked during attach. No device 7868 * that is not parallel SCSI is in the chain. 7869 */ 7870 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7871 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7872 } 7873 7874 return (DDI_SUCCESS); 7875 7876 err_notclosed: 7877 mutex_exit(SD_MUTEX(un)); 7878 7879 err_stillbusy: 7880 _NOTE(NO_COMPETING_THREADS_NOW); 7881 7882 err_remove_event: 7883 mutex_enter(&sd_detach_mutex); 7884 un->un_detach_count--; 7885 mutex_exit(&sd_detach_mutex); 7886 7887 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7888 return (DDI_FAILURE); 7889 } 7890 7891 7892 /* 7893 * Function: sd_create_errstats 7894 * 7895 * Description: This routine instantiates the device error stats. 7896 * 7897 * Note: During attach the stats are instantiated first so they are 7898 * available for attach-time routines that utilize the driver 7899 * iopath to send commands to the device. The stats are initialized 7900 * separately so that data obtained during some attach-time routines is 7901 * available. 
(4362483) 7902 * 7903 * Arguments: un - driver soft state (unit) structure 7904 * instance - driver instance 7905 * 7906 * Context: Kernel thread context 7907 */ 7908 7909 static void 7910 sd_create_errstats(struct sd_lun *un, int instance) 7911 { 7912 struct sd_errstats *stp; 7913 char kstatmodule_err[KSTAT_STRLEN]; 7914 char kstatname[KSTAT_STRLEN]; 7915 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7916 7917 ASSERT(un != NULL); 7918 7919 if (un->un_errstats != NULL) { 7920 return; 7921 } 7922 7923 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7924 "%serr", sd_label); 7925 (void) snprintf(kstatname, sizeof (kstatname), 7926 "%s%d,err", sd_label, instance); 7927 7928 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 7929 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 7930 7931 if (un->un_errstats == NULL) { 7932 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7933 "sd_create_errstats: Failed kstat_create\n"); 7934 return; 7935 } 7936 7937 stp = (struct sd_errstats *)un->un_errstats->ks_data; 7938 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 7939 KSTAT_DATA_UINT32); 7940 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 7941 KSTAT_DATA_UINT32); 7942 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 7943 KSTAT_DATA_UINT32); 7944 kstat_named_init(&stp->sd_vid, "Vendor", 7945 KSTAT_DATA_CHAR); 7946 kstat_named_init(&stp->sd_pid, "Product", 7947 KSTAT_DATA_CHAR); 7948 kstat_named_init(&stp->sd_revision, "Revision", 7949 KSTAT_DATA_CHAR); 7950 kstat_named_init(&stp->sd_serial, "Serial No", 7951 KSTAT_DATA_CHAR); 7952 kstat_named_init(&stp->sd_capacity, "Size", 7953 KSTAT_DATA_ULONGLONG); 7954 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 7955 KSTAT_DATA_UINT32); 7956 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 7957 KSTAT_DATA_UINT32); 7958 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 7959 KSTAT_DATA_UINT32); 7960 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 7961 KSTAT_DATA_UINT32); 7962 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 7963 KSTAT_DATA_UINT32); 7964 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 7965 KSTAT_DATA_UINT32); 7966 7967 un->un_errstats->ks_private = un; 7968 un->un_errstats->ks_update = nulldev; 7969 7970 kstat_install(un->un_errstats); 7971 } 7972 7973 7974 /* 7975 * Function: sd_set_errstats 7976 * 7977 * Description: This routine sets the value of the vendor id, product id, 7978 * revision, serial number, and capacity device error stats. 7979 * 7980 * Note: During attach the stats are instantiated first so they are 7981 * available for attach-time routines that utilize the driver 7982 * iopath to send commands to the device. The stats are initialized 7983 * separately so data obtained during some attach-time routines is 7984 * available. 
(4362483) 7985 * 7986 * Arguments: un - driver soft state (unit) structure 7987 * 7988 * Context: Kernel thread context 7989 */ 7990 7991 static void 7992 sd_set_errstats(struct sd_lun *un) 7993 { 7994 struct sd_errstats *stp; 7995 7996 ASSERT(un != NULL); 7997 ASSERT(un->un_errstats != NULL); 7998 stp = (struct sd_errstats *)un->un_errstats->ks_data; 7999 ASSERT(stp != NULL); 8000 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8001 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8002 (void) strncpy(stp->sd_revision.value.c, 8003 un->un_sd->sd_inq->inq_revision, 4); 8004 8005 /* 8006 * All the errstats are persistent across detach/attach, 8007 * so reset them here to handle hot replacement of 8008 * disk drives, except for Sun qualified drives whose 8009 * serial number has not changed. 8010 */ 8011 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8012 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8013 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8014 stp->sd_softerrs.value.ui32 = 0; 8015 stp->sd_harderrs.value.ui32 = 0; 8016 stp->sd_transerrs.value.ui32 = 0; 8017 stp->sd_rq_media_err.value.ui32 = 0; 8018 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8019 stp->sd_rq_nodev_err.value.ui32 = 0; 8020 stp->sd_rq_recov_err.value.ui32 = 0; 8021 stp->sd_rq_illrq_err.value.ui32 = 0; 8022 stp->sd_rq_pfa_err.value.ui32 = 0; 8023 } 8024 8025 /* 8026 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8027 * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the pid) 8028 * (4376302) 8029 */ 8030 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8031 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8032 sizeof (SD_INQUIRY(un)->inq_serial)); 8033 } 8034 8035 if (un->un_f_blockcount_is_valid != TRUE) { 8036 /* 8037 * Set capacity error stat to 0 for no media. This ensures 8038 * a valid capacity is displayed in response to 'iostat -E' 8039 * when no media is present in the device. 8040 */ 8041 stp->sd_capacity.value.ui64 = 0; 8042 } else { 8043 /* 8044 * Multiply un_blockcount by un->un_sys_blocksize to get 8045 * capacity. 8046 * 8047 * Note: for non-512 blocksize devices "un_blockcount" has been 8048 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8049 * (un_tgt_blocksize / un->un_sys_blocksize). 8050 */ 8051 stp->sd_capacity.value.ui64 = (uint64_t) 8052 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8053 } 8054 } 8055 8056 8057 /* 8058 * Function: sd_set_pstats 8059 * 8060 * Description: This routine instantiates and initializes the partition 8061 * stats for each partition with more than zero blocks. 8062 * (4363169) 8063 * 8064 * Arguments: un - driver soft state (unit) structure 8065 * 8066 * Context: Kernel thread context 8067 */ 8068 8069 static void 8070 sd_set_pstats(struct sd_lun *un) 8071 { 8072 char kstatname[KSTAT_STRLEN]; 8073 int instance; 8074 int i; 8075 diskaddr_t nblks = 0; 8076 char *partname = NULL; 8077 8078 ASSERT(un != NULL); 8079 8080 instance = ddi_get_instance(SD_DEVINFO(un)); 8081 8082 /* Note: x86: is this a VTOC8/VTOC16 difference? 
*/ 8083 for (i = 0; i < NSDMAP; i++) { 8084 8085 if (cmlb_partinfo(un->un_cmlbhandle, i, 8086 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8087 continue; 8088 mutex_enter(SD_MUTEX(un)); 8089 8090 if ((un->un_pstats[i] == NULL) && 8091 (nblks != 0)) { 8092 8093 (void) snprintf(kstatname, sizeof (kstatname), 8094 "%s%d,%s", sd_label, instance, 8095 partname); 8096 8097 un->un_pstats[i] = kstat_create(sd_label, 8098 instance, kstatname, "partition", KSTAT_TYPE_IO, 8099 1, KSTAT_FLAG_PERSISTENT); 8100 if (un->un_pstats[i] != NULL) { 8101 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8102 kstat_install(un->un_pstats[i]); 8103 } 8104 } 8105 mutex_exit(SD_MUTEX(un)); 8106 } 8107 } 8108 8109 8110 #if (defined(__fibre)) 8111 /* 8112 * Function: sd_init_event_callbacks 8113 * 8114 * Description: This routine initializes the insertion and removal event 8115 * callbacks. (fibre only) 8116 * 8117 * Arguments: un - driver soft state (unit) structure 8118 * 8119 * Context: Kernel thread context 8120 */ 8121 8122 static void 8123 sd_init_event_callbacks(struct sd_lun *un) 8124 { 8125 ASSERT(un != NULL); 8126 8127 if ((un->un_insert_event == NULL) && 8128 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8129 &un->un_insert_event) == DDI_SUCCESS)) { 8130 /* 8131 * Add the callback for an insertion event 8132 */ 8133 (void) ddi_add_event_handler(SD_DEVINFO(un), 8134 un->un_insert_event, sd_event_callback, (void *)un, 8135 &(un->un_insert_cb_id)); 8136 } 8137 8138 if ((un->un_remove_event == NULL) && 8139 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8140 &un->un_remove_event) == DDI_SUCCESS)) { 8141 /* 8142 * Add the callback for a removal event 8143 */ 8144 (void) ddi_add_event_handler(SD_DEVINFO(un), 8145 un->un_remove_event, sd_event_callback, (void *)un, 8146 &(un->un_remove_cb_id)); 8147 } 8148 } 8149 8150 8151 /* 8152 * Function: sd_event_callback 8153 * 8154 * Description: This routine handles insert/remove events (photon). The 8155 * state is changed to OFFLINE, which can be used to suppress 8156 * error messages. (fibre only) 8157 * 8158 * Arguments: un - driver soft state (unit) structure 8159 * 8160 * Context: Callout thread context 8161 */ 8162 /* ARGSUSED */ 8163 static void 8164 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8165 void *bus_impldata) 8166 { 8167 struct sd_lun *un = (struct sd_lun *)arg; 8168 8169 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8170 if (event == un->un_insert_event) { 8171 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8172 mutex_enter(SD_MUTEX(un)); 8173 if (un->un_state == SD_STATE_OFFLINE) { 8174 if (un->un_last_state != SD_STATE_SUSPENDED) { 8175 un->un_state = un->un_last_state; 8176 } else { 8177 /* 8178 * We have gone through SUSPEND/RESUME while 8179 * we were offline. Restore the last state. 8180 */ 8181 un->un_state = un->un_save_state; 8182 } 8183 } 8184 mutex_exit(SD_MUTEX(un)); 8185 8186 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8187 } else if (event == un->un_remove_event) { 8188 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8189 mutex_enter(SD_MUTEX(un)); 8190 /* 8191 * We need to handle an event callback that occurs during 8192 * the suspend operation, since we don't prevent it. 
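 *
 * (Sketch of the intended transitions for a remove event,
 * mirroring the code below:
 *
 *	un_state normal    -> New_state(un, SD_STATE_OFFLINE)
 *	un_state SUSPENDED -> un_last_state = SD_STATE_OFFLINE,
 *	                      so the later RESUME restores OFFLINE
 * )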
8193 */ 8194 if (un->un_state != SD_STATE_OFFLINE) { 8195 if (un->un_state != SD_STATE_SUSPENDED) { 8196 New_state(un, SD_STATE_OFFLINE); 8197 } else { 8198 un->un_last_state = SD_STATE_OFFLINE; 8199 } 8200 } 8201 mutex_exit(SD_MUTEX(un)); 8202 } else { 8203 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8204 "!Unknown event\n"); 8205 } 8206 8207 } 8208 #endif 8209 8210 /* 8211 * Function: sd_cache_control() 8212 * 8213 * Description: This routine is the driver entry point for setting 8214 * read and write caching by modifying the WCE (write cache 8215 * enable) and RCD (read cache disable) bits of mode 8216 * page 8 (MODEPAGE_CACHING). 8217 * 8218 * Arguments: un - driver soft state (unit) structure 8219 * rcd_flag - flag for controlling the read cache 8220 * wce_flag - flag for controlling the write cache 8221 * 8222 * Return Code: EIO 8223 * code returned by sd_send_scsi_MODE_SENSE and 8224 * sd_send_scsi_MODE_SELECT 8225 * 8226 * Context: Kernel Thread 8227 */ 8228 8229 static int 8230 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8231 { 8232 struct mode_caching *mode_caching_page; 8233 uchar_t *header; 8234 size_t buflen; 8235 int hdrlen; 8236 int bd_len; 8237 int rval = 0; 8238 struct mode_header_grp2 *mhp; 8239 8240 ASSERT(un != NULL); 8241 8242 /* 8243 * Do a test unit ready first; otherwise a mode sense may not work if 8244 * this is the first command sent to the device after boot. 8245 */ 8246 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8247 8248 if (un->un_f_cfg_is_atapi == TRUE) { 8249 hdrlen = MODE_HEADER_LENGTH_GRP2; 8250 } else { 8251 hdrlen = MODE_HEADER_LENGTH; 8252 } 8253 8254 /* 8255 * Allocate memory for the retrieved mode page and its headers. Set 8256 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8257 * we get all of the mode sense data; otherwise, the mode select 8258 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8259 */ 8260 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8261 sizeof (struct mode_cache_scsi3); 8262 8263 header = kmem_zalloc(buflen, KM_SLEEP); 8264 8265 /* Get the information from the device. */ 8266 if (un->un_f_cfg_is_atapi == TRUE) { 8267 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8268 MODEPAGE_CACHING, SD_PATH_DIRECT); 8269 } else { 8270 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8271 MODEPAGE_CACHING, SD_PATH_DIRECT); 8272 } 8273 if (rval != 0) { 8274 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8275 "sd_cache_control: Mode Sense Failed\n"); 8276 kmem_free(header, buflen); 8277 return (rval); 8278 } 8279 8280 /* 8281 * Determine size of Block Descriptors in order to locate 8282 * the mode page data. ATAPI devices return 0, SCSI devices 8283 * should return MODE_BLK_DESC_LENGTH. 
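 *
 * (Illustrative layout of the buffer parsed below, for the
 * non-ATAPI case with a single 8-byte block descriptor:
 *
 *	header[0 .. hdrlen-1]               mode header
 *	header[hdrlen .. hdrlen+bd_len-1]   block descriptor(s)
 *	header[hdrlen+bd_len ..]            MODEPAGE_CACHING page
 *
 * which is why the page pointer is computed below as
 * header + hdrlen + bd_len.)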
8284 */ 8285 if (un->un_f_cfg_is_atapi == TRUE) { 8286 mhp = (struct mode_header_grp2 *)header; 8287 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8288 } else { 8289 bd_len = ((struct mode_header *)header)->bdesc_length; 8290 } 8291 8292 if (bd_len > MODE_BLK_DESC_LENGTH) { 8293 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8294 "sd_cache_control: Mode Sense returned invalid " 8295 "block descriptor length\n"); 8296 kmem_free(header, buflen); 8297 return (EIO); 8298 } 8299 8300 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8301 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8302 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8303 " caching page code mismatch %d\n", 8304 mode_caching_page->mode_page.code); 8305 kmem_free(header, buflen); 8306 return (EIO); 8307 } 8308 8309 /* Check the relevant bits on successful mode sense. */ 8310 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8311 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8312 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8313 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8314 8315 size_t sbuflen; 8316 uchar_t save_pg; 8317 8318 /* 8319 * Construct select buffer length based on the 8320 * length of the sense data returned. 8321 */ 8322 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8323 sizeof (struct mode_page) + 8324 (int)mode_caching_page->mode_page.length; 8325 8326 /* 8327 * Set the caching bits as requested. 8328 */ 8329 if (rcd_flag == SD_CACHE_ENABLE) 8330 mode_caching_page->rcd = 0; 8331 else if (rcd_flag == SD_CACHE_DISABLE) 8332 mode_caching_page->rcd = 1; 8333 8334 if (wce_flag == SD_CACHE_ENABLE) 8335 mode_caching_page->wce = 1; 8336 else if (wce_flag == SD_CACHE_DISABLE) 8337 mode_caching_page->wce = 0; 8338 8339 /* 8340 * Save the page if the mode sense says the 8341 * drive supports it. 8342 */ 8343 save_pg = mode_caching_page->mode_page.ps ? 8344 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8345 8346 /* Clear reserved bits before mode select. */ 8347 mode_caching_page->mode_page.ps = 0; 8348 8349 /* 8350 * Clear out mode header for mode select. 8351 * The rest of the retrieved page will be reused. 8352 */ 8353 bzero(header, hdrlen); 8354 8355 if (un->un_f_cfg_is_atapi == TRUE) { 8356 mhp = (struct mode_header_grp2 *)header; 8357 mhp->bdesc_length_hi = bd_len >> 8; 8358 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8359 } else { 8360 ((struct mode_header *)header)->bdesc_length = bd_len; 8361 } 8362 8363 /* Issue mode select to change the cache settings */ 8364 if (un->un_f_cfg_is_atapi == TRUE) { 8365 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8366 sbuflen, save_pg, SD_PATH_DIRECT); 8367 } else { 8368 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8369 sbuflen, save_pg, SD_PATH_DIRECT); 8370 } 8371 } 8372 8373 kmem_free(header, buflen); 8374 return (rval); 8375 } 8376 8377 8378 /* 8379 * Function: sd_get_write_cache_enabled() 8380 * 8381 * Description: This routine is the driver entry point for determining if 8382 * write caching is enabled. It examines the WCE (write cache 8383 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
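 *
 * (Usage sketch, as done at attach time: the caller ignores the
 * return value and latches the result under SD_MUTEX:
 *
 *	(void) sd_get_write_cache_enabled(un, &wc_enabled);
 *	mutex_enter(SD_MUTEX(un));
 *	un->un_f_write_cache_enabled = (wc_enabled != 0);
 *	mutex_exit(SD_MUTEX(un));
 * )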
8384 * 8385 * Arguments: un - driver soft state (unit) structure 8386 * is_enabled - pointer to int where write cache enabled state 8387 * is returned (non-zero -> write cache enabled) 8388 * 8389 * 8390 * Return Code: EIO 8391 * code returned by sd_send_scsi_MODE_SENSE 8392 * 8393 * Context: Kernel Thread 8394 * 8395 * NOTE: If ioctl is added to disable write cache, this sequence should 8396 * be followed so that no locking is required for accesses to 8397 * un->un_f_write_cache_enabled: 8398 * do mode select to clear wce 8399 * do synchronize cache to flush cache 8400 * set un->un_f_write_cache_enabled = FALSE 8401 * 8402 * Conversely, an ioctl to enable the write cache should be done 8403 * in this order: 8404 * set un->un_f_write_cache_enabled = TRUE 8405 * do mode select to set wce 8406 */ 8407 8408 static int 8409 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8410 { 8411 struct mode_caching *mode_caching_page; 8412 uchar_t *header; 8413 size_t buflen; 8414 int hdrlen; 8415 int bd_len; 8416 int rval = 0; 8417 8418 ASSERT(un != NULL); 8419 ASSERT(is_enabled != NULL); 8420 8421 /* in case of error, flag as enabled */ 8422 *is_enabled = TRUE; 8423 8424 /* 8425 * Do a test unit ready, otherwise a mode sense may not work if this 8426 * is the first command sent to the device after boot. 8427 */ 8428 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8429 8430 if (un->un_f_cfg_is_atapi == TRUE) { 8431 hdrlen = MODE_HEADER_LENGTH_GRP2; 8432 } else { 8433 hdrlen = MODE_HEADER_LENGTH; 8434 } 8435 8436 /* 8437 * Allocate memory for the retrieved mode page and its headers. Set 8438 * a pointer to the page itself. 8439 */ 8440 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8441 header = kmem_zalloc(buflen, KM_SLEEP); 8442 8443 /* Get the information from the device. */ 8444 if (un->un_f_cfg_is_atapi == TRUE) { 8445 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8446 MODEPAGE_CACHING, SD_PATH_DIRECT); 8447 } else { 8448 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8449 MODEPAGE_CACHING, SD_PATH_DIRECT); 8450 } 8451 if (rval != 0) { 8452 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8453 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8454 kmem_free(header, buflen); 8455 return (rval); 8456 } 8457 8458 /* 8459 * Determine size of Block Descriptors in order to locate 8460 * the mode page data. ATAPI devices return 0, SCSI devices 8461 * should return MODE_BLK_DESC_LENGTH. 
8462 */ 8463 if (un->un_f_cfg_is_atapi == TRUE) { 8464 struct mode_header_grp2 *mhp; 8465 mhp = (struct mode_header_grp2 *)header; 8466 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8467 } else { 8468 bd_len = ((struct mode_header *)header)->bdesc_length; 8469 } 8470 8471 if (bd_len > MODE_BLK_DESC_LENGTH) { 8472 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8473 "sd_get_write_cache_enabled: Mode Sense returned invalid " 8474 "block descriptor length\n"); 8475 kmem_free(header, buflen); 8476 return (EIO); 8477 } 8478 8479 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8480 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8481 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 8482 " caching page code mismatch %d\n", 8483 mode_caching_page->mode_page.code); 8484 kmem_free(header, buflen); 8485 return (EIO); 8486 } 8487 *is_enabled = mode_caching_page->wce; 8488 8489 kmem_free(header, buflen); 8490 return (0); 8491 } 8492 8493 8494 /* 8495 * Function: sd_make_device 8496 * 8497 * Description: Utility routine to return the Solaris device number from 8498 * the data in the device's dev_info structure. 8499 * 8500 * Return Code: The Solaris device number 8501 * 8502 * Context: Any 8503 */ 8504 8505 static dev_t 8506 sd_make_device(dev_info_t *devi) 8507 { 8508 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 8509 ddi_get_instance(devi) << SDUNIT_SHIFT)); 8510 } 8511 8512 8513 /* 8514 * Function: sd_pm_entry 8515 * 8516 * Description: Called at the start of a new command to manage power 8517 * and busy status of a device. This includes determining whether 8518 * the current power state of the device is sufficient for 8519 * performing the command or whether it must be changed. 8520 * The PM framework is notified appropriately. 8521 * Only with a return status of DDI_SUCCESS will the 8522 * component be marked busy to the framework. 8523 * 8524 * All callers of sd_pm_entry must check the return status 8525 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 8526 * of DDI_FAILURE indicates the device failed to power up. 8527 * In this case un_pm_count has been adjusted so the result 8528 * on exit is still powered down, i.e., count is less than 0. 8529 * Calling sd_pm_exit with this count value hits an ASSERT. 8530 * 8531 * Return Code: DDI_SUCCESS or DDI_FAILURE 8532 * 8533 * Context: Kernel thread context. 8534 */ 8535 8536 static int 8537 sd_pm_entry(struct sd_lun *un) 8538 { 8539 int return_status = DDI_SUCCESS; 8540 8541 ASSERT(!mutex_owned(SD_MUTEX(un))); 8542 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8543 8544 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 8545 8546 if (un->un_f_pm_is_enabled == FALSE) { 8547 SD_TRACE(SD_LOG_IO_PM, un, 8548 "sd_pm_entry: exiting, PM not enabled\n"); 8549 return (return_status); 8550 } 8551 8552 /* 8553 * Just increment a counter if PM is enabled. On the transition from 8554 * 0 ==> 1, mark the device as busy. The iodone side will decrement 8555 * the count with each IO and mark the device as idle when the count 8556 * hits 0. 8557 * 8558 * If the count is less than 0 the device is powered down. If a powered 8559 * down device is successfully powered up then the count must be 8560 * incremented to reflect the power up. Note that it'll get incremented 8561 * a second time to become busy. 8562 * 8563 * Because the following has the potential to change the device state 8564 * and must release the un_pm_mutex to do so, only one thread can be 8565 * allowed through at a time. 
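 *
 * (The serialization below is a classic condition-variable gate;
 * in sketch form:
 *
 *	mutex_enter(&un->un_pm_mutex);
 *	while (un->un_pm_busy == TRUE)
 *		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
 *	un->un_pm_busy = TRUE;
 *	... may drop un_pm_mutex to call into the PM framework ...
 *	un->un_pm_busy = FALSE;
 *	cv_signal(&un->un_pm_busy_cv);
 *	mutex_exit(&un->un_pm_mutex);
 * )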
8566 */ 8567 8568 mutex_enter(&un->un_pm_mutex); 8569 while (un->un_pm_busy == TRUE) { 8570 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8571 } 8572 un->un_pm_busy = TRUE; 8573 8574 if (un->un_pm_count < 1) { 8575 8576 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8577 8578 /* 8579 * Indicate we are now busy so the framework won't attempt to 8580 * power down the device. This call will only fail if either 8581 * we passed a bad component number or the device has no 8582 * components. Neither of these should ever happen. 8583 */ 8584 mutex_exit(&un->un_pm_mutex); 8585 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8586 ASSERT(return_status == DDI_SUCCESS); 8587 8588 mutex_enter(&un->un_pm_mutex); 8589 8590 if (un->un_pm_count < 0) { 8591 mutex_exit(&un->un_pm_mutex); 8592 8593 SD_TRACE(SD_LOG_IO_PM, un, 8594 "sd_pm_entry: power up component\n"); 8595 8596 /* 8597 * pm_raise_power will cause sdpower to be called 8598 * which brings the device power level to the 8599 * desired state, ON in this case. If successful, 8600 * un_pm_count and un_power_level will be updated 8601 * appropriately. 8602 */ 8603 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8604 SD_SPINDLE_ON); 8605 8606 mutex_enter(&un->un_pm_mutex); 8607 8608 if (return_status != DDI_SUCCESS) { 8609 /* 8610 * Power up failed. 8611 * Idle the device and adjust the count 8612 * so the result on exit is that we're 8613 * still powered down, ie. count is less than 0. 8614 */ 8615 SD_TRACE(SD_LOG_IO_PM, un, 8616 "sd_pm_entry: power up failed," 8617 " idle the component\n"); 8618 8619 (void) pm_idle_component(SD_DEVINFO(un), 0); 8620 un->un_pm_count--; 8621 } else { 8622 /* 8623 * Device is powered up, verify the 8624 * count is non-negative. 8625 * This is debug only. 8626 */ 8627 ASSERT(un->un_pm_count == 0); 8628 } 8629 } 8630 8631 if (return_status == DDI_SUCCESS) { 8632 /* 8633 * For performance, now that the device has been tagged 8634 * as busy, and it's known to be powered up, update the 8635 * chain types to use jump tables that do not include 8636 * pm. This significantly lowers the overhead and 8637 * therefore improves performance. 8638 */ 8639 8640 mutex_exit(&un->un_pm_mutex); 8641 mutex_enter(SD_MUTEX(un)); 8642 SD_TRACE(SD_LOG_IO_PM, un, 8643 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8644 un->un_uscsi_chain_type); 8645 8646 if (un->un_f_non_devbsize_supported) { 8647 un->un_buf_chain_type = 8648 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8649 } else { 8650 un->un_buf_chain_type = 8651 SD_CHAIN_INFO_DISK_NO_PM; 8652 } 8653 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8654 8655 SD_TRACE(SD_LOG_IO_PM, un, 8656 " changed uscsi_chain_type to %d\n", 8657 un->un_uscsi_chain_type); 8658 mutex_exit(SD_MUTEX(un)); 8659 mutex_enter(&un->un_pm_mutex); 8660 8661 if (un->un_pm_idle_timeid == NULL) { 8662 /* 300 ms. */ 8663 un->un_pm_idle_timeid = 8664 timeout(sd_pm_idletimeout_handler, un, 8665 (drv_usectohz((clock_t)300000))); 8666 /* 8667 * Include an extra call to busy which keeps the 8668 * device busy with-respect-to the PM layer 8669 * until the timer fires, at which time it'll 8670 * get the extra idle call. 8671 */ 8672 (void) pm_busy_component(SD_DEVINFO(un), 0); 8673 } 8674 } 8675 } 8676 un->un_pm_busy = FALSE; 8677 /* Next... 
*/ 8678 cv_signal(&un->un_pm_busy_cv); 8679 8680 un->un_pm_count++; 8681 8682 SD_TRACE(SD_LOG_IO_PM, un, 8683 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8684 8685 mutex_exit(&un->un_pm_mutex); 8686 8687 return (return_status); 8688 } 8689 8690 8691 /* 8692 * Function: sd_pm_exit 8693 * 8694 * Description: Called at the completion of a command to manage busy 8695 * status for the device. If the device becomes idle the 8696 * PM framework is notified. 8697 * 8698 * Context: Kernel thread context 8699 */ 8700 8701 static void 8702 sd_pm_exit(struct sd_lun *un) 8703 { 8704 ASSERT(!mutex_owned(SD_MUTEX(un))); 8705 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8706 8707 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8708 8709 /* 8710 * After attach the following flag is only read, so don't 8711 * take the penalty of acquiring a mutex for it. 8712 */ 8713 if (un->un_f_pm_is_enabled == TRUE) { 8714 8715 mutex_enter(&un->un_pm_mutex); 8716 un->un_pm_count--; 8717 8718 SD_TRACE(SD_LOG_IO_PM, un, 8719 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8720 8721 ASSERT(un->un_pm_count >= 0); 8722 if (un->un_pm_count == 0) { 8723 mutex_exit(&un->un_pm_mutex); 8724 8725 SD_TRACE(SD_LOG_IO_PM, un, 8726 "sd_pm_exit: idle component\n"); 8727 8728 (void) pm_idle_component(SD_DEVINFO(un), 0); 8729 8730 } else { 8731 mutex_exit(&un->un_pm_mutex); 8732 } 8733 } 8734 8735 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 8736 } 8737 8738 8739 /* 8740 * Function: sdopen 8741 * 8742 * Description: Driver's open(9e) entry point function. 8743 * 8744 * Arguments: dev_p - pointer to device number 8745 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 8746 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 8747 * cred_p - user credential pointer 8748 * 8749 * Return Code: EINVAL 8750 * ENXIO 8751 * EIO 8752 * EROFS 8753 * EBUSY 8754 * 8755 * Context: Kernel thread context 8756 */ 8757 /* ARGSUSED */ 8758 static int 8759 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 8760 { 8761 struct sd_lun *un; 8762 int nodelay; 8763 int part; 8764 uint64_t partmask; 8765 int instance; 8766 dev_t dev; 8767 int rval = EIO; 8768 diskaddr_t nblks = 0; 8769 8770 /* Validate the open type */ 8771 if (otyp >= OTYPCNT) { 8772 return (EINVAL); 8773 } 8774 8775 dev = *dev_p; 8776 instance = SDUNIT(dev); 8777 mutex_enter(&sd_detach_mutex); 8778 8779 /* 8780 * Fail the open if there is no softstate for the instance, or 8781 * if another thread somewhere is trying to detach the instance. 8782 */ 8783 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 8784 (un->un_detach_count != 0)) { 8785 mutex_exit(&sd_detach_mutex); 8786 /* 8787 * The probe cache only needs to be cleared when open(9e) fails 8788 * with ENXIO (4238046). 8789 */ 8790 /* 8791 * Unconditionally clearing the probe cache is ok with 8792 * separate sd/ssd binaries; on the x86 platform it can 8793 * be an issue with both parallel and fibre in one 8794 * binary. 8795 */ 8796 sd_scsi_clear_probe_cache(); 8797 return (ENXIO); 8798 } 8799 8800 /* 8801 * The un_layer_count is to prevent another thread in specfs from 8802 * trying to detach the instance, which can happen when we are 8803 * called from a higher-layer driver instead of thru specfs. 8804 * This will not be needed when DDI provides a layered driver 8805 * interface that allows specfs to know that an instance is in 8806 * use by a layered driver and should not be detached. 8807 * 8808 * Note: the semantics for layered driver opens are exactly one 8809 * close for every open. 
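 *
 * (Worked example: two OTYP_LYR opens of the same partition leave
 * un_layer_count == 2 and un_ocmap.lyropen[part] == 2 below; each
 * layered close undoes one of each, and sd_unit_detach() refuses
 * to proceed until both counts drain back to zero.)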
8810 */ 8811 if (otyp == OTYP_LYR) { 8812 un->un_layer_count++; 8813 } 8814 8815 /* 8816 * Keep a count of the current # of opens in progress. This is because 8817 * some layered drivers try to call us as a regular open. This can 8818 * cause problems that we cannot prevent; however, by keeping this count 8819 * we can at least keep our open and detach routines from racing against 8820 * each other under such conditions. 8821 */ 8822 un->un_opens_in_progress++; 8823 mutex_exit(&sd_detach_mutex); 8824 8825 nodelay = (flag & (FNDELAY | FNONBLOCK)); 8826 part = SDPART(dev); 8827 partmask = 1 << part; 8828 8829 /* 8830 * We use a semaphore here in order to serialize 8831 * open and close requests on the device. 8832 */ 8833 sema_p(&un->un_semoclose); 8834 8835 mutex_enter(SD_MUTEX(un)); 8836 8837 /* 8838 * All device accesses go thru sdstrategy(), where we check 8839 * the suspend status; but a scsi_poll command can bypass 8840 * sdstrategy(), so we also need to check the PM 8841 * status. 8842 */ 8843 8844 if (!nodelay) { 8845 while ((un->un_state == SD_STATE_SUSPENDED) || 8846 (un->un_state == SD_STATE_PM_CHANGING)) { 8847 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 8848 } 8849 8850 mutex_exit(SD_MUTEX(un)); 8851 if (sd_pm_entry(un) != DDI_SUCCESS) { 8852 rval = EIO; 8853 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 8854 "sdopen: sd_pm_entry failed\n"); 8855 goto open_failed_with_pm; 8856 } 8857 mutex_enter(SD_MUTEX(un)); 8858 } 8859 8860 /* check for previous exclusive open */ 8861 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 8862 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 8863 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 8864 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 8865 8866 if (un->un_exclopen & (partmask)) { 8867 goto excl_open_fail; 8868 } 8869 8870 if (flag & FEXCL) { 8871 int i; 8872 if (un->un_ocmap.lyropen[part]) { 8873 goto excl_open_fail; 8874 } 8875 for (i = 0; i < (OTYPCNT - 1); i++) { 8876 if (un->un_ocmap.regopen[i] & (partmask)) { 8877 goto excl_open_fail; 8878 } 8879 } 8880 } 8881 8882 /* 8883 * Check the write permission if this is a removable media device, 8884 * NDELAY has not been set, and writable permission is requested. 8885 * 8886 * Note: If NDELAY was set and this is write-protected media the WRITE 8887 * attempt will fail with EIO as part of the I/O processing. This is a 8888 * more permissive implementation that allows the open to succeed and 8889 * WRITE attempts to fail when appropriate. 8890 */ 8891 if (un->un_f_chk_wp_open) { 8892 if ((flag & FWRITE) && (!nodelay)) { 8893 mutex_exit(SD_MUTEX(un)); 8894 /* 8895 * For a writable DVD drive, defer the write- 8896 * permission check until sdstrategy; do not fail the 8897 * open even if FWRITE is set, as the device may be 8898 * writable depending upon the media, and the media 8899 * can change after the call to open(). 8900 */ 8901 if (un->un_f_dvdram_writable_device == FALSE) { 8902 if (ISCD(un) || sr_check_wp(dev)) { 8903 rval = EROFS; 8904 mutex_enter(SD_MUTEX(un)); 8905 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8906 "write to cd or write protected media\n"); 8907 goto open_fail; 8908 } 8909 } 8910 mutex_enter(SD_MUTEX(un)); 8911 } 8912 } 8913 8914 /* 8915 * If opening in NDELAY/NONBLOCK mode, just return. 8916 * Check if disk is ready and has a valid geometry later. 
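 *
 * (Illustrative: a management utility that only needs to send
 * ioctls, e.g. to label a blank drive, relies on exactly this by
 * opening the raw device with O_NDELAY; the path below is an
 * example only:
 *
 *	fd = open("/dev/rdsk/c0t0d0s2", O_RDWR | O_NDELAY);
 *
 * The open succeeds even with no media or an invalid label, and
 * readiness is checked later as described above.)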
8917 */ 8918 if (!nodelay) { 8919 mutex_exit(SD_MUTEX(un)); 8920 rval = sd_ready_and_valid(un); 8921 mutex_enter(SD_MUTEX(un)); 8922 /* 8923 * Fail if device is not ready or if the number of disk 8924 * blocks is zero or negative for non CD devices. 8925 */ 8926 8927 nblks = 0; 8928 8929 if (rval == SD_READY_VALID && (!ISCD(un))) { 8930 /* if cmlb_partinfo fails, nblks remains 0 */ 8931 mutex_exit(SD_MUTEX(un)); 8932 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 8933 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 8934 mutex_enter(SD_MUTEX(un)); 8935 } 8936 8937 if ((rval != SD_READY_VALID) || 8938 (!ISCD(un) && nblks <= 0)) { 8939 rval = un->un_f_has_removable_media ? ENXIO : EIO; 8940 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8941 "device not ready or invalid disk block value\n"); 8942 goto open_fail; 8943 } 8944 #if defined(__i386) || defined(__amd64) 8945 } else { 8946 uchar_t *cp; 8947 /* 8948 * x86 requires special nodelay handling, so that p0 is 8949 * always defined and accessible. 8950 * Invalidate geometry only if device is not already open. 8951 */ 8952 cp = &un->un_ocmap.chkd[0]; 8953 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 8954 if (*cp != (uchar_t)0) { 8955 break; 8956 } 8957 cp++; 8958 } 8959 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 8960 mutex_exit(SD_MUTEX(un)); 8961 cmlb_invalidate(un->un_cmlbhandle, 8962 (void *)SD_PATH_DIRECT); 8963 mutex_enter(SD_MUTEX(un)); 8964 } 8965 8966 #endif 8967 } 8968 8969 if (otyp == OTYP_LYR) { 8970 un->un_ocmap.lyropen[part]++; 8971 } else { 8972 un->un_ocmap.regopen[otyp] |= partmask; 8973 } 8974 8975 /* Set up open and exclusive open flags */ 8976 if (flag & FEXCL) { 8977 un->un_exclopen |= (partmask); 8978 } 8979 8980 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8981 "open of part %d type %d\n", part, otyp); 8982 8983 mutex_exit(SD_MUTEX(un)); 8984 if (!nodelay) { 8985 sd_pm_exit(un); 8986 } 8987 8988 sema_v(&un->un_semoclose); 8989 8990 mutex_enter(&sd_detach_mutex); 8991 un->un_opens_in_progress--; 8992 mutex_exit(&sd_detach_mutex); 8993 8994 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 8995 return (DDI_SUCCESS); 8996 8997 excl_open_fail: 8998 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 8999 rval = EBUSY; 9000 9001 open_fail: 9002 mutex_exit(SD_MUTEX(un)); 9003 9004 /* 9005 * On a failed open we must exit the pm management. 9006 */ 9007 if (!nodelay) { 9008 sd_pm_exit(un); 9009 } 9010 open_failed_with_pm: 9011 sema_v(&un->un_semoclose); 9012 9013 mutex_enter(&sd_detach_mutex); 9014 un->un_opens_in_progress--; 9015 if (otyp == OTYP_LYR) { 9016 un->un_layer_count--; 9017 } 9018 mutex_exit(&sd_detach_mutex); 9019 9020 return (rval); 9021 } 9022 9023 9024 /* 9025 * Function: sdclose 9026 * 9027 * Description: Driver's close(9e) entry point function. 
9028 * 9029 * Arguments: dev - device number 9030 * flag - file status flag, informational only 9031 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9032 * cred_p - user credential pointer 9033 * 9034 * Return Code: ENXIO 9035 * 9036 * Context: Kernel thread context 9037 */ 9038 /* ARGSUSED */ 9039 static int 9040 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9041 { 9042 struct sd_lun *un; 9043 uchar_t *cp; 9044 int part; 9045 int nodelay; 9046 int rval = 0; 9047 9048 /* Validate the open type */ 9049 if (otyp >= OTYPCNT) { 9050 return (ENXIO); 9051 } 9052 9053 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9054 return (ENXIO); 9055 } 9056 9057 part = SDPART(dev); 9058 nodelay = flag & (FNDELAY | FNONBLOCK); 9059 9060 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9061 "sdclose: close of part %d type %d\n", part, otyp); 9062 9063 /* 9064 * We use a semaphore here in order to serialize 9065 * open and close requests on the device. 9066 */ 9067 sema_p(&un->un_semoclose); 9068 9069 mutex_enter(SD_MUTEX(un)); 9070 9071 /* Don't proceed if power is being changed. */ 9072 while (un->un_state == SD_STATE_PM_CHANGING) { 9073 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9074 } 9075 9076 if (un->un_exclopen & (1 << part)) { 9077 un->un_exclopen &= ~(1 << part); 9078 } 9079 9080 /* Update the open partition map */ 9081 if (otyp == OTYP_LYR) { 9082 un->un_ocmap.lyropen[part] -= 1; 9083 } else { 9084 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9085 } 9086 9087 cp = &un->un_ocmap.chkd[0]; 9088 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9089 if (*cp != NULL) { 9090 break; 9091 } 9092 cp++; 9093 } 9094 9095 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9096 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9097 9098 /* 9099 * We avoid persistance upon the last close, and set 9100 * the throttle back to the maximum. 9101 */ 9102 un->un_throttle = un->un_saved_throttle; 9103 9104 if (un->un_state == SD_STATE_OFFLINE) { 9105 if (un->un_f_is_fibre == FALSE) { 9106 scsi_log(SD_DEVINFO(un), sd_label, 9107 CE_WARN, "offline\n"); 9108 } 9109 mutex_exit(SD_MUTEX(un)); 9110 cmlb_invalidate(un->un_cmlbhandle, 9111 (void *)SD_PATH_DIRECT); 9112 mutex_enter(SD_MUTEX(un)); 9113 9114 } else { 9115 /* 9116 * Flush any outstanding writes in NVRAM cache. 9117 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9118 * cmd, it may not work for non-Pluto devices. 9119 * SYNCHRONIZE CACHE is not required for removables, 9120 * except DVD-RAM drives. 9121 * 9122 * Also note: because SYNCHRONIZE CACHE is currently 9123 * the only command issued here that requires the 9124 * drive be powered up, only do the power up before 9125 * sending the Sync Cache command. If additional 9126 * commands are added which require a powered up 9127 * drive, the following sequence may have to change. 9128 * 9129 * And finally, note that parallel SCSI on SPARC 9130 * only issues a Sync Cache to DVD-RAM, a newly 9131 * supported device. 
9132 */ 9133 #if defined(__i386) || defined(__amd64) 9134 if (un->un_f_sync_cache_supported || 9135 un->un_f_dvdram_writable_device == TRUE) { 9136 #else 9137 if (un->un_f_dvdram_writable_device == TRUE) { 9138 #endif 9139 mutex_exit(SD_MUTEX(un)); 9140 if (sd_pm_entry(un) == DDI_SUCCESS) { 9141 rval = 9142 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9143 NULL); 9144 /* ignore error if not supported */ 9145 if (rval == ENOTSUP) { 9146 rval = 0; 9147 } else if (rval != 0) { 9148 rval = EIO; 9149 } 9150 sd_pm_exit(un); 9151 } else { 9152 rval = EIO; 9153 } 9154 mutex_enter(SD_MUTEX(un)); 9155 } 9156 9157 /* 9158 * For devices which supports DOOR_LOCK, send an ALLOW 9159 * MEDIA REMOVAL command, but don't get upset if it 9160 * fails. We need to raise the power of the drive before 9161 * we can call sd_send_scsi_DOORLOCK() 9162 */ 9163 if (un->un_f_doorlock_supported) { 9164 mutex_exit(SD_MUTEX(un)); 9165 if (sd_pm_entry(un) == DDI_SUCCESS) { 9166 rval = sd_send_scsi_DOORLOCK(un, 9167 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9168 9169 sd_pm_exit(un); 9170 if (ISCD(un) && (rval != 0) && 9171 (nodelay != 0)) { 9172 rval = ENXIO; 9173 } 9174 } else { 9175 rval = EIO; 9176 } 9177 mutex_enter(SD_MUTEX(un)); 9178 } 9179 9180 /* 9181 * If a device has removable media, invalidate all 9182 * parameters related to media, such as geometry, 9183 * blocksize, and blockcount. 9184 */ 9185 if (un->un_f_has_removable_media) { 9186 sr_ejected(un); 9187 } 9188 9189 /* 9190 * Destroy the cache (if it exists) which was 9191 * allocated for the write maps since this is 9192 * the last close for this media. 9193 */ 9194 if (un->un_wm_cache) { 9195 /* 9196 * Check if there are pending commands. 9197 * and if there are give a warning and 9198 * do not destroy the cache. 9199 */ 9200 if (un->un_ncmds_in_driver > 0) { 9201 scsi_log(SD_DEVINFO(un), 9202 sd_label, CE_WARN, 9203 "Unable to clean up memory " 9204 "because of pending I/O\n"); 9205 } else { 9206 kmem_cache_destroy( 9207 un->un_wm_cache); 9208 un->un_wm_cache = NULL; 9209 } 9210 } 9211 } 9212 } 9213 9214 mutex_exit(SD_MUTEX(un)); 9215 sema_v(&un->un_semoclose); 9216 9217 if (otyp == OTYP_LYR) { 9218 mutex_enter(&sd_detach_mutex); 9219 /* 9220 * The detach routine may run when the layer count 9221 * drops to zero. 9222 */ 9223 un->un_layer_count--; 9224 mutex_exit(&sd_detach_mutex); 9225 } 9226 9227 return (rval); 9228 } 9229 9230 9231 /* 9232 * Function: sd_ready_and_valid 9233 * 9234 * Description: Test if device is ready and has a valid geometry. 9235 * 9236 * Arguments: dev - device number 9237 * un - driver soft state (unit) structure 9238 * 9239 * Return Code: SD_READY_VALID ready and valid label 9240 * SD_NOT_READY_VALID not ready, no label 9241 * SD_RESERVED_BY_OTHERS reservation conflict 9242 * 9243 * Context: Never called at interrupt context. 9244 */ 9245 9246 static int 9247 sd_ready_and_valid(struct sd_lun *un) 9248 { 9249 struct sd_errstats *stp; 9250 uint64_t capacity; 9251 uint_t lbasize; 9252 int rval = SD_READY_VALID; 9253 char name_str[48]; 9254 int is_valid; 9255 9256 ASSERT(un != NULL); 9257 ASSERT(!mutex_owned(SD_MUTEX(un))); 9258 9259 mutex_enter(SD_MUTEX(un)); 9260 /* 9261 * If a device has removable media, we must check if media is 9262 * ready when checking if this device is ready and valid. 
9263 */ 9264 if (un->un_f_has_removable_media) { 9265 mutex_exit(SD_MUTEX(un)); 9266 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9267 rval = SD_NOT_READY_VALID; 9268 mutex_enter(SD_MUTEX(un)); 9269 goto done; 9270 } 9271 9272 is_valid = SD_IS_VALID_LABEL(un); 9273 mutex_enter(SD_MUTEX(un)); 9274 if (!is_valid || 9275 (un->un_f_blockcount_is_valid == FALSE) || 9276 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9277 9278 /* capacity has to be read every open. */ 9279 mutex_exit(SD_MUTEX(un)); 9280 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9281 &lbasize, SD_PATH_DIRECT) != 0) { 9282 cmlb_invalidate(un->un_cmlbhandle, 9283 (void *)SD_PATH_DIRECT); 9284 mutex_enter(SD_MUTEX(un)); 9285 rval = SD_NOT_READY_VALID; 9286 goto done; 9287 } else { 9288 mutex_enter(SD_MUTEX(un)); 9289 sd_update_block_info(un, lbasize, capacity); 9290 } 9291 } 9292 9293 /* 9294 * Check if the media in the device is writable or not. 9295 */ 9296 if (!is_valid && ISCD(un)) { 9297 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9298 } 9299 9300 } else { 9301 /* 9302 * Do a test unit ready to clear any unit attention from non-cd 9303 * devices. 9304 */ 9305 mutex_exit(SD_MUTEX(un)); 9306 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9307 mutex_enter(SD_MUTEX(un)); 9308 } 9309 9310 9311 /* 9312 * If this is a non 512 block device, allocate space for 9313 * the wmap cache. This is being done here since every time 9314 * a media is changed this routine will be called and the 9315 * block size is a function of media rather than device. 9316 */ 9317 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9318 if (!(un->un_wm_cache)) { 9319 (void) snprintf(name_str, sizeof (name_str), 9320 "%s%d_cache", 9321 ddi_driver_name(SD_DEVINFO(un)), 9322 ddi_get_instance(SD_DEVINFO(un))); 9323 un->un_wm_cache = kmem_cache_create( 9324 name_str, sizeof (struct sd_w_map), 9325 8, sd_wm_cache_constructor, 9326 sd_wm_cache_destructor, NULL, 9327 (void *)un, NULL, 0); 9328 if (!(un->un_wm_cache)) { 9329 rval = ENOMEM; 9330 goto done; 9331 } 9332 } 9333 } 9334 9335 if (un->un_state == SD_STATE_NORMAL) { 9336 /* 9337 * If the target is not yet ready here (defined by a TUR 9338 * failure), invalidate the geometry and print an 'offline' 9339 * message. This is a legacy message, as the state of the 9340 * target is not actually changed to SD_STATE_OFFLINE. 9341 * 9342 * If the TUR fails for EACCES (Reservation Conflict), 9343 * SD_RESERVED_BY_OTHERS will be returned to indicate 9344 * reservation conflict. If the TUR fails for other 9345 * reasons, SD_NOT_READY_VALID will be returned. 
9346 */ 9347 int err; 9348 9349 mutex_exit(SD_MUTEX(un)); 9350 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9351 mutex_enter(SD_MUTEX(un)); 9352 9353 if (err != 0) { 9354 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9355 "offline or reservation conflict\n"); 9356 mutex_exit(SD_MUTEX(un)); 9357 cmlb_invalidate(un->un_cmlbhandle, 9358 (void *)SD_PATH_DIRECT); 9359 mutex_enter(SD_MUTEX(un)); 9360 if (err == EACCES) { 9361 rval = SD_RESERVED_BY_OTHERS; 9362 } else { 9363 rval = SD_NOT_READY_VALID; 9364 } 9365 goto done; 9366 } 9367 } 9368 9369 if (un->un_f_format_in_progress == FALSE) { 9370 mutex_exit(SD_MUTEX(un)); 9371 if (cmlb_validate(un->un_cmlbhandle, 0, 9372 (void *)SD_PATH_DIRECT) != 0) { 9373 rval = SD_NOT_READY_VALID; 9374 mutex_enter(SD_MUTEX(un)); 9375 goto done; 9376 } 9377 if (un->un_f_pkstats_enabled) { 9378 sd_set_pstats(un); 9379 SD_TRACE(SD_LOG_IO_PARTITION, un, 9380 "sd_ready_and_valid: un:0x%p pstats created and " 9381 "set\n", un); 9382 } 9383 mutex_enter(SD_MUTEX(un)); 9384 } 9385 9386 /* 9387 * If this device supports DOOR_LOCK command, try and send 9388 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9389 * if it fails. For a CD, however, it is an error 9390 */ 9391 if (un->un_f_doorlock_supported) { 9392 mutex_exit(SD_MUTEX(un)); 9393 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9394 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9395 rval = SD_NOT_READY_VALID; 9396 mutex_enter(SD_MUTEX(un)); 9397 goto done; 9398 } 9399 mutex_enter(SD_MUTEX(un)); 9400 } 9401 9402 /* The state has changed, inform the media watch routines */ 9403 un->un_mediastate = DKIO_INSERTED; 9404 cv_broadcast(&un->un_state_cv); 9405 rval = SD_READY_VALID; 9406 9407 done: 9408 9409 /* 9410 * Initialize the capacity kstat value, if no media previously 9411 * (capacity kstat is 0) and a media has been inserted 9412 * (un_blockcount > 0). 9413 */ 9414 if (un->un_errstats != NULL) { 9415 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9416 if ((stp->sd_capacity.value.ui64 == 0) && 9417 (un->un_f_blockcount_is_valid == TRUE)) { 9418 stp->sd_capacity.value.ui64 = 9419 (uint64_t)((uint64_t)un->un_blockcount * 9420 un->un_sys_blocksize); 9421 } 9422 } 9423 9424 mutex_exit(SD_MUTEX(un)); 9425 return (rval); 9426 } 9427 9428 9429 /* 9430 * Function: sdmin 9431 * 9432 * Description: Routine to limit the size of a data transfer. Used in 9433 * conjunction with physio(9F). 9434 * 9435 * Arguments: bp - pointer to the indicated buf(9S) struct. 9436 * 9437 * Context: Kernel thread context. 9438 */ 9439 9440 static void 9441 sdmin(struct buf *bp) 9442 { 9443 struct sd_lun *un; 9444 int instance; 9445 9446 instance = SDUNIT(bp->b_edev); 9447 9448 un = ddi_get_soft_state(sd_state, instance); 9449 ASSERT(un != NULL); 9450 9451 if (bp->b_bcount > un->un_max_xfer_size) { 9452 bp->b_bcount = un->un_max_xfer_size; 9453 } 9454 } 9455 9456 9457 /* 9458 * Function: sdread 9459 * 9460 * Description: Driver's read(9e) entry point function. 9461 * 9462 * Arguments: dev - device number 9463 * uio - structure pointer describing where data is to be stored 9464 * in user's space 9465 * cred_p - user credential pointer 9466 * 9467 * Return Code: ENXIO 9468 * EIO 9469 * EINVAL 9470 * value returned by physio 9471 * 9472 * Context: Kernel thread context. 
9473 */ 9474 /* ARGSUSED */ 9475 static int 9476 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9477 { 9478 struct sd_lun *un = NULL; 9479 int secmask; 9480 int err; 9481 9482 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9483 return (ENXIO); 9484 } 9485 9486 ASSERT(!mutex_owned(SD_MUTEX(un))); 9487 9488 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9489 mutex_enter(SD_MUTEX(un)); 9490 /* 9491 * Because the call to sd_ready_and_valid will issue I/O we 9492 * must wait here if either the device is suspended or 9493 * if it's power level is changing. 9494 */ 9495 while ((un->un_state == SD_STATE_SUSPENDED) || 9496 (un->un_state == SD_STATE_PM_CHANGING)) { 9497 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9498 } 9499 un->un_ncmds_in_driver++; 9500 mutex_exit(SD_MUTEX(un)); 9501 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9502 mutex_enter(SD_MUTEX(un)); 9503 un->un_ncmds_in_driver--; 9504 ASSERT(un->un_ncmds_in_driver >= 0); 9505 mutex_exit(SD_MUTEX(un)); 9506 return (EIO); 9507 } 9508 mutex_enter(SD_MUTEX(un)); 9509 un->un_ncmds_in_driver--; 9510 ASSERT(un->un_ncmds_in_driver >= 0); 9511 mutex_exit(SD_MUTEX(un)); 9512 } 9513 9514 /* 9515 * Read requests are restricted to multiples of the system block size. 9516 */ 9517 secmask = un->un_sys_blocksize - 1; 9518 9519 if (uio->uio_loffset & ((offset_t)(secmask))) { 9520 SD_ERROR(SD_LOG_READ_WRITE, un, 9521 "sdread: file offset not modulo %d\n", 9522 un->un_sys_blocksize); 9523 err = EINVAL; 9524 } else if (uio->uio_iov->iov_len & (secmask)) { 9525 SD_ERROR(SD_LOG_READ_WRITE, un, 9526 "sdread: transfer length not modulo %d\n", 9527 un->un_sys_blocksize); 9528 err = EINVAL; 9529 } else { 9530 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9531 } 9532 return (err); 9533 } 9534 9535 9536 /* 9537 * Function: sdwrite 9538 * 9539 * Description: Driver's write(9e) entry point function. 9540 * 9541 * Arguments: dev - device number 9542 * uio - structure pointer describing where data is stored in 9543 * user's space 9544 * cred_p - user credential pointer 9545 * 9546 * Return Code: ENXIO 9547 * EIO 9548 * EINVAL 9549 * value returned by physio 9550 * 9551 * Context: Kernel thread context. 9552 */ 9553 /* ARGSUSED */ 9554 static int 9555 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9556 { 9557 struct sd_lun *un = NULL; 9558 int secmask; 9559 int err; 9560 9561 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9562 return (ENXIO); 9563 } 9564 9565 ASSERT(!mutex_owned(SD_MUTEX(un))); 9566 9567 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9568 mutex_enter(SD_MUTEX(un)); 9569 /* 9570 * Because the call to sd_ready_and_valid will issue I/O we 9571 * must wait here if either the device is suspended or 9572 * if it's power level is changing. 9573 */ 9574 while ((un->un_state == SD_STATE_SUSPENDED) || 9575 (un->un_state == SD_STATE_PM_CHANGING)) { 9576 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9577 } 9578 un->un_ncmds_in_driver++; 9579 mutex_exit(SD_MUTEX(un)); 9580 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9581 mutex_enter(SD_MUTEX(un)); 9582 un->un_ncmds_in_driver--; 9583 ASSERT(un->un_ncmds_in_driver >= 0); 9584 mutex_exit(SD_MUTEX(un)); 9585 return (EIO); 9586 } 9587 mutex_enter(SD_MUTEX(un)); 9588 un->un_ncmds_in_driver--; 9589 ASSERT(un->un_ncmds_in_driver >= 0); 9590 mutex_exit(SD_MUTEX(un)); 9591 } 9592 9593 /* 9594 * Write requests are restricted to multiples of the system block size. 
9595 */ 9596 secmask = un->un_sys_blocksize - 1; 9597 9598 if (uio->uio_loffset & ((offset_t)(secmask))) { 9599 SD_ERROR(SD_LOG_READ_WRITE, un, 9600 "sdwrite: file offset not modulo %d\n", 9601 un->un_sys_blocksize); 9602 err = EINVAL; 9603 } else if (uio->uio_iov->iov_len & (secmask)) { 9604 SD_ERROR(SD_LOG_READ_WRITE, un, 9605 "sdwrite: transfer length not modulo %d\n", 9606 un->un_sys_blocksize); 9607 err = EINVAL; 9608 } else { 9609 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9610 } 9611 return (err); 9612 } 9613 9614 9615 /* 9616 * Function: sdaread 9617 * 9618 * Description: Driver's aread(9e) entry point function. 9619 * 9620 * Arguments: dev - device number 9621 * aio - structure pointer describing where data is to be stored 9622 * cred_p - user credential pointer 9623 * 9624 * Return Code: ENXIO 9625 * EIO 9626 * EINVAL 9627 * value returned by aphysio 9628 * 9629 * Context: Kernel thread context. 9630 */ 9631 /* ARGSUSED */ 9632 static int 9633 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9634 { 9635 struct sd_lun *un = NULL; 9636 struct uio *uio = aio->aio_uio; 9637 int secmask; 9638 int err; 9639 9640 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9641 return (ENXIO); 9642 } 9643 9644 ASSERT(!mutex_owned(SD_MUTEX(un))); 9645 9646 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9647 mutex_enter(SD_MUTEX(un)); 9648 /* 9649 * Because the call to sd_ready_and_valid will issue I/O we 9650 * must wait here if either the device is suspended or 9651 * if it's power level is changing. 9652 */ 9653 while ((un->un_state == SD_STATE_SUSPENDED) || 9654 (un->un_state == SD_STATE_PM_CHANGING)) { 9655 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9656 } 9657 un->un_ncmds_in_driver++; 9658 mutex_exit(SD_MUTEX(un)); 9659 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9660 mutex_enter(SD_MUTEX(un)); 9661 un->un_ncmds_in_driver--; 9662 ASSERT(un->un_ncmds_in_driver >= 0); 9663 mutex_exit(SD_MUTEX(un)); 9664 return (EIO); 9665 } 9666 mutex_enter(SD_MUTEX(un)); 9667 un->un_ncmds_in_driver--; 9668 ASSERT(un->un_ncmds_in_driver >= 0); 9669 mutex_exit(SD_MUTEX(un)); 9670 } 9671 9672 /* 9673 * Read requests are restricted to multiples of the system block size. 9674 */ 9675 secmask = un->un_sys_blocksize - 1; 9676 9677 if (uio->uio_loffset & ((offset_t)(secmask))) { 9678 SD_ERROR(SD_LOG_READ_WRITE, un, 9679 "sdaread: file offset not modulo %d\n", 9680 un->un_sys_blocksize); 9681 err = EINVAL; 9682 } else if (uio->uio_iov->iov_len & (secmask)) { 9683 SD_ERROR(SD_LOG_READ_WRITE, un, 9684 "sdaread: transfer length not modulo %d\n", 9685 un->un_sys_blocksize); 9686 err = EINVAL; 9687 } else { 9688 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9689 } 9690 return (err); 9691 } 9692 9693 9694 /* 9695 * Function: sdawrite 9696 * 9697 * Description: Driver's awrite(9e) entry point function. 9698 * 9699 * Arguments: dev - device number 9700 * aio - structure pointer describing where data is stored 9701 * cred_p - user credential pointer 9702 * 9703 * Return Code: ENXIO 9704 * EIO 9705 * EINVAL 9706 * value returned by aphysio 9707 * 9708 * Context: Kernel thread context. 
9709 */ 9710 /* ARGSUSED */ 9711 static int 9712 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9713 { 9714 struct sd_lun *un = NULL; 9715 struct uio *uio = aio->aio_uio; 9716 int secmask; 9717 int err; 9718 9719 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9720 return (ENXIO); 9721 } 9722 9723 ASSERT(!mutex_owned(SD_MUTEX(un))); 9724 9725 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9726 mutex_enter(SD_MUTEX(un)); 9727 /* 9728 * Because the call to sd_ready_and_valid will issue I/O we 9729 * must wait here if either the device is suspended or 9730 * if it's power level is changing. 9731 */ 9732 while ((un->un_state == SD_STATE_SUSPENDED) || 9733 (un->un_state == SD_STATE_PM_CHANGING)) { 9734 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9735 } 9736 un->un_ncmds_in_driver++; 9737 mutex_exit(SD_MUTEX(un)); 9738 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9739 mutex_enter(SD_MUTEX(un)); 9740 un->un_ncmds_in_driver--; 9741 ASSERT(un->un_ncmds_in_driver >= 0); 9742 mutex_exit(SD_MUTEX(un)); 9743 return (EIO); 9744 } 9745 mutex_enter(SD_MUTEX(un)); 9746 un->un_ncmds_in_driver--; 9747 ASSERT(un->un_ncmds_in_driver >= 0); 9748 mutex_exit(SD_MUTEX(un)); 9749 } 9750 9751 /* 9752 * Write requests are restricted to multiples of the system block size. 9753 */ 9754 secmask = un->un_sys_blocksize - 1; 9755 9756 if (uio->uio_loffset & ((offset_t)(secmask))) { 9757 SD_ERROR(SD_LOG_READ_WRITE, un, 9758 "sdawrite: file offset not modulo %d\n", 9759 un->un_sys_blocksize); 9760 err = EINVAL; 9761 } else if (uio->uio_iov->iov_len & (secmask)) { 9762 SD_ERROR(SD_LOG_READ_WRITE, un, 9763 "sdawrite: transfer length not modulo %d\n", 9764 un->un_sys_blocksize); 9765 err = EINVAL; 9766 } else { 9767 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9768 } 9769 return (err); 9770 } 9771 9772 9773 9774 9775 9776 /* 9777 * Driver IO processing follows the following sequence: 9778 * 9779 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9780 * | | ^ 9781 * v v | 9782 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9783 * | | | | 9784 * v | | | 9785 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9786 * | | ^ ^ 9787 * v v | | 9788 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9789 * | | | | 9790 * +---+ | +------------+ +-------+ 9791 * | | | | 9792 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9793 * | v | | 9794 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9795 * | | ^ | 9796 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9797 * | v | | 9798 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9799 * | | ^ | 9800 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9801 * | v | | 9802 * | sd_checksum_iostart() sd_checksum_iodone() | 9803 * | | ^ | 9804 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9805 * | v | | 9806 * | sd_pm_iostart() sd_pm_iodone() | 9807 * | | ^ | 9808 * | | | | 9809 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9810 * | ^ 9811 * v | 9812 * sd_core_iostart() | 9813 * | | 9814 * | +------>(*destroypkt)() 9815 * +-> sd_start_cmds() <-+ | | 9816 * | | | v 9817 * | | | scsi_destroy_pkt(9F) 9818 * | | | 9819 * +->(*initpkt)() +- sdintr() 9820 * | | | | 9821 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9822 * | +-> scsi_setup_cdb(9F) | 9823 * | | 9824 * +--> scsi_transport(9F) | 9825 * | | 9826 * +----> SCSA ---->+ 9827 * 9828 * 9829 * This code is based upon the following presumtions: 9830 * 9831 * - iostart and iodone functions operate on buf(9S) structures. 
These
9832 * functions perform the necessary operations on the buf(9S) and pass
9833 * them along to the next function in the chain by using the macros
9834 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
9835 * (for iodone side functions).
9836 *
9837 * - The iostart side functions may sleep. The iodone side functions
9838 * are called under interrupt context and may NOT sleep. Therefore
9839 * iodone side functions also may not call iostart side functions.
9840 * (NOTE: iostart side functions should NOT sleep for memory, as
9841 * this could result in deadlock.)
9842 *
9843 * - An iostart side function may call its corresponding iodone side
9844 * function directly (if necessary).
9845 *
9846 * - In the event of an error, an iostart side function can return a buf(9S)
9847 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
9848 * b_error in the usual way of course).
9849 *
9850 * - The taskq mechanism may be used by the iodone side functions to dispatch
9851 * requests to the iostart side functions. The iostart side functions in
9852 * this case would be called under the context of a taskq thread, so it's
9853 * OK for them to block/sleep/spin in this case.
9854 *
9855 * - iostart side functions may allocate "shadow" buf(9S) structs and
9856 * pass them along to the next function in the chain. The corresponding
9857 * iodone side functions must coalesce the "shadow" bufs and return
9858 * the "original" buf to the next higher layer.
9859 *
9860 * - The b_private field of the buf(9S) struct holds a pointer to
9861 * an sd_xbuf struct, which contains information needed to
9862 * construct the scsi_pkt for the command.
9863 *
9864 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
9865 * layer must acquire & release the SD_MUTEX(un) as needed.
9866 */
9867
9868
9869 /*
9870 * Create taskq for all targets in the system. This is created at
9871 * _init(9E) and destroyed at _fini(9E).
9872 *
9873 * Note: here we set the minalloc to a reasonably high number to ensure that
9874 * we will have an adequate supply of task entries available at interrupt time.
9875 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
9876 * sd_taskq_create(). Since we do not want to sleep for allocations at
9877 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
9878 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
9879 * requests at any one instant in time.
9880 */
9881 #define SD_TASKQ_NUMTHREADS 8
9882 #define SD_TASKQ_MINALLOC 256
9883 #define SD_TASKQ_MAXALLOC 256
9884
9885 static taskq_t *sd_tq = NULL;
9886 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
9887
9888 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
9889 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
9890
9891 /*
9892 * The following task queue is being created for the write part of
9893 * read-modify-write of non-512 block size devices.
9894 * Limit the number of threads to 1 for now; this number was chosen
9895 * considering that the queue currently applies only to DVD-RAM and MO
9896 * drives, for which performance is not the main criterion at this stage.
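* A dispatch to this queue would look roughly like the following sketch
* (sd_rmw_task is a hypothetical worker function, named here only for
* illustration):
*
*	if (taskq_dispatch(sd_wmr_tq, sd_rmw_task, un, TQ_NOSLEEP) == 0) {
*		... dispatch failed: fail the request rather than
*		... sleep at interrupt time
*	}
*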
9897 * Note: It needs to be explored if we can use a single taskq in future 9898 */ 9899 #define SD_WMR_TASKQ_NUMTHREADS 1 9900 static taskq_t *sd_wmr_tq = NULL; 9901 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 9902 9903 /* 9904 * Function: sd_taskq_create 9905 * 9906 * Description: Create taskq thread(s) and preallocate task entries 9907 * 9908 * Return Code: Returns a pointer to the allocated taskq_t. 9909 * 9910 * Context: Can sleep. Requires blockable context. 9911 * 9912 * Notes: - The taskq() facility currently is NOT part of the DDI. 9913 * (definitely NOT recommeded for 3rd-party drivers!) :-) 9914 * - taskq_create() will block for memory, also it will panic 9915 * if it cannot create the requested number of threads. 9916 * - Currently taskq_create() creates threads that cannot be 9917 * swapped. 9918 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 9919 * supply of taskq entries at interrupt time (ie, so that we 9920 * do not have to sleep for memory) 9921 */ 9922 9923 static void 9924 sd_taskq_create(void) 9925 { 9926 char taskq_name[TASKQ_NAMELEN]; 9927 9928 ASSERT(sd_tq == NULL); 9929 ASSERT(sd_wmr_tq == NULL); 9930 9931 (void) snprintf(taskq_name, sizeof (taskq_name), 9932 "%s_drv_taskq", sd_label); 9933 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 9934 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9935 TASKQ_PREPOPULATE)); 9936 9937 (void) snprintf(taskq_name, sizeof (taskq_name), 9938 "%s_rmw_taskq", sd_label); 9939 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 9940 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9941 TASKQ_PREPOPULATE)); 9942 } 9943 9944 9945 /* 9946 * Function: sd_taskq_delete 9947 * 9948 * Description: Complementary cleanup routine for sd_taskq_create(). 9949 * 9950 * Context: Kernel thread context. 9951 */ 9952 9953 static void 9954 sd_taskq_delete(void) 9955 { 9956 ASSERT(sd_tq != NULL); 9957 ASSERT(sd_wmr_tq != NULL); 9958 taskq_destroy(sd_tq); 9959 taskq_destroy(sd_wmr_tq); 9960 sd_tq = NULL; 9961 sd_wmr_tq = NULL; 9962 } 9963 9964 9965 /* 9966 * Function: sdstrategy 9967 * 9968 * Description: Driver's strategy (9E) entry point function. 9969 * 9970 * Arguments: bp - pointer to buf(9S) 9971 * 9972 * Return Code: Always returns zero 9973 * 9974 * Context: Kernel thread context. 9975 */ 9976 9977 static int 9978 sdstrategy(struct buf *bp) 9979 { 9980 struct sd_lun *un; 9981 9982 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 9983 if (un == NULL) { 9984 bioerror(bp, EIO); 9985 bp->b_resid = bp->b_bcount; 9986 biodone(bp); 9987 return (0); 9988 } 9989 /* As was done in the past, fail new cmds. if state is dumping. */ 9990 if (un->un_state == SD_STATE_DUMPING) { 9991 bioerror(bp, ENXIO); 9992 bp->b_resid = bp->b_bcount; 9993 biodone(bp); 9994 return (0); 9995 } 9996 9997 ASSERT(!mutex_owned(SD_MUTEX(un))); 9998 9999 /* 10000 * Commands may sneak in while we released the mutex in 10001 * DDI_SUSPEND, we should block new commands. However, old 10002 * commands that are still in the driver at this point should 10003 * still be allowed to drain. 10004 */ 10005 mutex_enter(SD_MUTEX(un)); 10006 /* 10007 * Must wait here if either the device is suspended or 10008 * if it's power level is changing. 
10009 */ 10010 while ((un->un_state == SD_STATE_SUSPENDED) || 10011 (un->un_state == SD_STATE_PM_CHANGING)) { 10012 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10013 } 10014 10015 un->un_ncmds_in_driver++; 10016 10017 /* 10018 * atapi: Since we are running the CD for now in PIO mode we need to 10019 * call bp_mapin here to avoid bp_mapin called interrupt context under 10020 * the HBA's init_pkt routine. 10021 */ 10022 if (un->un_f_cfg_is_atapi == TRUE) { 10023 mutex_exit(SD_MUTEX(un)); 10024 bp_mapin(bp); 10025 mutex_enter(SD_MUTEX(un)); 10026 } 10027 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10028 un->un_ncmds_in_driver); 10029 10030 mutex_exit(SD_MUTEX(un)); 10031 10032 /* 10033 * This will (eventually) allocate the sd_xbuf area and 10034 * call sd_xbuf_strategy(). We just want to return the 10035 * result of ddi_xbuf_qstrategy so that we have an opt- 10036 * imized tail call which saves us a stack frame. 10037 */ 10038 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10039 } 10040 10041 10042 /* 10043 * Function: sd_xbuf_strategy 10044 * 10045 * Description: Function for initiating IO operations via the 10046 * ddi_xbuf_qstrategy() mechanism. 10047 * 10048 * Context: Kernel thread context. 10049 */ 10050 10051 static void 10052 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10053 { 10054 struct sd_lun *un = arg; 10055 10056 ASSERT(bp != NULL); 10057 ASSERT(xp != NULL); 10058 ASSERT(un != NULL); 10059 ASSERT(!mutex_owned(SD_MUTEX(un))); 10060 10061 /* 10062 * Initialize the fields in the xbuf and save a pointer to the 10063 * xbuf in bp->b_private. 10064 */ 10065 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10066 10067 /* Send the buf down the iostart chain */ 10068 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10069 } 10070 10071 10072 /* 10073 * Function: sd_xbuf_init 10074 * 10075 * Description: Prepare the given sd_xbuf struct for use. 10076 * 10077 * Arguments: un - ptr to softstate 10078 * bp - ptr to associated buf(9S) 10079 * xp - ptr to associated sd_xbuf 10080 * chain_type - IO chain type to use: 10081 * SD_CHAIN_NULL 10082 * SD_CHAIN_BUFIO 10083 * SD_CHAIN_USCSI 10084 * SD_CHAIN_DIRECT 10085 * SD_CHAIN_DIRECT_PRIORITY 10086 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10087 * initialization; may be NULL if none. 10088 * 10089 * Context: Kernel thread context 10090 */ 10091 10092 static void 10093 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10094 uchar_t chain_type, void *pktinfop) 10095 { 10096 int index; 10097 10098 ASSERT(un != NULL); 10099 ASSERT(bp != NULL); 10100 ASSERT(xp != NULL); 10101 10102 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10103 bp, chain_type); 10104 10105 xp->xb_un = un; 10106 xp->xb_pktp = NULL; 10107 xp->xb_pktinfo = pktinfop; 10108 xp->xb_private = bp->b_private; 10109 xp->xb_blkno = (daddr_t)bp->b_blkno; 10110 10111 /* 10112 * Set up the iostart and iodone chain indexes in the xbuf, based 10113 * upon the specified chain type to use. 10114 */ 10115 switch (chain_type) { 10116 case SD_CHAIN_NULL: 10117 /* 10118 * Fall thru to just use the values for the buf type, even 10119 * tho for the NULL chain these values will never be used. 
10120 */ 10121 /* FALLTHRU */ 10122 case SD_CHAIN_BUFIO: 10123 index = un->un_buf_chain_type; 10124 break; 10125 case SD_CHAIN_USCSI: 10126 index = un->un_uscsi_chain_type; 10127 break; 10128 case SD_CHAIN_DIRECT: 10129 index = un->un_direct_chain_type; 10130 break; 10131 case SD_CHAIN_DIRECT_PRIORITY: 10132 index = un->un_priority_chain_type; 10133 break; 10134 default: 10135 /* We're really broken if we ever get here... */ 10136 panic("sd_xbuf_init: illegal chain type!"); 10137 /*NOTREACHED*/ 10138 } 10139 10140 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10141 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10142 10143 /* 10144 * It might be a bit easier to simply bzero the entire xbuf above, 10145 * but it turns out that since we init a fair number of members anyway, 10146 * we save a fair number cycles by doing explicit assignment of zero. 10147 */ 10148 xp->xb_pkt_flags = 0; 10149 xp->xb_dma_resid = 0; 10150 xp->xb_retry_count = 0; 10151 xp->xb_victim_retry_count = 0; 10152 xp->xb_ua_retry_count = 0; 10153 xp->xb_sense_bp = NULL; 10154 xp->xb_sense_status = 0; 10155 xp->xb_sense_state = 0; 10156 xp->xb_sense_resid = 0; 10157 10158 bp->b_private = xp; 10159 bp->b_flags &= ~(B_DONE | B_ERROR); 10160 bp->b_resid = 0; 10161 bp->av_forw = NULL; 10162 bp->av_back = NULL; 10163 bioerror(bp, 0); 10164 10165 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10166 } 10167 10168 10169 /* 10170 * Function: sd_uscsi_strategy 10171 * 10172 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10173 * 10174 * Arguments: bp - buf struct ptr 10175 * 10176 * Return Code: Always returns 0 10177 * 10178 * Context: Kernel thread context 10179 */ 10180 10181 static int 10182 sd_uscsi_strategy(struct buf *bp) 10183 { 10184 struct sd_lun *un; 10185 struct sd_uscsi_info *uip; 10186 struct sd_xbuf *xp; 10187 uchar_t chain_type; 10188 10189 ASSERT(bp != NULL); 10190 10191 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10192 if (un == NULL) { 10193 bioerror(bp, EIO); 10194 bp->b_resid = bp->b_bcount; 10195 biodone(bp); 10196 return (0); 10197 } 10198 10199 ASSERT(!mutex_owned(SD_MUTEX(un))); 10200 10201 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10202 10203 mutex_enter(SD_MUTEX(un)); 10204 /* 10205 * atapi: Since we are running the CD for now in PIO mode we need to 10206 * call bp_mapin here to avoid bp_mapin called interrupt context under 10207 * the HBA's init_pkt routine. 
10208 */ 10209 if (un->un_f_cfg_is_atapi == TRUE) { 10210 mutex_exit(SD_MUTEX(un)); 10211 bp_mapin(bp); 10212 mutex_enter(SD_MUTEX(un)); 10213 } 10214 un->un_ncmds_in_driver++; 10215 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10216 un->un_ncmds_in_driver); 10217 mutex_exit(SD_MUTEX(un)); 10218 10219 /* 10220 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10221 */ 10222 ASSERT(bp->b_private != NULL); 10223 uip = (struct sd_uscsi_info *)bp->b_private; 10224 10225 switch (uip->ui_flags) { 10226 case SD_PATH_DIRECT: 10227 chain_type = SD_CHAIN_DIRECT; 10228 break; 10229 case SD_PATH_DIRECT_PRIORITY: 10230 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10231 break; 10232 default: 10233 chain_type = SD_CHAIN_USCSI; 10234 break; 10235 } 10236 10237 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10238 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10239 10240 /* Use the index obtained within xbuf_init */ 10241 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10242 10243 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10244 10245 return (0); 10246 } 10247 10248 /* 10249 * Function: sd_send_scsi_cmd 10250 * 10251 * Description: Runs a USCSI command for user (when called thru sdioctl), 10252 * or for the driver 10253 * 10254 * Arguments: dev - the dev_t for the device 10255 * incmd - ptr to a valid uscsi_cmd struct 10256 * flag - bit flag, indicating open settings, 32/64 bit type 10257 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10258 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10259 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10260 * to use the USCSI "direct" chain and bypass the normal 10261 * command waitq. 10262 * 10263 * Return Code: 0 - successful completion of the given command 10264 * EIO - scsi_uscsi_handle_command() failed 10265 * ENXIO - soft state not found for specified dev 10266 * EINVAL 10267 * EFAULT - copyin/copyout error 10268 * return code of scsi_uscsi_handle_command(): 10269 * EIO 10270 * ENXIO 10271 * EACCES 10272 * 10273 * Context: Waits for command to complete. Can sleep. 
10274 */ 10275 10276 static int 10277 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10278 enum uio_seg dataspace, int path_flag) 10279 { 10280 struct sd_uscsi_info *uip; 10281 struct uscsi_cmd *uscmd; 10282 struct sd_lun *un; 10283 int format = 0; 10284 int rval; 10285 10286 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10287 if (un == NULL) { 10288 return (ENXIO); 10289 } 10290 10291 ASSERT(!mutex_owned(SD_MUTEX(un))); 10292 10293 #ifdef SDDEBUG 10294 switch (dataspace) { 10295 case UIO_USERSPACE: 10296 SD_TRACE(SD_LOG_IO, un, 10297 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10298 break; 10299 case UIO_SYSSPACE: 10300 SD_TRACE(SD_LOG_IO, un, 10301 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10302 break; 10303 default: 10304 SD_TRACE(SD_LOG_IO, un, 10305 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10306 break; 10307 } 10308 #endif 10309 10310 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10311 SD_ADDRESS(un), &uscmd); 10312 if (rval != 0) { 10313 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10314 "scsi_uscsi_alloc_and_copyin failed\n", un); 10315 return (rval); 10316 } 10317 10318 if ((uscmd->uscsi_cdb != NULL) && 10319 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10320 mutex_enter(SD_MUTEX(un)); 10321 un->un_f_format_in_progress = TRUE; 10322 mutex_exit(SD_MUTEX(un)); 10323 format = 1; 10324 } 10325 10326 /* 10327 * Allocate an sd_uscsi_info struct and fill it with the info 10328 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10329 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10330 * since we allocate the buf here in this function, we do not 10331 * need to preserve the prior contents of b_private. 10332 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10333 */ 10334 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10335 uip->ui_flags = path_flag; 10336 uip->ui_cmdp = uscmd; 10337 10338 /* 10339 * Commands sent with priority are intended for error recovery 10340 * situations, and do not have retries performed. 10341 */ 10342 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10343 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10344 } 10345 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10346 10347 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10348 sd_uscsi_strategy, NULL, uip); 10349 10350 #ifdef SDDEBUG 10351 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10352 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10353 uscmd->uscsi_status, uscmd->uscsi_resid); 10354 if (uscmd->uscsi_bufaddr != NULL) { 10355 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10356 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10357 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10358 if (dataspace == UIO_SYSSPACE) { 10359 SD_DUMP_MEMORY(un, SD_LOG_IO, 10360 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10361 uscmd->uscsi_buflen, SD_LOG_HEX); 10362 } 10363 } 10364 #endif 10365 10366 if (format == 1) { 10367 mutex_enter(SD_MUTEX(un)); 10368 un->un_f_format_in_progress = FALSE; 10369 mutex_exit(SD_MUTEX(un)); 10370 } 10371 10372 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10373 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10374 10375 return (rval); 10376 } 10377 10378 10379 /* 10380 * Function: sd_buf_iodone 10381 * 10382 * Description: Frees the sd_xbuf & returns the buf to its originator. 10383 * 10384 * Context: May be called from interrupt context. 
10385 */ 10386 /* ARGSUSED */ 10387 static void 10388 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10389 { 10390 struct sd_xbuf *xp; 10391 10392 ASSERT(un != NULL); 10393 ASSERT(bp != NULL); 10394 ASSERT(!mutex_owned(SD_MUTEX(un))); 10395 10396 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10397 10398 xp = SD_GET_XBUF(bp); 10399 ASSERT(xp != NULL); 10400 10401 mutex_enter(SD_MUTEX(un)); 10402 10403 /* 10404 * Grab time when the cmd completed. 10405 * This is used for determining if the system has been 10406 * idle long enough to make it idle to the PM framework. 10407 * This is for lowering the overhead, and therefore improving 10408 * performance per I/O operation. 10409 */ 10410 un->un_pm_idle_time = ddi_get_time(); 10411 10412 un->un_ncmds_in_driver--; 10413 ASSERT(un->un_ncmds_in_driver >= 0); 10414 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10415 un->un_ncmds_in_driver); 10416 10417 mutex_exit(SD_MUTEX(un)); 10418 10419 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10420 biodone(bp); /* bp is gone after this */ 10421 10422 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10423 } 10424 10425 10426 /* 10427 * Function: sd_uscsi_iodone 10428 * 10429 * Description: Frees the sd_xbuf & returns the buf to its originator. 10430 * 10431 * Context: May be called from interrupt context. 10432 */ 10433 /* ARGSUSED */ 10434 static void 10435 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10436 { 10437 struct sd_xbuf *xp; 10438 10439 ASSERT(un != NULL); 10440 ASSERT(bp != NULL); 10441 10442 xp = SD_GET_XBUF(bp); 10443 ASSERT(xp != NULL); 10444 ASSERT(!mutex_owned(SD_MUTEX(un))); 10445 10446 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10447 10448 bp->b_private = xp->xb_private; 10449 10450 mutex_enter(SD_MUTEX(un)); 10451 10452 /* 10453 * Grab time when the cmd completed. 10454 * This is used for determining if the system has been 10455 * idle long enough to make it idle to the PM framework. 10456 * This is for lowering the overhead, and therefore improving 10457 * performance per I/O operation. 10458 */ 10459 un->un_pm_idle_time = ddi_get_time(); 10460 10461 un->un_ncmds_in_driver--; 10462 ASSERT(un->un_ncmds_in_driver >= 0); 10463 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10464 un->un_ncmds_in_driver); 10465 10466 mutex_exit(SD_MUTEX(un)); 10467 10468 kmem_free(xp, sizeof (struct sd_xbuf)); 10469 biodone(bp); 10470 10471 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10472 } 10473 10474 10475 /* 10476 * Function: sd_mapblockaddr_iostart 10477 * 10478 * Description: Verify request lies withing the partition limits for 10479 * the indicated minor device. Issue "overrun" buf if 10480 * request would exceed partition range. Converts 10481 * partition-relative block address to absolute. 10482 * 10483 * Context: Can sleep 10484 * 10485 * Issues: This follows what the old code did, in terms of accessing 10486 * some of the partition info in the unit struct without holding 10487 * the mutext. This is a general issue, if the partition info 10488 * can be altered while IO is in progress... as soon as we send 10489 * a buf, its partitioning can be invalid before it gets to the 10490 * device. Probably the right fix is to move partitioning out 10491 * of the driver entirely. 
10492 */ 10493 10494 static void 10495 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10496 { 10497 diskaddr_t nblocks; /* #blocks in the given partition */ 10498 daddr_t blocknum; /* Block number specified by the buf */ 10499 size_t requested_nblocks; 10500 size_t available_nblocks; 10501 int partition; 10502 diskaddr_t partition_offset; 10503 struct sd_xbuf *xp; 10504 10505 10506 ASSERT(un != NULL); 10507 ASSERT(bp != NULL); 10508 ASSERT(!mutex_owned(SD_MUTEX(un))); 10509 10510 SD_TRACE(SD_LOG_IO_PARTITION, un, 10511 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10512 10513 xp = SD_GET_XBUF(bp); 10514 ASSERT(xp != NULL); 10515 10516 /* 10517 * If the geometry is not indicated as valid, attempt to access 10518 * the unit & verify the geometry/label. This can be the case for 10519 * removable-media devices, of if the device was opened in 10520 * NDELAY/NONBLOCK mode. 10521 */ 10522 if (!SD_IS_VALID_LABEL(un) && 10523 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10524 /* 10525 * For removable devices it is possible to start an I/O 10526 * without a media by opening the device in nodelay mode. 10527 * Also for writable CDs there can be many scenarios where 10528 * there is no geometry yet but volume manager is trying to 10529 * issue a read() just because it can see TOC on the CD. So 10530 * do not print a message for removables. 10531 */ 10532 if (!un->un_f_has_removable_media) { 10533 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10534 "i/o to invalid geometry\n"); 10535 } 10536 bioerror(bp, EIO); 10537 bp->b_resid = bp->b_bcount; 10538 SD_BEGIN_IODONE(index, un, bp); 10539 return; 10540 } 10541 10542 partition = SDPART(bp->b_edev); 10543 10544 nblocks = 0; 10545 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10546 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10547 10548 /* 10549 * blocknum is the starting block number of the request. At this 10550 * point it is still relative to the start of the minor device. 10551 */ 10552 blocknum = xp->xb_blkno; 10553 10554 /* 10555 * Legacy: If the starting block number is one past the last block 10556 * in the partition, do not set B_ERROR in the buf. 10557 */ 10558 if (blocknum == nblocks) { 10559 goto error_exit; 10560 } 10561 10562 /* 10563 * Confirm that the first block of the request lies within the 10564 * partition limits. Also the requested number of bytes must be 10565 * a multiple of the system block size. 10566 */ 10567 if ((blocknum < 0) || (blocknum >= nblocks) || 10568 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10569 bp->b_flags |= B_ERROR; 10570 goto error_exit; 10571 } 10572 10573 /* 10574 * If the requsted # blocks exceeds the available # blocks, that 10575 * is an overrun of the partition. 10576 */ 10577 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10578 available_nblocks = (size_t)(nblocks - blocknum); 10579 ASSERT(nblocks >= blocknum); 10580 10581 if (requested_nblocks > available_nblocks) { 10582 /* 10583 * Allocate an "overrun" buf to allow the request to proceed 10584 * for the amount of space available in the partition. The 10585 * amount not transferred will be added into the b_resid 10586 * when the operation is complete. The overrun buf 10587 * replaces the original buf here, and the original buf 10588 * is saved inside the overrun buf, for later use. 
10589 */ 10590 size_t resid = SD_SYSBLOCKS2BYTES(un, 10591 (offset_t)(requested_nblocks - available_nblocks)); 10592 size_t count = bp->b_bcount - resid; 10593 /* 10594 * Note: count is an unsigned entity thus it'll NEVER 10595 * be less than 0 so ASSERT the original values are 10596 * correct. 10597 */ 10598 ASSERT(bp->b_bcount >= resid); 10599 10600 bp = sd_bioclone_alloc(bp, count, blocknum, 10601 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10602 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10603 ASSERT(xp != NULL); 10604 } 10605 10606 /* At this point there should be no residual for this buf. */ 10607 ASSERT(bp->b_resid == 0); 10608 10609 /* Convert the block number to an absolute address. */ 10610 xp->xb_blkno += partition_offset; 10611 10612 SD_NEXT_IOSTART(index, un, bp); 10613 10614 SD_TRACE(SD_LOG_IO_PARTITION, un, 10615 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10616 10617 return; 10618 10619 error_exit: 10620 bp->b_resid = bp->b_bcount; 10621 SD_BEGIN_IODONE(index, un, bp); 10622 SD_TRACE(SD_LOG_IO_PARTITION, un, 10623 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10624 } 10625 10626 10627 /* 10628 * Function: sd_mapblockaddr_iodone 10629 * 10630 * Description: Completion-side processing for partition management. 10631 * 10632 * Context: May be called under interrupt context 10633 */ 10634 10635 static void 10636 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10637 { 10638 /* int partition; */ /* Not used, see below. */ 10639 ASSERT(un != NULL); 10640 ASSERT(bp != NULL); 10641 ASSERT(!mutex_owned(SD_MUTEX(un))); 10642 10643 SD_TRACE(SD_LOG_IO_PARTITION, un, 10644 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10645 10646 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10647 /* 10648 * We have an "overrun" buf to deal with... 10649 */ 10650 struct sd_xbuf *xp; 10651 struct buf *obp; /* ptr to the original buf */ 10652 10653 xp = SD_GET_XBUF(bp); 10654 ASSERT(xp != NULL); 10655 10656 /* Retrieve the pointer to the original buf */ 10657 obp = (struct buf *)xp->xb_private; 10658 ASSERT(obp != NULL); 10659 10660 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10661 bioerror(obp, bp->b_error); 10662 10663 sd_bioclone_free(bp); 10664 10665 /* 10666 * Get back the original buf. 10667 * Note that since the restoration of xb_blkno below 10668 * was removed, the sd_xbuf is not needed. 10669 */ 10670 bp = obp; 10671 /* 10672 * xp = SD_GET_XBUF(bp); 10673 * ASSERT(xp != NULL); 10674 */ 10675 } 10676 10677 /* 10678 * Convert sd->xb_blkno back to a minor-device relative value. 10679 * Note: this has been commented out, as it is not needed in the 10680 * current implementation of the driver (ie, since this function 10681 * is at the top of the layering chains, so the info will be 10682 * discarded) and it is in the "hot" IO path. 10683 * 10684 * partition = getminor(bp->b_edev) & SDPART_MASK; 10685 * xp->xb_blkno -= un->un_offset[partition]; 10686 */ 10687 10688 SD_NEXT_IODONE(index, un, bp); 10689 10690 SD_TRACE(SD_LOG_IO_PARTITION, un, 10691 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10692 } 10693 10694 10695 /* 10696 * Function: sd_mapblocksize_iostart 10697 * 10698 * Description: Convert between system block size (un->un_sys_blocksize) 10699 * and target block size (un->un_tgt_blocksize). 10700 * 10701 * Context: Can sleep to allocate resources. 
10702 * 10703 * Assumptions: A higher layer has already performed any partition validation, 10704 * and converted the xp->xb_blkno to an absolute value relative 10705 * to the start of the device. 10706 * 10707 * It is also assumed that the higher layer has implemented 10708 * an "overrun" mechanism for the case where the request would 10709 * read/write beyond the end of a partition. In this case we 10710 * assume (and ASSERT) that bp->b_resid == 0. 10711 * 10712 * Note: The implementation for this routine assumes the target 10713 * block size remains constant between allocation and transport. 10714 */ 10715 10716 static void 10717 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10718 { 10719 struct sd_mapblocksize_info *bsp; 10720 struct sd_xbuf *xp; 10721 offset_t first_byte; 10722 daddr_t start_block, end_block; 10723 daddr_t request_bytes; 10724 ushort_t is_aligned = FALSE; 10725 10726 ASSERT(un != NULL); 10727 ASSERT(bp != NULL); 10728 ASSERT(!mutex_owned(SD_MUTEX(un))); 10729 ASSERT(bp->b_resid == 0); 10730 10731 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10732 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10733 10734 /* 10735 * For a non-writable CD, a write request is an error 10736 */ 10737 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10738 (un->un_f_mmc_writable_media == FALSE)) { 10739 bioerror(bp, EIO); 10740 bp->b_resid = bp->b_bcount; 10741 SD_BEGIN_IODONE(index, un, bp); 10742 return; 10743 } 10744 10745 /* 10746 * We do not need a shadow buf if the device is using 10747 * un->un_sys_blocksize as its block size or if bcount == 0. 10748 * In this case there is no layer-private data block allocated. 10749 */ 10750 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10751 (bp->b_bcount == 0)) { 10752 goto done; 10753 } 10754 10755 #if defined(__i386) || defined(__amd64) 10756 /* We do not support non-block-aligned transfers for ROD devices */ 10757 ASSERT(!ISROD(un)); 10758 #endif 10759 10760 xp = SD_GET_XBUF(bp); 10761 ASSERT(xp != NULL); 10762 10763 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10764 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10765 un->un_tgt_blocksize, un->un_sys_blocksize); 10766 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10767 "request start block:0x%x\n", xp->xb_blkno); 10768 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10769 "request len:0x%x\n", bp->b_bcount); 10770 10771 /* 10772 * Allocate the layer-private data area for the mapblocksize layer. 10773 * Layers are allowed to use the xp_private member of the sd_xbuf 10774 * struct to store the pointer to their layer-private data block, but 10775 * each layer also has the responsibility of restoring the prior 10776 * contents of xb_private before returning the buf/xbuf to the 10777 * higher layer that sent it. 10778 * 10779 * Here we save the prior contents of xp->xb_private into the 10780 * bsp->mbs_oprivate field of our layer-private data area. This value 10781 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10782 * the layer-private area and returning the buf/xbuf to the layer 10783 * that sent it. 10784 * 10785 * Note that here we use kmem_zalloc for the allocation as there are 10786 * parts of the mapblocksize code that expect certain fields to be 10787 * zero unless explicitly set to a required value. 
10788 */ 10789 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10790 bsp->mbs_oprivate = xp->xb_private; 10791 xp->xb_private = bsp; 10792 10793 /* 10794 * This treats the data on the disk (target) as an array of bytes. 10795 * first_byte is the byte offset, from the beginning of the device, 10796 * to the location of the request. This is converted from a 10797 * un->un_sys_blocksize block address to a byte offset, and then back 10798 * to a block address based upon a un->un_tgt_blocksize block size. 10799 * 10800 * xp->xb_blkno should be absolute upon entry into this function, 10801 * but, but it is based upon partitions that use the "system" 10802 * block size. It must be adjusted to reflect the block size of 10803 * the target. 10804 * 10805 * Note that end_block is actually the block that follows the last 10806 * block of the request, but that's what is needed for the computation. 10807 */ 10808 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10809 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10810 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10811 un->un_tgt_blocksize; 10812 10813 /* request_bytes is rounded up to a multiple of the target block size */ 10814 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10815 10816 /* 10817 * See if the starting address of the request and the request 10818 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10819 * then we do not need to allocate a shadow buf to handle the request. 10820 */ 10821 if (((first_byte % un->un_tgt_blocksize) == 0) && 10822 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10823 is_aligned = TRUE; 10824 } 10825 10826 if ((bp->b_flags & B_READ) == 0) { 10827 /* 10828 * Lock the range for a write operation. An aligned request is 10829 * considered a simple write; otherwise the request must be a 10830 * read-modify-write. 10831 */ 10832 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10833 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10834 } 10835 10836 /* 10837 * Alloc a shadow buf if the request is not aligned. Also, this is 10838 * where the READ command is generated for a read-modify-write. (The 10839 * write phase is deferred until after the read completes.) 10840 */ 10841 if (is_aligned == FALSE) { 10842 10843 struct sd_mapblocksize_info *shadow_bsp; 10844 struct sd_xbuf *shadow_xp; 10845 struct buf *shadow_bp; 10846 10847 /* 10848 * Allocate the shadow buf and it associated xbuf. Note that 10849 * after this call the xb_blkno value in both the original 10850 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10851 * same: absolute relative to the start of the device, and 10852 * adjusted for the target block size. The b_blkno in the 10853 * shadow buf will also be set to this value. We should never 10854 * change b_blkno in the original bp however. 10855 * 10856 * Note also that the shadow buf will always need to be a 10857 * READ command, regardless of whether the incoming command 10858 * is a READ or a WRITE. 10859 */ 10860 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10861 xp->xb_blkno, 10862 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10863 10864 shadow_xp = SD_GET_XBUF(shadow_bp); 10865 10866 /* 10867 * Allocate the layer-private data for the shadow buf. 10868 * (No need to preserve xb_private in the shadow xbuf.) 
10869 */ 10870 shadow_xp->xb_private = shadow_bsp = 10871 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10872 10873 /* 10874 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10875 * to figure out where the start of the user data is (based upon 10876 * the system block size) in the data returned by the READ 10877 * command (which will be based upon the target blocksize). Note 10878 * that this is only really used if the request is unaligned. 10879 */ 10880 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10881 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10882 ASSERT((bsp->mbs_copy_offset >= 0) && 10883 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10884 10885 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10886 10887 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10888 10889 /* Transfer the wmap (if any) to the shadow buf */ 10890 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10891 bsp->mbs_wmp = NULL; 10892 10893 /* 10894 * The shadow buf goes on from here in place of the 10895 * original buf. 10896 */ 10897 shadow_bsp->mbs_orig_bp = bp; 10898 bp = shadow_bp; 10899 } 10900 10901 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10902 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10903 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10904 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10905 request_bytes); 10906 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10907 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10908 10909 done: 10910 SD_NEXT_IOSTART(index, un, bp); 10911 10912 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10913 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10914 } 10915 10916 10917 /* 10918 * Function: sd_mapblocksize_iodone 10919 * 10920 * Description: Completion side processing for block-size mapping. 10921 * 10922 * Context: May be called under interrupt context 10923 */ 10924 10925 static void 10926 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 10927 { 10928 struct sd_mapblocksize_info *bsp; 10929 struct sd_xbuf *xp; 10930 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 10931 struct buf *orig_bp; /* ptr to the original buf */ 10932 offset_t shadow_end; 10933 offset_t request_end; 10934 offset_t shadow_start; 10935 ssize_t copy_offset; 10936 size_t copy_length; 10937 size_t shortfall; 10938 uint_t is_write; /* TRUE if this bp is a WRITE */ 10939 uint_t has_wmap; /* TRUE if this bp has a wmap */ 10940 10941 ASSERT(un != NULL); 10942 ASSERT(bp != NULL); 10943 10944 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10945 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 10946 10947 /* 10948 * There is no shadow buf or layer-private data if the target is 10949 * using un->un_sys_blocksize as its block size or if bcount == 0. 10950 */ 10951 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10952 (bp->b_bcount == 0)) { 10953 goto exit; 10954 } 10955 10956 xp = SD_GET_XBUF(bp); 10957 ASSERT(xp != NULL); 10958 10959 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 10960 bsp = xp->xb_private; 10961 10962 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 10963 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 10964 10965 if (is_write) { 10966 /* 10967 * For a WRITE request we must free up the block range that 10968 * we have locked up. This holds regardless of whether this is 10969 * an aligned write request or a read-modify-write request.
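 *
 * (Added note.) In the worked example from sd_mapblocksize_iostart(),
 * the unaligned write locked target block range [1, 1] as
 * SD_WTYPE_RMW; an aligned write over the same range would have been
 * locked as SD_WTYPE_SIMPLE. Either way the range is released here
 * once the WRITE completes.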
10970 */ 10971 sd_range_unlock(un, bsp->mbs_wmp); 10972 bsp->mbs_wmp = NULL; 10973 } 10974 10975 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 10976 /* 10977 * An aligned read or write command will have no shadow buf; 10978 * there is not much else to do with it. 10979 */ 10980 goto done; 10981 } 10982 10983 orig_bp = bsp->mbs_orig_bp; 10984 ASSERT(orig_bp != NULL); 10985 orig_xp = SD_GET_XBUF(orig_bp); 10986 ASSERT(orig_xp != NULL); 10987 ASSERT(!mutex_owned(SD_MUTEX(un))); 10988 10989 if (!is_write && has_wmap) { 10990 /* 10991 * A READ with a wmap means this is the READ phase of a 10992 * read-modify-write. If an error occurred on the READ then 10993 * we do not proceed with the WRITE phase or copy any data. 10994 * Just release the write maps and return with an error. 10995 */ 10996 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 10997 orig_bp->b_resid = orig_bp->b_bcount; 10998 bioerror(orig_bp, bp->b_error); 10999 sd_range_unlock(un, bsp->mbs_wmp); 11000 goto freebuf_done; 11001 } 11002 } 11003 11004 /* 11005 * Here is where we set up to copy the data from the shadow buf 11006 * into the space associated with the original buf. 11007 * 11008 * To deal with the conversion between block sizes, these 11009 * computations treat the data as an array of bytes, with the 11010 * first byte (byte 0) corresponding to the first byte in the 11011 * first block on the disk. 11012 */ 11013 11014 /* 11015 * shadow_start and shadow_end give the location and extent of 11016 * the data returned with the shadow IO request. 11017 */ 11018 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11019 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11020 11021 /* 11022 * copy_offset gives the offset (in bytes) from the start of the first 11023 * block of the READ request to the beginning of the data. We retrieve 11024 * this value from the layer-private data of the shadow xbuf, where it 11025 * was saved by sd_mapblocksize_iostart(). copy_length gives the amount of 11026 * data to be copied (in bytes). 11027 */ 11028 copy_offset = bsp->mbs_copy_offset; 11029 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11030 copy_length = orig_bp->b_bcount; 11031 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11032 11033 /* 11034 * Set up the resid and error fields of orig_bp as appropriate. 11035 */ 11036 if (shadow_end >= request_end) { 11037 /* We got all the requested data; set resid to zero */ 11038 orig_bp->b_resid = 0; 11039 } else { 11040 /* 11041 * We failed to get enough data to fully satisfy the original 11042 * request. Just copy back whatever data we got and set 11043 * up the residual and error code as required. 11044 * 11045 * 'shortfall' is the amount by which the data received with the 11046 * shadow buf has "fallen short" of the requested amount. 11047 */ 11048 shortfall = (size_t)(request_end - shadow_end); 11049 11050 if (shortfall > orig_bp->b_bcount) { 11051 /* 11052 * We did not get enough data to even partially 11053 * fulfill the original request. The residual is 11054 * equal to the amount requested. 11055 */ 11056 orig_bp->b_resid = orig_bp->b_bcount; 11057 } else { 11058 /* 11059 * We did not get all the data that we requested 11060 * from the device, but we will try to return what 11061 * portion we did get.
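 *
 * A numeric sketch of the shortfall case (values assumed for
 * illustration): with shadow_start == 2048, copy_offset == 512 and
 * orig_bp->b_bcount == 1024, request_end is 2048 + 512 + 1024 = 3584.
 * If the 2048-byte shadow READ returns with b_resid == 1024, then
 * shadow_end is 2048 + 2048 - 1024 = 3072, the shortfall is
 * 3584 - 3072 = 512, orig_bp->b_resid becomes 512, and only the
 * first 512 bytes are copied back to the original buf.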
11062 */ 11063 orig_bp->b_resid = shortfall; 11064 } 11065 ASSERT(copy_length >= orig_bp->b_resid); 11066 copy_length -= orig_bp->b_resid; 11067 } 11068 11069 /* Propagate the error code from the shadow buf to the original buf */ 11070 bioerror(orig_bp, bp->b_error); 11071 11072 if (is_write) { 11073 goto freebuf_done; /* No data copying for a WRITE */ 11074 } 11075 11076 if (has_wmap) { 11077 /* 11078 * This is a READ command from the READ phase of a 11079 * read-modify-write request. We have to copy the data given 11080 * by the user OVER the data returned by the READ command, 11081 * then convert the command from a READ to a WRITE and send 11082 * it back to the target. 11083 */ 11084 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11085 copy_length); 11086 11087 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11088 11089 /* 11090 * Dispatch the WRITE command to the taskq thread, which 11091 * will in turn send the command to the target. When the 11092 * WRITE command completes, we (sd_mapblocksize_iodone()) 11093 * will get called again as part of the iodone chain 11094 * processing for it. Note that we will still be dealing 11095 * with the shadow buf at that point. 11096 */ 11097 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11098 KM_NOSLEEP) != 0) { 11099 /* 11100 * Dispatch was successful so we are done. Return 11101 * without going any higher up the iodone chain. Do 11102 * not free up any layer-private data until after the 11103 * WRITE completes. 11104 */ 11105 return; 11106 } 11107 11108 /* 11109 * Dispatch of the WRITE command failed; set up the error 11110 * condition and send this IO back up the iodone chain. 11111 */ 11112 bioerror(orig_bp, EIO); 11113 orig_bp->b_resid = orig_bp->b_bcount; 11114 11115 } else { 11116 /* 11117 * This is a regular READ request (ie, not a RMW). Copy the 11118 * data from the shadow buf into the original buf. The 11119 * copy_offset compensates for any "misalignment" between the 11120 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11121 * original buf (with its un->un_sys_blocksize blocks). 11122 */ 11123 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11124 copy_length); 11125 } 11126 11127 freebuf_done: 11128 11129 /* 11130 * At this point we still have both the shadow buf AND the original 11131 * buf to deal with, as well as the layer-private data area in each. 11132 * Local variables are as follows: 11133 * 11134 * bp -- points to shadow buf 11135 * xp -- points to xbuf of shadow buf 11136 * bsp -- points to layer-private data area of shadow buf 11137 * orig_bp -- points to original buf 11138 * 11139 * First free the shadow buf and its associated xbuf, then free the 11140 * layer-private data area from the shadow buf. There is no need to 11141 * restore xb_private in the shadow xbuf. 11142 */ 11143 sd_shadow_buf_free(bp); 11144 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11145 11146 /* 11147 * Now update the local variables to point to the original buf, xbuf, 11148 * and layer-private area. 11149 */ 11150 bp = orig_bp; 11151 xp = SD_GET_XBUF(bp); 11152 ASSERT(xp != NULL); 11153 ASSERT(xp == orig_xp); 11154 bsp = xp->xb_private; 11155 ASSERT(bsp != NULL); 11156 11157 done: 11158 /* 11159 * Restore xb_private to whatever it was set to by the next higher 11160 * layer in the chain, then free the layer-private data area. 
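 *
 * In effect each layer uses xb_private as a one-deep stack: the
 * iostart side pushed its state (bsp->mbs_oprivate = xp->xb_private;
 * xp->xb_private = bsp) and the iodone side pops it here. Any layer
 * added to the chain should observe the same save/restore discipline
 * so that the layers can nest cleanly.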
11161 */ 11162 xp->xb_private = bsp->mbs_oprivate; 11163 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11164 11165 exit: 11166 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11167 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11168 11169 SD_NEXT_IODONE(index, un, bp); 11170 } 11171 11172 11173 /* 11174 * Function: sd_checksum_iostart 11175 * 11176 * Description: A stub function for a layer that's currently not used. 11177 * For now just a placeholder. 11178 * 11179 * Context: Kernel thread context 11180 */ 11181 11182 static void 11183 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11184 { 11185 ASSERT(un != NULL); 11186 ASSERT(bp != NULL); 11187 ASSERT(!mutex_owned(SD_MUTEX(un))); 11188 SD_NEXT_IOSTART(index, un, bp); 11189 } 11190 11191 11192 /* 11193 * Function: sd_checksum_iodone 11194 * 11195 * Description: A stub function for a layer that's currently not used. 11196 * For now just a placeholder. 11197 * 11198 * Context: May be called under interrupt context 11199 */ 11200 11201 static void 11202 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11203 { 11204 ASSERT(un != NULL); 11205 ASSERT(bp != NULL); 11206 ASSERT(!mutex_owned(SD_MUTEX(un))); 11207 SD_NEXT_IODONE(index, un, bp); 11208 } 11209 11210 11211 /* 11212 * Function: sd_checksum_uscsi_iostart 11213 * 11214 * Description: A stub function for a layer that's currently not used. 11215 * For now just a placeholder. 11216 * 11217 * Context: Kernel thread context 11218 */ 11219 11220 static void 11221 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11222 { 11223 ASSERT(un != NULL); 11224 ASSERT(bp != NULL); 11225 ASSERT(!mutex_owned(SD_MUTEX(un))); 11226 SD_NEXT_IOSTART(index, un, bp); 11227 } 11228 11229 11230 /* 11231 * Function: sd_checksum_uscsi_iodone 11232 * 11233 * Description: A stub function for a layer that's currently not used. 11234 * For now just a placeholder. 11235 * 11236 * Context: May be called under interrupt context 11237 */ 11238 11239 static void 11240 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11241 { 11242 ASSERT(un != NULL); 11243 ASSERT(bp != NULL); 11244 ASSERT(!mutex_owned(SD_MUTEX(un))); 11245 SD_NEXT_IODONE(index, un, bp); 11246 } 11247 11248 11249 /* 11250 * Function: sd_pm_iostart 11251 * 11252 * Description: iostart-side routine for Power management. 11253 * 11254 * Context: Kernel thread context 11255 */ 11256 11257 static void 11258 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11259 { 11260 ASSERT(un != NULL); 11261 ASSERT(bp != NULL); 11262 ASSERT(!mutex_owned(SD_MUTEX(un))); 11263 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11264 11265 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11266 11267 if (sd_pm_entry(un) != DDI_SUCCESS) { 11268 /* 11269 * Set up to return the failed buf back up the 'iodone' 11270 * side of the calling chain. 11271 */ 11272 bioerror(bp, EIO); 11273 bp->b_resid = bp->b_bcount; 11274 11275 SD_BEGIN_IODONE(index, un, bp); 11276 11277 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11278 return; 11279 } 11280 11281 SD_NEXT_IOSTART(index, un, bp); 11282 11283 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11284 } 11285 11286 11287 /* 11288 * Function: sd_pm_iodone 11289 * 11290 * Description: iodone-side routine for power management.
11291 * 11292 * Context: may be called from interrupt context 11293 */ 11294 11295 static void 11296 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11297 { 11298 ASSERT(un != NULL); 11299 ASSERT(bp != NULL); 11300 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11301 11302 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11303 11304 /* 11305 * After attach the following flag is only read, so don't 11306 * take the penalty of acquiring a mutex for it. 11307 */ 11308 if (un->un_f_pm_is_enabled == TRUE) { 11309 sd_pm_exit(un); 11310 } 11311 11312 SD_NEXT_IODONE(index, un, bp); 11313 11314 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11315 } 11316 11317 11318 /* 11319 * Function: sd_core_iostart 11320 * 11321 * Description: Primary driver function for enqueuing buf(9S) structs from 11322 * the system and initiating IO to the target device 11323 * 11324 * Context: Kernel thread context. Can sleep. 11325 * 11326 * Assumptions: - The given xp->xb_blkno is absolute 11327 * (ie, relative to the start of the device). 11328 * - The IO is to be done using the native blocksize of 11329 * the device, as specified in un->un_tgt_blocksize. 11330 */ 11331 /* ARGSUSED */ 11332 static void 11333 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11334 { 11335 struct sd_xbuf *xp; 11336 11337 ASSERT(un != NULL); 11338 ASSERT(bp != NULL); 11339 ASSERT(!mutex_owned(SD_MUTEX(un))); 11340 ASSERT(bp->b_resid == 0); 11341 11342 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11343 11344 xp = SD_GET_XBUF(bp); 11345 ASSERT(xp != NULL); 11346 11347 mutex_enter(SD_MUTEX(un)); 11348 11349 /* 11350 * If we are currently in the failfast state, fail any new IO 11351 * that has B_FAILFAST set, then return. 11352 */ 11353 if ((bp->b_flags & B_FAILFAST) && 11354 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11355 mutex_exit(SD_MUTEX(un)); 11356 bioerror(bp, EIO); 11357 bp->b_resid = bp->b_bcount; 11358 SD_BEGIN_IODONE(index, un, bp); 11359 return; 11360 } 11361 11362 if (SD_IS_DIRECT_PRIORITY(xp)) { 11363 /* 11364 * Priority command -- transport it immediately. 11365 * 11366 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11367 * because all direct priority commands should be associated 11368 * with error recovery actions which we don't want to retry. 11369 */ 11370 sd_start_cmds(un, bp); 11371 } else { 11372 /* 11373 * Normal command -- add it to the wait queue, then start 11374 * transporting commands from the wait queue. 11375 */ 11376 sd_add_buf_to_waitq(un, bp); 11377 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11378 sd_start_cmds(un, NULL); 11379 } 11380 11381 mutex_exit(SD_MUTEX(un)); 11382 11383 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11384 } 11385 11386 11387 /* 11388 * Function: sd_init_cdb_limits 11389 * 11390 * Description: This is to handle scsi_pkt initialization differences 11391 * between the driver platforms. 11392 * 11393 * Legacy behaviors: 11394 * 11395 * If the block number or the sector count exceeds the 11396 * capabilities of a Group 0 command, shift over to a 11397 * Group 1 command. We don't blindly use Group 1 11398 * commands because a) some drives (CDC Wren IVs) get a 11399 * bit confused, and b) there is probably a fair amount 11400 * of speed difference for a target to receive and decode 11401 * a 10 byte command instead of a 6 byte command. 11402 * 11403 * The xfer time difference of 6 vs 10 byte CDBs is 11404 * still significant so this code is still worthwhile. 
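 *
 * For reference (standard SCSI CDB limits, added here): a Group 0
 * (6-byte) READ/WRITE carries a 21-bit LBA and an 8-bit block count
 * (0 meaning 256 blocks), so it only reaches about 1 GB into a
 * 512-byte-sector disk, while a Group 1 (10-byte) CDB carries a
 * 32-bit LBA and a 16-bit count. sd_setup_rw_pkt() later picks the
 * smallest group within the [un_mincdb, un_maxcdb] range computed
 * below that can address a given request.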
11405 * 10 byte CDBs are very inefficient with the fas HBA driver 11406 * and older disks. Each CDB byte took 1 usec with some 11407 * popular disks. 11408 * 11409 * Context: Must be called at attach time 11410 */ 11411 11412 static void 11413 sd_init_cdb_limits(struct sd_lun *un) 11414 { 11415 int hba_cdb_limit; 11416 11417 /* 11418 * Use CDB_GROUP1 commands for most devices except for 11419 * parallel SCSI fixed drives in which case we get better 11420 * performance using CDB_GROUP0 commands (where applicable). 11421 */ 11422 un->un_mincdb = SD_CDB_GROUP1; 11423 #if !defined(__fibre) 11424 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11425 !un->un_f_has_removable_media) { 11426 un->un_mincdb = SD_CDB_GROUP0; 11427 } 11428 #endif 11429 11430 /* 11431 * Try to read the max-cdb-length supported by HBA. 11432 */ 11433 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11434 if (0 >= un->un_max_hba_cdb) { 11435 un->un_max_hba_cdb = CDB_GROUP4; 11436 hba_cdb_limit = SD_CDB_GROUP4; 11437 } else if (0 < un->un_max_hba_cdb && 11438 un->un_max_hba_cdb < CDB_GROUP1) { 11439 hba_cdb_limit = SD_CDB_GROUP0; 11440 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11441 un->un_max_hba_cdb < CDB_GROUP5) { 11442 hba_cdb_limit = SD_CDB_GROUP1; 11443 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11444 un->un_max_hba_cdb < CDB_GROUP4) { 11445 hba_cdb_limit = SD_CDB_GROUP5; 11446 } else { 11447 hba_cdb_limit = SD_CDB_GROUP4; 11448 } 11449 11450 /* 11451 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11452 * commands for fixed disks unless we are building for a 32 bit 11453 * kernel. 11454 */ 11455 #ifdef _LP64 11456 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11457 min(hba_cdb_limit, SD_CDB_GROUP4); 11458 #else 11459 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11460 min(hba_cdb_limit, SD_CDB_GROUP1); 11461 #endif 11462 11463 /* 11464 * x86 systems require the PKT_DMA_PARTIAL flag 11465 */ 11466 #if defined(__x86) 11467 un->un_pkt_flags = PKT_DMA_PARTIAL; 11468 #else 11469 un->un_pkt_flags = 0; 11470 #endif 11471 11472 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11473 ? sizeof (struct scsi_arq_status) : 1); 11474 un->un_cmd_timeout = (ushort_t)sd_io_time; 11475 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11476 } 11477 11478 11479 /* 11480 * Function: sd_initpkt_for_buf 11481 * 11482 * Description: Allocate and initialize for transport a scsi_pkt struct, 11483 * based upon the info specified in the given buf struct. 11484 * 11485 * Assumes the xb_blkno in the request is absolute (ie, 11486 * relative to the start of the device (NOT partition!). 11487 * Also assumes that the request is using the native block 11488 * size of the device (as returned by the READ CAPACITY 11489 * command). 11490 * 11491 * Return Code: SD_PKT_ALLOC_SUCCESS 11492 * SD_PKT_ALLOC_FAILURE 11493 * SD_PKT_ALLOC_FAILURE_NO_DMA 11494 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11495 * 11496 * Context: Kernel thread and may be called from software interrupt context 11497 * as part of a sdrunout callback. 
This function may not block or 11498 * call routines that block 11499 */ 11500 11501 static int 11502 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11503 { 11504 struct sd_xbuf *xp; 11505 struct scsi_pkt *pktp = NULL; 11506 struct sd_lun *un; 11507 size_t blockcount; 11508 daddr_t startblock; 11509 int rval; 11510 int cmd_flags; 11511 11512 ASSERT(bp != NULL); 11513 ASSERT(pktpp != NULL); 11514 xp = SD_GET_XBUF(bp); 11515 ASSERT(xp != NULL); 11516 un = SD_GET_UN(bp); 11517 ASSERT(un != NULL); 11518 ASSERT(mutex_owned(SD_MUTEX(un))); 11519 ASSERT(bp->b_resid == 0); 11520 11521 SD_TRACE(SD_LOG_IO_CORE, un, 11522 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11523 11524 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11525 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11526 /* 11527 * Already have a scsi_pkt -- just need DMA resources. 11528 * We must recompute the CDB in case the mapping returns 11529 * a nonzero pkt_resid. 11530 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11531 * that is being retried, the unmap/remap of the DMA resources 11532 * will result in the entire transfer starting over again 11533 * from the very first block. 11534 */ 11535 ASSERT(xp->xb_pktp != NULL); 11536 pktp = xp->xb_pktp; 11537 } else { 11538 pktp = NULL; 11539 } 11540 #endif /* __i386 || __amd64 */ 11541 11542 startblock = xp->xb_blkno; /* Absolute block num. */ 11543 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11544 11545 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11546 11547 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11548 11549 #else 11550 11551 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11552 11553 #endif 11554 11555 /* 11556 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11557 * call scsi_init_pkt, and build the CDB. 11558 */ 11559 rval = sd_setup_rw_pkt(un, &pktp, bp, 11560 cmd_flags, sdrunout, (caddr_t)un, 11561 startblock, blockcount); 11562 11563 if (rval == 0) { 11564 /* 11565 * Success. 11566 * 11567 * If partial DMA is being used and required for this transfer, 11568 * set it up here. 11569 */ 11570 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11571 (pktp->pkt_resid != 0)) { 11572 11573 /* 11574 * Save the CDB length and pkt_resid for the 11575 * next xfer 11576 */ 11577 xp->xb_dma_resid = pktp->pkt_resid; 11578 11579 /* rezero resid */ 11580 pktp->pkt_resid = 0; 11581 11582 } else { 11583 xp->xb_dma_resid = 0; 11584 } 11585 11586 pktp->pkt_flags = un->un_tagflags; 11587 pktp->pkt_time = un->un_cmd_timeout; 11588 pktp->pkt_comp = sdintr; 11589 11590 pktp->pkt_private = bp; 11591 *pktpp = pktp; 11592 11593 SD_TRACE(SD_LOG_IO_CORE, un, 11594 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11595 11596 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11597 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11598 #endif 11599 11600 return (SD_PKT_ALLOC_SUCCESS); 11601 11602 } 11603 11604 /* 11605 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11606 * from sd_setup_rw_pkt. 11607 */ 11608 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11609 11610 if (rval == SD_PKT_ALLOC_FAILURE) { 11611 *pktpp = NULL; 11612 /* 11613 * Set the driver state to RWAIT to indicate the driver 11614 * is waiting on resource allocations. The driver will not 11615 * suspend, pm_suspend, or detach while the state is RWAIT. 11616 */ 11617 New_state(un, SD_STATE_RWAIT); 11618 11619 SD_ERROR(SD_LOG_IO_CORE, un, 11620 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 11621 11622 if ((bp->b_flags & B_ERROR) != 0) { 11623 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11624 } 11625 return (SD_PKT_ALLOC_FAILURE); 11626 } else { 11627 /* 11628 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11629 * 11630 * This should never happen. Maybe someone messed with the 11631 * kernel's minphys? 11632 */ 11633 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11634 "Request rejected: too large for CDB: " 11635 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11636 SD_ERROR(SD_LOG_IO_CORE, un, 11637 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11638 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11639 11640 } 11641 } 11642 11643 11644 /* 11645 * Function: sd_destroypkt_for_buf 11646 * 11647 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11648 * 11649 * Context: Kernel thread or interrupt context 11650 */ 11651 11652 static void 11653 sd_destroypkt_for_buf(struct buf *bp) 11654 { 11655 ASSERT(bp != NULL); 11656 ASSERT(SD_GET_UN(bp) != NULL); 11657 11658 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11659 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11660 11661 ASSERT(SD_GET_PKTP(bp) != NULL); 11662 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11663 11664 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11665 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11666 } 11667 11668 /* 11669 * Function: sd_setup_rw_pkt 11670 * 11671 * Description: Determines appropriate CDB group for the requested LBA 11672 * and transfer length, calls scsi_init_pkt, and builds 11673 * the CDB. Do not use for partial DMA transfers except 11674 * for the initial transfer since the CDB size must 11675 * remain constant. 11676 * 11677 * Context: Kernel thread and may be called from software interrupt 11678 * context as part of a sdrunout callback. This function may not 11679 * block or call routines that block 11680 */ 11681 11682 11683 int 11684 sd_setup_rw_pkt(struct sd_lun *un, 11685 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11686 int (*callback)(caddr_t), caddr_t callback_arg, 11687 diskaddr_t lba, uint32_t blockcount) 11688 { 11689 struct scsi_pkt *return_pktp; 11690 union scsi_cdb *cdbp; 11691 struct sd_cdbinfo *cp = NULL; 11692 int i; 11693 11694 /* 11695 * See which size CDB to use, based upon the request. 11696 */ 11697 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11698 11699 /* 11700 * Check lba and block count against sd_cdbtab limits. 11701 * In the partial DMA case, we have to use the same size 11702 * CDB for all the transfers. Check lba + blockcount 11703 * against the max LBA so we know that segment of the 11704 * transfer can use the CDB we select. 11705 */ 11706 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11707 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11708 11709 /* 11710 * The command will fit into the CDB type 11711 * specified by sd_cdbtab[i]. 11712 */ 11713 cp = sd_cdbtab + i; 11714 11715 /* 11716 * Call scsi_init_pkt so we can fill in the 11717 * CDB. 11718 */ 11719 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11720 bp, cp->sc_grpcode, un->un_status_len, 0, 11721 flags, callback, callback_arg); 11722 11723 if (return_pktp != NULL) { 11724 11725 /* 11726 * Return new value of pkt 11727 */ 11728 *pktpp = return_pktp; 11729 11730 /* 11731 * To be safe, zero the CDB ensuring there is 11732 * no leftover data from a previous command.
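 *
 * (Added walk-through, with sd_cdbtab assumed to mirror the standard
 * CDB formats.) For example, a request for 512 blocks at lba 0x200000
 * overflows Group 0 on both counts (21-bit LBA, max 256 blocks), so
 * the selection loop above settles on the Group 1 entry, whose 32-bit
 * LBA and 16-bit count accommodate the request.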
11733 */ 11734 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11735 11736 /* 11737 * Handle partial DMA mapping 11738 */ 11739 if (return_pktp->pkt_resid != 0) { 11740 11741 /* 11742 * Not going to xfer as many blocks as 11743 * originally expected 11744 */ 11745 blockcount -= 11746 SD_BYTES2TGTBLOCKS(un, 11747 return_pktp->pkt_resid); 11748 } 11749 11750 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11751 11752 /* 11753 * Set command byte based on the CDB 11754 * type we matched. 11755 */ 11756 cdbp->scc_cmd = cp->sc_grpmask | 11757 ((bp->b_flags & B_READ) ? 11758 SCMD_READ : SCMD_WRITE); 11759 11760 SD_FILL_SCSI1_LUN(un, return_pktp); 11761 11762 /* 11763 * Fill in LBA and length 11764 */ 11765 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11766 (cp->sc_grpcode == CDB_GROUP4) || 11767 (cp->sc_grpcode == CDB_GROUP0) || 11768 (cp->sc_grpcode == CDB_GROUP5)); 11769 11770 if (cp->sc_grpcode == CDB_GROUP1) { 11771 FORMG1ADDR(cdbp, lba); 11772 FORMG1COUNT(cdbp, blockcount); 11773 return (0); 11774 } else if (cp->sc_grpcode == CDB_GROUP4) { 11775 FORMG4LONGADDR(cdbp, lba); 11776 FORMG4COUNT(cdbp, blockcount); 11777 return (0); 11778 } else if (cp->sc_grpcode == CDB_GROUP0) { 11779 FORMG0ADDR(cdbp, lba); 11780 FORMG0COUNT(cdbp, blockcount); 11781 return (0); 11782 } else if (cp->sc_grpcode == CDB_GROUP5) { 11783 FORMG5ADDR(cdbp, lba); 11784 FORMG5COUNT(cdbp, blockcount); 11785 return (0); 11786 } 11787 11788 /* 11789 * It should be impossible to not match one 11790 * of the CDB types above, so we should never 11791 * reach this point. Set the CDB command byte 11792 * to test-unit-ready to avoid writing 11793 * to somewhere we don't intend. 11794 */ 11795 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11796 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11797 } else { 11798 /* 11799 * Couldn't get scsi_pkt 11800 */ 11801 return (SD_PKT_ALLOC_FAILURE); 11802 } 11803 } 11804 } 11805 11806 /* 11807 * None of the available CDB types were suitable. This really 11808 * should never happen: on a 64 bit system we support 11809 * READ16/WRITE16 which will hold an entire 64 bit disk address 11810 * and on a 32 bit system we will refuse to bind to a device 11811 * larger than 2TB so addresses will never be larger than 32 bits. 11812 */ 11813 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11814 } 11815 11816 #if defined(__i386) || defined(__amd64) 11817 /* 11818 * Function: sd_setup_next_rw_pkt 11819 * 11820 * Description: Setup packet for partial DMA transfers, except for the 11821 * initial transfer. sd_setup_rw_pkt should be used for 11822 * the initial transfer. 11823 * 11824 * Context: Kernel thread and may be called from interrupt context. 11825 */ 11826 11827 int 11828 sd_setup_next_rw_pkt(struct sd_lun *un, 11829 struct scsi_pkt *pktp, struct buf *bp, 11830 diskaddr_t lba, uint32_t blockcount) 11831 { 11832 uchar_t com; 11833 union scsi_cdb *cdbp; 11834 uchar_t cdb_group_id; 11835 11836 ASSERT(pktp != NULL); 11837 ASSERT(pktp->pkt_cdbp != NULL); 11838 11839 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11840 com = cdbp->scc_cmd; 11841 cdb_group_id = CDB_GROUPID(com); 11842 11843 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11844 (cdb_group_id == CDB_GROUPID_1) || 11845 (cdb_group_id == CDB_GROUPID_4) || 11846 (cdb_group_id == CDB_GROUPID_5)); 11847 11848 /* 11849 * Move pkt to the next portion of the xfer. 11850 * func is NULL_FUNC so we do not have to release 11851 * the disk mutex here. 11852 */ 11853 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11854 NULL_FUNC, NULL) == pktp) { 11855 /* Success. 
Handle partial DMA */ 11856 if (pktp->pkt_resid != 0) { 11857 blockcount -= 11858 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11859 } 11860 11861 cdbp->scc_cmd = com; 11862 SD_FILL_SCSI1_LUN(un, pktp); 11863 if (cdb_group_id == CDB_GROUPID_1) { 11864 FORMG1ADDR(cdbp, lba); 11865 FORMG1COUNT(cdbp, blockcount); 11866 return (0); 11867 } else if (cdb_group_id == CDB_GROUPID_4) { 11868 FORMG4LONGADDR(cdbp, lba); 11869 FORMG4COUNT(cdbp, blockcount); 11870 return (0); 11871 } else if (cdb_group_id == CDB_GROUPID_0) { 11872 FORMG0ADDR(cdbp, lba); 11873 FORMG0COUNT(cdbp, blockcount); 11874 return (0); 11875 } else if (cdb_group_id == CDB_GROUPID_5) { 11876 FORMG5ADDR(cdbp, lba); 11877 FORMG5COUNT(cdbp, blockcount); 11878 return (0); 11879 } 11880 11881 /* Unreachable */ 11882 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11883 } 11884 11885 /* 11886 * Error setting up next portion of cmd transfer. 11887 * Something is definitely very wrong and this 11888 * should not happen. 11889 */ 11890 return (SD_PKT_ALLOC_FAILURE); 11891 } 11892 #endif /* defined(__i386) || defined(__amd64) */ 11893 11894 /* 11895 * Function: sd_initpkt_for_uscsi 11896 * 11897 * Description: Allocate and initialize for transport a scsi_pkt struct, 11898 * based upon the info specified in the given uscsi_cmd struct. 11899 * 11900 * Return Code: SD_PKT_ALLOC_SUCCESS 11901 * SD_PKT_ALLOC_FAILURE 11902 * SD_PKT_ALLOC_FAILURE_NO_DMA 11903 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11904 * 11905 * Context: Kernel thread and may be called from software interrupt context 11906 * as part of a sdrunout callback. This function may not block or 11907 * call routines that block 11908 */ 11909 11910 static int 11911 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11912 { 11913 struct uscsi_cmd *uscmd; 11914 struct sd_xbuf *xp; 11915 struct scsi_pkt *pktp; 11916 struct sd_lun *un; 11917 uint32_t flags = 0; 11918 11919 ASSERT(bp != NULL); 11920 ASSERT(pktpp != NULL); 11921 xp = SD_GET_XBUF(bp); 11922 ASSERT(xp != NULL); 11923 un = SD_GET_UN(bp); 11924 ASSERT(un != NULL); 11925 ASSERT(mutex_owned(SD_MUTEX(un))); 11926 11927 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 11928 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 11929 ASSERT(uscmd != NULL); 11930 11931 SD_TRACE(SD_LOG_IO_CORE, un, 11932 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 11933 11934 /* 11935 * Allocate the scsi_pkt for the command. 11936 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 11937 * during scsi_init_pkt time and will continue to use the 11938 * same path as long as the same scsi_pkt is used without 11939 * intervening scsi_dmafree(). Since a uscsi command does 11940 * not call scsi_dmafree() before retrying a failed command, it 11941 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 11942 * set so that scsi_vhci can use another available path for the 11943 * retry. Besides, a uscsi command does not allow DMA breakup, 11944 * so there is no need to set the PKT_DMA_PARTIAL flag. 11945 */ 11946 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 11947 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 11948 sizeof (struct scsi_arq_status), 0, 11949 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 11950 sdrunout, (caddr_t)un); 11951 11952 if (pktp == NULL) { 11953 *pktpp = NULL; 11954 /* 11955 * Set the driver state to RWAIT to indicate the driver 11956 * is waiting on resource allocations. The driver will not 11957 * suspend, pm_suspend, or detach while the state is RWAIT.
11958 */ 11959 New_state(un, SD_STATE_RWAIT); 11960 11961 SD_ERROR(SD_LOG_IO_CORE, un, 11962 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 11963 11964 if ((bp->b_flags & B_ERROR) != 0) { 11965 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11966 } 11967 return (SD_PKT_ALLOC_FAILURE); 11968 } 11969 11970 /* 11971 * We do not do DMA breakup for USCSI commands, so return failure 11972 * here if all the needed DMA resources were not allocated. 11973 */ 11974 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 11975 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 11976 scsi_destroy_pkt(pktp); 11977 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 11978 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 11979 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 11980 } 11981 11982 /* Init the cdb from the given uscsi struct */ 11983 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 11984 uscmd->uscsi_cdb[0], 0, 0, 0); 11985 11986 SD_FILL_SCSI1_LUN(un, pktp); 11987 11988 /* 11989 * Set up the optional USCSI flags. See the uscsi(7I) man page 11990 * for a listing of the supported flags. 11991 */ 11992 11993 if (uscmd->uscsi_flags & USCSI_SILENT) { 11994 flags |= FLAG_SILENT; 11995 } 11996 11997 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 11998 flags |= FLAG_DIAGNOSE; 11999 } 12000 12001 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12002 flags |= FLAG_ISOLATE; 12003 } 12004 12005 if (un->un_f_is_fibre == FALSE) { 12006 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12007 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12008 } 12009 } 12010 12011 /* 12012 * Set the pkt flags here so we save time later. 12013 * Note: These flags are NOT in the uscsi man page!!! 12014 */ 12015 if (uscmd->uscsi_flags & USCSI_HEAD) { 12016 flags |= FLAG_HEAD; 12017 } 12018 12019 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12020 flags |= FLAG_NOINTR; 12021 } 12022 12023 /* 12024 * For tagged queueing, things get a bit complicated. 12025 * Check first for head of queue and last for ordered queue. 12026 * If neither head nor order, use the default driver tag flags. 12027 */ 12028 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12029 if (uscmd->uscsi_flags & USCSI_HTAG) { 12030 flags |= FLAG_HTAG; 12031 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12032 flags |= FLAG_OTAG; 12033 } else { 12034 flags |= un->un_tagflags & FLAG_TAGMASK; 12035 } 12036 } 12037 12038 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12039 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12040 } 12041 12042 pktp->pkt_flags = flags; 12043 12044 /* Copy the caller's CDB into the pkt... */ 12045 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12046 12047 if (uscmd->uscsi_timeout == 0) { 12048 pktp->pkt_time = un->un_uscsi_timeout; 12049 } else { 12050 pktp->pkt_time = uscmd->uscsi_timeout; 12051 } 12052 12053 /* need it later to identify USCSI request in sdintr */ 12054 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12055 12056 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12057 12058 pktp->pkt_private = bp; 12059 pktp->pkt_comp = sdintr; 12060 *pktpp = pktp; 12061 12062 SD_TRACE(SD_LOG_IO_CORE, un, 12063 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12064 12065 return (SD_PKT_ALLOC_SUCCESS); 12066 } 12067 12068 12069 /* 12070 * Function: sd_destroypkt_for_uscsi 12071 * 12072 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12073 * IOs. Also saves relevant info into the associated uscsi_cmd 12074 * struct.
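 *
 * For context, a minimal user-level sketch of the uscsi(7I) interface
 * that ultimately exercises this path (illustrative only; error
 * handling and headers omitted):
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	ucmd.uscsi_timeout = 30;
 *	(void) ioctl(fd, USCSICMD, &ucmd);
 *
 * On return, uscsi_status and uscsi_resid (and, with USCSI_RQENABLE,
 * uscsi_rqstatus/uscsi_rqresid/uscsi_rqbuf) hold exactly the values
 * this routine saves below.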
12075 * 12076 * Context: May be called under interrupt context 12077 */ 12078 12079 static void 12080 sd_destroypkt_for_uscsi(struct buf *bp) 12081 { 12082 struct uscsi_cmd *uscmd; 12083 struct sd_xbuf *xp; 12084 struct scsi_pkt *pktp; 12085 struct sd_lun *un; 12086 12087 ASSERT(bp != NULL); 12088 xp = SD_GET_XBUF(bp); 12089 ASSERT(xp != NULL); 12090 un = SD_GET_UN(bp); 12091 ASSERT(un != NULL); 12092 ASSERT(!mutex_owned(SD_MUTEX(un))); 12093 pktp = SD_GET_PKTP(bp); 12094 ASSERT(pktp != NULL); 12095 12096 SD_TRACE(SD_LOG_IO_CORE, un, 12097 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12098 12099 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12100 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12101 ASSERT(uscmd != NULL); 12102 12103 /* Save the status and the residual into the uscsi_cmd struct */ 12104 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12105 uscmd->uscsi_resid = bp->b_resid; 12106 12107 /* 12108 * If enabled, copy any saved sense data into the area specified 12109 * by the uscsi command. 12110 */ 12111 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12112 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12113 /* 12114 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12115 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12116 */ 12117 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12118 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12119 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12120 } 12121 12122 /* We are done with the scsi_pkt; free it now */ 12123 ASSERT(SD_GET_PKTP(bp) != NULL); 12124 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12125 12126 SD_TRACE(SD_LOG_IO_CORE, un, 12127 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12128 } 12129 12130 12131 /* 12132 * Function: sd_bioclone_alloc 12133 * 12134 * Description: Allocate a buf(9S) and init it as per the given buf 12135 * and the various arguments. The associated sd_xbuf 12136 * struct is (nearly) duplicated. The struct buf *bp 12137 * argument is saved in new_xp->xb_private. 12138 * 12139 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12140 * datalen - size of data area for the shadow bp 12141 * blkno - starting LBA 12142 * func - function pointer for b_iodone in the shadow buf. (May 12143 * be NULL if none.) 12144 * 12145 * Return Code: Pointer to the allocated buf(9S) struct 12146 * 12147 * Context: Can sleep. 12148 */ 12149 12150 static struct buf * 12151 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12152 daddr_t blkno, int (*func)(struct buf *)) 12153 { 12154 struct sd_lun *un; 12155 struct sd_xbuf *xp; 12156 struct sd_xbuf *new_xp; 12157 struct buf *new_bp; 12158 12159 ASSERT(bp != NULL); 12160 xp = SD_GET_XBUF(bp); 12161 ASSERT(xp != NULL); 12162 un = SD_GET_UN(bp); 12163 ASSERT(un != NULL); 12164 ASSERT(!mutex_owned(SD_MUTEX(un))); 12165 12166 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12167 NULL, KM_SLEEP); 12168 12169 new_bp->b_lblkno = blkno; 12170 12171 /* 12172 * Allocate an xbuf for the shadow bp and copy the contents of the 12173 * original xbuf into it. 12174 */ 12175 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12176 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12177 12178 /* 12179 * The given bp is automatically saved in the xb_private member 12180 * of the new xbuf. Callers are allowed to depend on this.
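 *
 * Note (added): the bcopy() above gives the clone a complete copy of
 * the original xbuf (chain indexes, pkt info, and so on); only
 * xb_private is deliberately overwritten, so an iodone routine handed
 * the clone can always recover its parent via
 * SD_GET_XBUF(clone)->xb_private.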
12181 */ 12182 new_xp->xb_private = bp; 12183 12184 new_bp->b_private = new_xp; 12185 12186 return (new_bp); 12187 } 12188 12189 /* 12190 * Function: sd_shadow_buf_alloc 12191 * 12192 * Description: Allocate a buf(9S) and init it as per the given buf 12193 * and the various arguments. The associated sd_xbuf 12194 * struct is (nearly) duplicated. The struct buf *bp 12195 * argument is saved in new_xp->xb_private. 12196 * 12197 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12198 * datalen - size of data area for the shadow bp 12199 * bflags - B_READ or B_WRITE (pseudo flag) 12200 * blkno - starting LBA 12201 * func - function pointer for b_iodone in the shadow buf. (May 12202 * be NULL if none.) 12203 * 12204 * Return Code: Pointer to the allocated buf(9S) struct 12205 * 12206 * Context: Can sleep. 12207 */ 12208 12209 static struct buf * 12210 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12211 daddr_t blkno, int (*func)(struct buf *)) 12212 { 12213 struct sd_lun *un; 12214 struct sd_xbuf *xp; 12215 struct sd_xbuf *new_xp; 12216 struct buf *new_bp; 12217 12218 ASSERT(bp != NULL); 12219 xp = SD_GET_XBUF(bp); 12220 ASSERT(xp != NULL); 12221 un = SD_GET_UN(bp); 12222 ASSERT(un != NULL); 12223 ASSERT(!mutex_owned(SD_MUTEX(un))); 12224 12225 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12226 bp_mapin(bp); 12227 } 12228 12229 bflags &= (B_READ | B_WRITE); 12230 #if defined(__i386) || defined(__amd64) 12231 new_bp = getrbuf(KM_SLEEP); 12232 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12233 new_bp->b_bcount = datalen; 12234 new_bp->b_flags = bflags | 12235 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12236 #else 12237 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12238 datalen, bflags, SLEEP_FUNC, NULL); 12239 #endif 12240 new_bp->av_forw = NULL; 12241 new_bp->av_back = NULL; 12242 new_bp->b_dev = bp->b_dev; 12243 new_bp->b_blkno = blkno; 12244 new_bp->b_iodone = func; 12245 new_bp->b_edev = bp->b_edev; 12246 new_bp->b_resid = 0; 12247 12248 /* We need to preserve the B_FAILFAST flag */ 12249 if (bp->b_flags & B_FAILFAST) { 12250 new_bp->b_flags |= B_FAILFAST; 12251 } 12252 12253 /* 12254 * Allocate an xbuf for the shadow bp and copy the contents of the 12255 * original xbuf into it. 12256 */ 12257 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12258 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12259 12260 /* Need later to copy data between the shadow buf & original buf! */ 12261 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12262 12263 /* 12264 * The given bp is automatically saved in the xb_private member 12265 * of the new xbuf. Callers are allowed to depend on this. 12266 */ 12267 new_xp->xb_private = bp; 12268 12269 new_bp->b_private = new_xp; 12270 12271 return (new_bp); 12272 } 12273 12274 /* 12275 * Function: sd_bioclone_free 12276 * 12277 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12278 * in the larger than partition operation. 12279 * 12280 * Context: May be called under interrupt context 12281 */ 12282 12283 static void 12284 sd_bioclone_free(struct buf *bp) 12285 { 12286 struct sd_xbuf *xp; 12287 12288 ASSERT(bp != NULL); 12289 xp = SD_GET_XBUF(bp); 12290 ASSERT(xp != NULL); 12291 12292 /* 12293 * Call bp_mapout() before freeing the buf, in case a lower 12294 * layer or HBA had done a bp_mapin(). We must do this here 12295 * as we are the "originator" of the shadow buf.
12296 */ 12297 bp_mapout(bp); 12298 12299 /* 12300 * Null out b_iodone before freeing the bp, to ensure that the driver 12301 * never gets confused by a stale value in this field. (Just a little 12302 * extra defensiveness here.) 12303 */ 12304 bp->b_iodone = NULL; 12305 12306 freerbuf(bp); 12307 12308 kmem_free(xp, sizeof (struct sd_xbuf)); 12309 } 12310 12311 /* 12312 * Function: sd_shadow_buf_free 12313 * 12314 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12315 * 12316 * Context: May be called under interrupt context 12317 */ 12318 12319 static void 12320 sd_shadow_buf_free(struct buf *bp) 12321 { 12322 struct sd_xbuf *xp; 12323 12324 ASSERT(bp != NULL); 12325 xp = SD_GET_XBUF(bp); 12326 ASSERT(xp != NULL); 12327 12328 #if defined(__sparc) 12329 /* 12330 * Call bp_mapout() before freeing the buf, in case a lower 12331 * layer or HBA had done a bp_mapin(). We must do this here 12332 * as we are the "originator" of the shadow buf. 12333 */ 12334 bp_mapout(bp); 12335 #endif 12336 12337 /* 12338 * Null out b_iodone before freeing the bp, to ensure that the driver 12339 * never gets confused by a stale value in this field. (Just a little 12340 * extra defensiveness here.) 12341 */ 12342 bp->b_iodone = NULL; 12343 12344 #if defined(__i386) || defined(__amd64) 12345 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12346 freerbuf(bp); 12347 #else 12348 scsi_free_consistent_buf(bp); 12349 #endif 12350 12351 kmem_free(xp, sizeof (struct sd_xbuf)); 12352 } 12353 12354 12355 /* 12356 * Function: sd_print_transport_rejected_message 12357 * 12358 * Description: This implements the ludicrously complex rules for printing 12359 * a "transport rejected" message. This is to address the 12360 * specific problem of having a flood of this error message 12361 * produced when a failover occurs. 12362 * 12363 * Context: Any. 12364 */ 12365 12366 static void 12367 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12368 int code) 12369 { 12370 ASSERT(un != NULL); 12371 ASSERT(mutex_owned(SD_MUTEX(un))); 12372 ASSERT(xp != NULL); 12373 12374 /* 12375 * Print the "transport rejected" message under the following 12376 * conditions: 12377 * 12378 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12379 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12380 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12381 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12382 * scsi_transport(9F) (which indicates that the target might have 12383 * gone off-line). This uses the un->un_tran_fatal_count 12384 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12385 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12386 * from scsi_transport(). 12387 * 12388 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12389 * the preceding cases in order for the message to be printed.
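 *
 * Concretely (an added example): a failover that returns a burst of
 * TRAN_FATAL_ERRORs logs the message only for the first one (when
 * un_tran_fatal_count is 1), whereas a TRAN_BADPKT is always logged;
 * setting the SD_LOGMASK_DIAG bit in sd_level_mask restores the old
 * log-everything behavior. FLAG_SILENT suppresses the message in
 * every case.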
12390 */ 12391 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12392 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12393 (code != TRAN_FATAL_ERROR) || 12394 (un->un_tran_fatal_count == 1)) { 12395 switch (code) { 12396 case TRAN_BADPKT: 12397 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12398 "transport rejected bad packet\n"); 12399 break; 12400 case TRAN_FATAL_ERROR: 12401 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12402 "transport rejected fatal error\n"); 12403 break; 12404 default: 12405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12406 "transport rejected (%d)\n", code); 12407 break; 12408 } 12409 } 12410 } 12411 } 12412 12413 12414 /* 12415 * Function: sd_add_buf_to_waitq 12416 * 12417 * Description: Add the given buf(9S) struct to the wait queue for the 12418 * instance. If sorting is enabled, then the buf is added 12419 * to the queue via an elevator sort algorithm (a la 12420 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12421 * If sorting is not enabled, then the buf is just added 12422 * to the end of the wait queue. 12423 * 12424 * Return Code: void 12425 * 12426 * Context: Does not sleep/block, therefore technically can be called 12427 * from any context. However if sorting is enabled then the 12428 * execution time is indeterminate, and may take long if 12429 * the wait queue grows large. 12430 */ 12431 12432 static void 12433 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12434 { 12435 struct buf *ap; 12436 12437 ASSERT(bp != NULL); 12438 ASSERT(un != NULL); 12439 ASSERT(mutex_owned(SD_MUTEX(un))); 12440 12441 /* If the queue is empty, add the buf as the only entry & return. */ 12442 if (un->un_waitq_headp == NULL) { 12443 ASSERT(un->un_waitq_tailp == NULL); 12444 un->un_waitq_headp = un->un_waitq_tailp = bp; 12445 bp->av_forw = NULL; 12446 return; 12447 } 12448 12449 ASSERT(un->un_waitq_tailp != NULL); 12450 12451 /* 12452 * If sorting is disabled, just add the buf to the tail end of 12453 * the wait queue and return. 12454 */ 12455 if (un->un_f_disksort_disabled) { 12456 un->un_waitq_tailp->av_forw = bp; 12457 un->un_waitq_tailp = bp; 12458 bp->av_forw = NULL; 12459 return; 12460 } 12461 12462 /* 12463 * Sort thru the list of requests currently on the wait queue 12464 * and add the new buf request at the appropriate position. 12465 * 12466 * The un->un_waitq_headp is an activity chain pointer on which 12467 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12468 * first queue holds those requests which are positioned after 12469 * the current SD_GET_BLKNO() (in the first request); the second holds 12470 * requests which came in after their SD_GET_BLKNO() number was passed. 12471 * Thus we implement a one way scan, retracting after reaching 12472 * the end of the drive to the first request on the second 12473 * queue, at which time it becomes the first queue. 12474 * A one-way scan is natural because of the way UNIX read-ahead 12475 * blocks are allocated. 12476 * 12477 * If we lie after the first request, then we must locate the 12478 * second request list and add ourselves to it. 12479 */ 12480 ap = un->un_waitq_headp; 12481 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12482 while (ap->av_forw != NULL) { 12483 /* 12484 * Look for an "inversion" in the (normally 12485 * ascending) block numbers. This indicates 12486 * the start of the second request list. 12487 */ 12488 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12489 /* 12490 * Search the second request list for the 12491 * first request at a larger block number. 
12492 * We go before that; however if there is 12493 * no such request, we go at the end. 12494 */ 12495 do { 12496 if (SD_GET_BLKNO(bp) < 12497 SD_GET_BLKNO(ap->av_forw)) { 12498 goto insert; 12499 } 12500 ap = ap->av_forw; 12501 } while (ap->av_forw != NULL); 12502 goto insert; /* after last */ 12503 } 12504 ap = ap->av_forw; 12505 } 12506 12507 /* 12508 * No inversions... we will go after the last, and 12509 * be the first request in the second request list. 12510 */ 12511 goto insert; 12512 } 12513 12514 /* 12515 * Request is at/after the current request... 12516 * sort in the first request list. 12517 */ 12518 while (ap->av_forw != NULL) { 12519 /* 12520 * We want to go after the current request (1) if 12521 * there is an inversion after it (i.e. it is the end 12522 * of the first request list), or (2) if the next 12523 * request is a larger block no. than our request. 12524 */ 12525 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12526 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12527 goto insert; 12528 } 12529 ap = ap->av_forw; 12530 } 12531 12532 /* 12533 * Neither a second list nor a larger request, therefore 12534 * we go at the end of the first list (which is the same 12535 * as the end of the whole shebang). 12536 */ 12537 insert: 12538 bp->av_forw = ap->av_forw; 12539 ap->av_forw = bp; 12540 12541 /* 12542 * If we inserted onto the tail end of the waitq, make sure the 12543 * tail pointer is updated. 12544 */ 12545 if (ap == un->un_waitq_tailp) { 12546 un->un_waitq_tailp = bp; 12547 } 12548 } 12549 12550 12551 /* 12552 * Function: sd_start_cmds 12553 * 12554 * Description: Remove and transport cmds from the driver queues. 12555 * 12556 * Arguments: un - pointer to the unit (soft state) struct for the target. 12557 * 12558 * immed_bp - ptr to a buf to be transported immediately. Only 12559 * the immed_bp is transported; bufs on the waitq are not 12560 * processed and the un_retry_bp is not checked. If immed_bp is 12561 * NULL, then normal queue processing is performed. 12562 * 12563 * Context: May be called from kernel thread context, interrupt context, 12564 * or runout callback context. This function may not block or 12565 * call routines that block. 12566 */ 12567 12568 static void 12569 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12570 { 12571 struct sd_xbuf *xp; 12572 struct buf *bp; 12573 void (*statp)(kstat_io_t *); 12574 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12575 void (*saved_statp)(kstat_io_t *); 12576 #endif 12577 int rval; 12578 12579 ASSERT(un != NULL); 12580 ASSERT(mutex_owned(SD_MUTEX(un))); 12581 ASSERT(un->un_ncmds_in_transport >= 0); 12582 ASSERT(un->un_throttle >= 0); 12583 12584 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12585 12586 do { 12587 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12588 saved_statp = NULL; 12589 #endif 12590 12591 /* 12592 * If we are syncing or dumping, fail the command to 12593 * avoid recursively calling back into scsi_transport(). 12594 * The dump I/O itself uses a separate code path so this 12595 * only prevents non-dump I/O from being sent while dumping. 12596 * File system sync takes place before dumping begins. 12597 * During panic, filesystem I/O is allowed provided 12598 * un_in_callback is <= 1. This is to prevent recursion 12599 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12600 * sd_start_cmds and so on. See panic.c for more information 12601 * about the states the system can be in during panic.
12602 */ 12603 if ((un->un_state == SD_STATE_DUMPING) || 12604 (ddi_in_panic() && (un->un_in_callback > 1))) { 12605 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12606 "sd_start_cmds: panicking\n"); 12607 goto exit; 12608 } 12609 12610 if ((bp = immed_bp) != NULL) { 12611 /* 12612 * We have a bp that must be transported immediately. 12613 * It's OK to transport the immed_bp here without doing 12614 * the throttle limit check because the immed_bp is 12615 * always used in a retry/recovery case. This means 12616 * that we know we are not at the throttle limit by 12617 * virtue of the fact that to get here we must have 12618 * already gotten a command back via sdintr(). This also 12619 * relies on (1) the command on un_retry_bp preventing 12620 * further commands from the waitq from being issued; 12621 * and (2) the code in sd_retry_command checking the 12622 * throttle limit before issuing a delayed or immediate 12623 * retry. This holds even if the throttle limit is 12624 * currently ratcheted down from its maximum value. 12625 */ 12626 statp = kstat_runq_enter; 12627 if (bp == un->un_retry_bp) { 12628 ASSERT((un->un_retry_statp == NULL) || 12629 (un->un_retry_statp == kstat_waitq_enter) || 12630 (un->un_retry_statp == 12631 kstat_runq_back_to_waitq)); 12632 /* 12633 * If the waitq kstat was incremented when 12634 * sd_set_retry_bp() queued this bp for a retry, 12635 * then we must set up statp so that the waitq 12636 * count will get decremented correctly below. 12637 * Also we must clear un->un_retry_statp to 12638 * ensure that we do not act on a stale value 12639 * in this field. 12640 */ 12641 if ((un->un_retry_statp == kstat_waitq_enter) || 12642 (un->un_retry_statp == 12643 kstat_runq_back_to_waitq)) { 12644 statp = kstat_waitq_to_runq; 12645 } 12646 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12647 saved_statp = un->un_retry_statp; 12648 #endif 12649 un->un_retry_statp = NULL; 12650 12651 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12652 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12653 "un_throttle:%d un_ncmds_in_transport:%d\n", 12654 un, un->un_retry_bp, un->un_throttle, 12655 un->un_ncmds_in_transport); 12656 } else { 12657 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12658 "processing priority bp:0x%p\n", bp); 12659 } 12660 12661 } else if ((bp = un->un_waitq_headp) != NULL) { 12662 /* 12663 * A command on the waitq is ready to go, but do not 12664 * send it if: 12665 * 12666 * (1) the throttle limit has been reached, or 12667 * (2) a retry is pending, or 12668 * (3) a START_STOP_UNIT callback is pending, or 12669 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12670 * command is pending. 12671 * 12672 * For all of these conditions, IO processing will 12673 * restart after the condition is cleared. 12674 */ 12675 if (un->un_ncmds_in_transport >= un->un_throttle) { 12676 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12677 "sd_start_cmds: exiting, " 12678 "throttle limit reached!\n"); 12679 goto exit; 12680 } 12681 if (un->un_retry_bp != NULL) { 12682 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12683 "sd_start_cmds: exiting, retry pending!\n"); 12684 goto exit; 12685 } 12686 if (un->un_startstop_timeid != NULL) { 12687 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12688 "sd_start_cmds: exiting, " 12689 "START_STOP pending!\n"); 12690 goto exit; 12691 } 12692 if (un->un_direct_priority_timeid != NULL) { 12693 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12694 "sd_start_cmds: exiting, " 12695 "SD_PATH_DIRECT_PRIORITY cmd.
pending!\n");
12696 goto exit;
12697 }
12698
12699 /* Dequeue the command */
12700 un->un_waitq_headp = bp->av_forw;
12701 if (un->un_waitq_headp == NULL) {
12702 un->un_waitq_tailp = NULL;
12703 }
12704 bp->av_forw = NULL;
12705 statp = kstat_waitq_to_runq;
12706 SD_TRACE(SD_LOG_IO_CORE, un,
12707 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
12708
12709 } else {
12710 /* No work to do so bail out now */
12711 SD_TRACE(SD_LOG_IO_CORE, un,
12712 "sd_start_cmds: no more work, exiting!\n");
12713 goto exit;
12714 }
12715
12716 /*
12717 * Reset the state to normal. This is the mechanism by which
12718 * the state transitions from either SD_STATE_RWAIT or
12719 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
12720 * If state is SD_STATE_PM_CHANGING then this command is
12721 * part of the device power control and the state must
12722 * not be put back to normal. Doing so would
12723 * allow new commands to proceed when they shouldn't;
12724 * the device may be going off.
12725 */
12726 if ((un->un_state != SD_STATE_SUSPENDED) &&
12727 (un->un_state != SD_STATE_PM_CHANGING)) {
12728 New_state(un, SD_STATE_NORMAL);
12729 }
12730
12731 xp = SD_GET_XBUF(bp);
12732 ASSERT(xp != NULL);
12733
12734 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12735 /*
12736 * Allocate the scsi_pkt if we need one, or attach DMA
12737 * resources if we have a scsi_pkt that needs them. The
12738 * latter should only occur for commands that are being
12739 * retried.
12740 */
12741 if ((xp->xb_pktp == NULL) ||
12742 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
12743 #else
12744 if (xp->xb_pktp == NULL) {
12745 #endif
12746 /*
12747 * There is no scsi_pkt allocated for this buf. Call
12748 * the initpkt function to allocate & init one.
12749 *
12750 * The scsi_init_pkt runout callback functionality is
12751 * implemented as follows:
12752 *
12753 * 1) The initpkt function always calls
12754 * scsi_init_pkt(9F) with sdrunout specified as the
12755 * callback routine.
12756 * 2) A successfully allocated packet is initialized and
12757 * the I/O is transported.
12758 * 3) The I/O associated with an allocation resource
12759 * failure is left on its queue to be retried via
12760 * runout or the next I/O.
12761 * 4) The I/O associated with a DMA error is removed
12762 * from the queue and failed with EIO. Processing of
12763 * the transport queues is also halted to be
12764 * restarted via runout or the next I/O.
12765 * 5) The I/O associated with a CDB size or packet
12766 * size error is removed from the queue and failed
12767 * with EIO. Processing of the transport queues is
12768 * continued.
12769 *
12770 * Note: there is no interface for canceling a runout
12771 * callback. To prevent the driver from detaching or
12772 * suspending while a runout is pending, the driver
12773 * state is set to SD_STATE_RWAIT.
12774 *
12775 * Note: using the scsi_init_pkt callback facility can
12776 * result in an I/O request persisting at the head of
12777 * the list which cannot be satisfied even after
12778 * multiple retries. In the future the driver may
12779 * implement some kind of maximum runout count before
12780 * failing an I/O.
12781 *
12782 * Note: the use of funcp below may seem superfluous,
12783 * but it helps warlock figure out the correct
12784 * initpkt function calls (see [s]sd.wlcmd).
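 *
 * A minimal sketch of the initpkt contract described above
 * (hypothetical helper for illustration only; the real initpkt
 * functions are reached through sd_initpkt_map[], and cdb_len and
 * status_len here stand in for values derived from the command):
 *
 *	static int
 *	xx_initpkt(struct buf *bp, struct scsi_pkt **pktpp)
 *	{
 *		struct sd_lun *un = SD_GET_UN(bp);
 *		struct scsi_pkt *pktp = scsi_init_pkt(SD_ADDRESS(un),
 *		    NULL, bp, cdb_len, status_len, 0, 0,
 *		    sdrunout, (caddr_t)un);
 *
 *		if (pktp == NULL)
 *			return (SD_PKT_ALLOC_FAILURE);
 *		*pktpp = pktp;
 *		return (SD_PKT_ALLOC_SUCCESS);
 *	}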
12785 */
12786 struct scsi_pkt *pktp;
12787 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
12788
12789 ASSERT(bp != un->un_rqs_bp);
12790
12791 funcp = sd_initpkt_map[xp->xb_chain_iostart];
12792 switch ((*funcp)(bp, &pktp)) {
12793 case SD_PKT_ALLOC_SUCCESS:
12794 xp->xb_pktp = pktp;
12795 SD_TRACE(SD_LOG_IO_CORE, un,
12796 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
12797 pktp);
12798 goto got_pkt;
12799
12800 case SD_PKT_ALLOC_FAILURE:
12801 /*
12802 * Temporary (hopefully) resource depletion.
12803 * Since retries and RQS commands always have a
12804 * scsi_pkt allocated, these cases should never
12805 * get here. So the only cases this needs to
12806 * handle are a bp from the waitq (which we put
12807 * back onto the waitq for sdrunout), or a bp
12808 * sent as an immed_bp (which we just fail).
12809 */
12810 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
12811 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
12812
12813 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
12814
12815 if (bp == immed_bp) {
12816 /*
12817 * If SD_XB_DMA_FREED is clear, then
12818 * this is a failure to allocate a
12819 * scsi_pkt, and we must fail the
12820 * command.
12821 */
12822 if ((xp->xb_pkt_flags &
12823 SD_XB_DMA_FREED) == 0) {
12824 break;
12825 }
12826
12827 /*
12828 * If this immediate command is NOT our
12829 * un_retry_bp, then we must fail it.
12830 */
12831 if (bp != un->un_retry_bp) {
12832 break;
12833 }
12834
12835 /*
12836 * We get here if this cmd is our
12837 * un_retry_bp that was DMAFREED, but
12838 * scsi_init_pkt() failed to reallocate
12839 * DMA resources when we attempted to
12840 * retry it. This can happen when an
12841 * mpxio failover is in progress, but
12842 * we don't want to just fail the
12843 * command in this case.
12844 *
12845 * Use timeout(9F) to restart it after
12846 * a 100ms delay. We don't want to
12847 * let sdrunout() restart it, because
12848 * sdrunout() is just supposed to start
12849 * commands that are sitting on the
12850 * wait queue. The un_retry_bp stays
12851 * set until the command completes, but
12852 * sdrunout can be called many times
12853 * before that happens. Since sdrunout
12854 * cannot tell if the un_retry_bp is
12855 * already in the transport, it could
12856 * end up calling scsi_transport() for
12857 * the un_retry_bp multiple times.
12858 *
12859 * Also: don't schedule the callback
12860 * if some other callback is already
12861 * pending.
12862 */
12863 if (un->un_retry_statp == NULL) {
12864 /*
12865 * restore the kstat pointer to
12866 * keep kstat counts coherent
12867 * when we do retry the command.
12868 */
12869 un->un_retry_statp =
12870 saved_statp;
12871 }
12872
12873 if ((un->un_startstop_timeid == NULL) &&
12874 (un->un_retry_timeid == NULL) &&
12875 (un->un_direct_priority_timeid ==
12876 NULL)) {
12877
12878 un->un_retry_timeid =
12879 timeout(
12880 sd_start_retry_command,
12881 un, SD_RESTART_TIMEOUT);
12882 }
12883 goto exit;
12884 }
12885
12886 #else
12887 if (bp == immed_bp) {
12888 break; /* Just fail the command */
12889 }
12890 #endif
12891
12892 /* Add the buf back to the head of the waitq */
12893 bp->av_forw = un->un_waitq_headp;
12894 un->un_waitq_headp = bp;
12895 if (un->un_waitq_tailp == NULL) {
12896 un->un_waitq_tailp = bp;
12897 }
12898 goto exit;
12899
12900 case SD_PKT_ALLOC_FAILURE_NO_DMA:
12901 /*
12902 * HBA DMA resource failure. Fail the command
12903 * and continue processing of the queues.
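 *
 * (Hedged note on the dispositions in this switch: a "break" falls
 * through to the code below the switch, which fails the bp via
 * sd_return_failed_command_no_restart() while the waitq continues
 * to be drained; a "goto exit" instead leaves the bp queued so that
 * sdrunout() or the next I/O restarts queue processing.)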
12904 */ 12905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12906 "sd_start_cmds: " 12907 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12908 break; 12909 12910 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12911 /* 12912 * Note:x86: Partial DMA mapping not supported 12913 * for USCSI commands, and all the needed DMA 12914 * resources were not allocated. 12915 */ 12916 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12917 "sd_start_cmds: " 12918 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12919 break; 12920 12921 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12922 /* 12923 * Note:x86: Request cannot fit into CDB based 12924 * on lba and len. 12925 */ 12926 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12927 "sd_start_cmds: " 12928 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 12929 break; 12930 12931 default: 12932 /* Should NEVER get here! */ 12933 panic("scsi_initpkt error"); 12934 /*NOTREACHED*/ 12935 } 12936 12937 /* 12938 * Fatal error in allocating a scsi_pkt for this buf. 12939 * Update kstats & return the buf with an error code. 12940 * We must use sd_return_failed_command_no_restart() to 12941 * avoid a recursive call back into sd_start_cmds(). 12942 * However this also means that we must keep processing 12943 * the waitq here in order to avoid stalling. 12944 */ 12945 if (statp == kstat_waitq_to_runq) { 12946 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 12947 } 12948 sd_return_failed_command_no_restart(un, bp, EIO); 12949 if (bp == immed_bp) { 12950 /* immed_bp is gone by now, so clear this */ 12951 immed_bp = NULL; 12952 } 12953 continue; 12954 } 12955 got_pkt: 12956 if (bp == immed_bp) { 12957 /* goto the head of the class.... */ 12958 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 12959 } 12960 12961 un->un_ncmds_in_transport++; 12962 SD_UPDATE_KSTATS(un, statp, bp); 12963 12964 /* 12965 * Call scsi_transport() to send the command to the target. 12966 * According to SCSA architecture, we must drop the mutex here 12967 * before calling scsi_transport() in order to avoid deadlock. 12968 * Note that the scsi_pkt's completion routine can be executed 12969 * (from interrupt context) even before the call to 12970 * scsi_transport() returns. 12971 */ 12972 SD_TRACE(SD_LOG_IO_CORE, un, 12973 "sd_start_cmds: calling scsi_transport()\n"); 12974 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 12975 12976 mutex_exit(SD_MUTEX(un)); 12977 rval = scsi_transport(xp->xb_pktp); 12978 mutex_enter(SD_MUTEX(un)); 12979 12980 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12981 "sd_start_cmds: scsi_transport() returned %d\n", rval); 12982 12983 switch (rval) { 12984 case TRAN_ACCEPT: 12985 /* Clear this with every pkt accepted by the HBA */ 12986 un->un_tran_fatal_count = 0; 12987 break; /* Success; try the next cmd (if any) */ 12988 12989 case TRAN_BUSY: 12990 un->un_ncmds_in_transport--; 12991 ASSERT(un->un_ncmds_in_transport >= 0); 12992 12993 /* 12994 * Don't retry request sense, the sense data 12995 * is lost when another request is sent. 12996 * Free up the rqs buf and retry 12997 * the original failed cmd. Update kstat. 12998 */ 12999 if (bp == un->un_rqs_bp) { 13000 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13001 bp = sd_mark_rqs_idle(un, xp); 13002 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13003 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13004 kstat_waitq_enter); 13005 goto exit; 13006 } 13007 13008 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13009 /* 13010 * Free the DMA resources for the scsi_pkt. 
This will 13011 * allow mpxio to select another path the next time 13012 * we call scsi_transport() with this scsi_pkt. 13013 * See sdintr() for the rationalization behind this. 13014 */ 13015 if ((un->un_f_is_fibre == TRUE) && 13016 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13017 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13018 scsi_dmafree(xp->xb_pktp); 13019 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13020 } 13021 #endif 13022 13023 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13024 /* 13025 * Commands that are SD_PATH_DIRECT_PRIORITY 13026 * are for error recovery situations. These do 13027 * not use the normal command waitq, so if they 13028 * get a TRAN_BUSY we cannot put them back onto 13029 * the waitq for later retry. One possible 13030 * problem is that there could already be some 13031 * other command on un_retry_bp that is waiting 13032 * for this one to complete, so we would be 13033 * deadlocked if we put this command back onto 13034 * the waitq for later retry (since un_retry_bp 13035 * must complete before the driver gets back to 13036 * commands on the waitq). 13037 * 13038 * To avoid deadlock we must schedule a callback 13039 * that will restart this command after a set 13040 * interval. This should keep retrying for as 13041 * long as the underlying transport keeps 13042 * returning TRAN_BUSY (just like for other 13043 * commands). Use the same timeout interval as 13044 * for the ordinary TRAN_BUSY retry. 13045 */ 13046 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13047 "sd_start_cmds: scsi_transport() returned " 13048 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13049 13050 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13051 un->un_direct_priority_timeid = 13052 timeout(sd_start_direct_priority_command, 13053 bp, SD_BSY_TIMEOUT / 500); 13054 13055 goto exit; 13056 } 13057 13058 /* 13059 * For TRAN_BUSY, we want to reduce the throttle value, 13060 * unless we are retrying a command. 13061 */ 13062 if (bp != un->un_retry_bp) { 13063 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13064 } 13065 13066 /* 13067 * Set up the bp to be tried again 10 ms later. 13068 * Note:x86: Is there a timeout value in the sd_lun 13069 * for this condition? 13070 */ 13071 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13072 kstat_runq_back_to_waitq); 13073 goto exit; 13074 13075 case TRAN_FATAL_ERROR: 13076 un->un_tran_fatal_count++; 13077 /* FALLTHRU */ 13078 13079 case TRAN_BADPKT: 13080 default: 13081 un->un_ncmds_in_transport--; 13082 ASSERT(un->un_ncmds_in_transport >= 0); 13083 13084 /* 13085 * If this is our REQUEST SENSE command with a 13086 * transport error, we must get back the pointers 13087 * to the original buf, and mark the REQUEST 13088 * SENSE command as "available". 13089 */ 13090 if (bp == un->un_rqs_bp) { 13091 bp = sd_mark_rqs_idle(un, xp); 13092 xp = SD_GET_XBUF(bp); 13093 } else { 13094 /* 13095 * Legacy behavior: do not update transport 13096 * error count for request sense commands. 13097 */ 13098 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13099 } 13100 13101 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13102 sd_print_transport_rejected_message(un, xp, rval); 13103 13104 /* 13105 * We must use sd_return_failed_command_no_restart() to 13106 * avoid a recursive call back into sd_start_cmds(). 13107 * However this also means that we must keep processing 13108 * the waitq here in order to avoid stalling. 
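 *
 * A hedged sketch of the recursion being avoided:
 *
 *	sd_start_cmds()			<- we are already executing here
 *	  sd_return_failed_command()
 *	    sd_return_command()
 *	      sd_start_cmds()		<- would re-enter; hence the
 *					   _no_restart variant below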
13109 */ 13110 sd_return_failed_command_no_restart(un, bp, EIO); 13111 13112 /* 13113 * Notify any threads waiting in sd_ddi_suspend() that 13114 * a command completion has occurred. 13115 */ 13116 if (un->un_state == SD_STATE_SUSPENDED) { 13117 cv_broadcast(&un->un_disk_busy_cv); 13118 } 13119 13120 if (bp == immed_bp) { 13121 /* immed_bp is gone by now, so clear this */ 13122 immed_bp = NULL; 13123 } 13124 break; 13125 } 13126 13127 } while (immed_bp == NULL); 13128 13129 exit: 13130 ASSERT(mutex_owned(SD_MUTEX(un))); 13131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13132 } 13133 13134 13135 /* 13136 * Function: sd_return_command 13137 * 13138 * Description: Returns a command to its originator (with or without an 13139 * error). Also starts commands waiting to be transported 13140 * to the target. 13141 * 13142 * Context: May be called from interrupt, kernel, or timeout context 13143 */ 13144 13145 static void 13146 sd_return_command(struct sd_lun *un, struct buf *bp) 13147 { 13148 struct sd_xbuf *xp; 13149 #if defined(__i386) || defined(__amd64) 13150 struct scsi_pkt *pktp; 13151 #endif 13152 13153 ASSERT(bp != NULL); 13154 ASSERT(un != NULL); 13155 ASSERT(mutex_owned(SD_MUTEX(un))); 13156 ASSERT(bp != un->un_rqs_bp); 13157 xp = SD_GET_XBUF(bp); 13158 ASSERT(xp != NULL); 13159 13160 #if defined(__i386) || defined(__amd64) 13161 pktp = SD_GET_PKTP(bp); 13162 #endif 13163 13164 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13165 13166 #if defined(__i386) || defined(__amd64) 13167 /* 13168 * Note:x86: check for the "sdrestart failed" case. 13169 */ 13170 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13171 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13172 (xp->xb_pktp->pkt_resid == 0)) { 13173 13174 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13175 /* 13176 * Successfully set up next portion of cmd 13177 * transfer, try sending it 13178 */ 13179 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13180 NULL, NULL, 0, (clock_t)0, NULL); 13181 sd_start_cmds(un, NULL); 13182 return; /* Note:x86: need a return here? */ 13183 } 13184 } 13185 #endif 13186 13187 /* 13188 * If this is the failfast bp, clear it from un_failfast_bp. This 13189 * can happen if upon being re-tried the failfast bp either 13190 * succeeded or encountered another error (possibly even a different 13191 * error than the one that precipitated the failfast state, but in 13192 * that case it would have had to exhaust retries as well). Regardless, 13193 * this should not occur whenever the instance is in the active 13194 * failfast state. 13195 */ 13196 if (bp == un->un_failfast_bp) { 13197 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13198 un->un_failfast_bp = NULL; 13199 } 13200 13201 /* 13202 * Clear the failfast state upon successful completion of ANY cmd. 13203 */ 13204 if (bp->b_error == 0) { 13205 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13206 } 13207 13208 /* 13209 * This is used if the command was retried one or more times. Show that 13210 * we are done with it, and allow processing of the waitq to resume. 
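 *
 * (Hedged note: while un_retry_bp is non-NULL, sd_start_cmds() will
 * not issue further commands from the waitq -- see the "retry
 * pending" check there -- so clearing it here is what re-opens the
 * queue.)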
13211 */
13212 if (bp == un->un_retry_bp) {
13213 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13214 "sd_return_command: un:0x%p: "
13215 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13216 un->un_retry_bp = NULL;
13217 un->un_retry_statp = NULL;
13218 }
13219
13220 SD_UPDATE_RDWR_STATS(un, bp);
13221 SD_UPDATE_PARTITION_STATS(un, bp);
13222
13223 switch (un->un_state) {
13224 case SD_STATE_SUSPENDED:
13225 /*
13226 * Notify any threads waiting in sd_ddi_suspend() that
13227 * a command completion has occurred.
13228 */
13229 cv_broadcast(&un->un_disk_busy_cv);
13230 break;
13231 default:
13232 sd_start_cmds(un, NULL);
13233 break;
13234 }
13235
13236 /* Return this command up the iodone chain to its originator. */
13237 mutex_exit(SD_MUTEX(un));
13238
13239 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13240 xp->xb_pktp = NULL;
13241
13242 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13243
13244 ASSERT(!mutex_owned(SD_MUTEX(un)));
13245 mutex_enter(SD_MUTEX(un));
13246
13247 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
13248 }
13249
13250
13251 /*
13252 * Function: sd_return_failed_command
13253 *
13254 * Description: Command completion when an error occurred.
13255 *
13256 * Context: May be called from interrupt context
13257 */
13258
13259 static void
13260 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
13261 {
13262 ASSERT(bp != NULL);
13263 ASSERT(un != NULL);
13264 ASSERT(mutex_owned(SD_MUTEX(un)));
13265
13266 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13267 "sd_return_failed_command: entry\n");
13268
13269 /*
13270 * b_resid could already be nonzero due to a partial data
13271 * transfer, so do not change it here.
13272 */
13273 SD_BIOERROR(bp, errcode);
13274
13275 sd_return_command(un, bp);
13276 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13277 "sd_return_failed_command: exit\n");
13278 }
13279
13280
13281 /*
13282 * Function: sd_return_failed_command_no_restart
13283 *
13284 * Description: Same as sd_return_failed_command, but ensures that no
13285 * call back into sd_start_cmds will be issued.
13286 *
13287 * Context: May be called from interrupt context
13288 */
13289
13290 static void
13291 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
13292 int errcode)
13293 {
13294 struct sd_xbuf *xp;
13295
13296 ASSERT(bp != NULL);
13297 ASSERT(un != NULL);
13298 ASSERT(mutex_owned(SD_MUTEX(un)));
13299 xp = SD_GET_XBUF(bp);
13300 ASSERT(xp != NULL);
13301 ASSERT(errcode != 0);
13302
13303 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13304 "sd_return_failed_command_no_restart: entry\n");
13305
13306 /*
13307 * b_resid could already be nonzero due to a partial data
13308 * transfer, so do not change it here.
13309 */
13310 SD_BIOERROR(bp, errcode);
13311
13312 /*
13313 * If this is the failfast bp, clear it. This can happen if the
13314 * failfast bp encountered a fatal error when we attempted to
13315 * re-try it (such as a scsi_transport(9F) failure). However
13316 * we should NOT be in an active failfast state if the failfast
13317 * bp is not NULL.
13318 */
13319 if (bp == un->un_failfast_bp) {
13320 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
13321 un->un_failfast_bp = NULL;
13322 }
13323
13324 if (bp == un->un_retry_bp) {
13325 /*
13326 * This command was retried one or more times. Show that we are
13327 * done with it, and allow processing of the waitq to resume.
13328 */
13329 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13330 "sd_return_failed_command_no_restart: "
13331 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
13332 un->un_retry_bp = NULL;
13333 un->un_retry_statp = NULL;
13334 }
13335
13336 SD_UPDATE_RDWR_STATS(un, bp);
13337 SD_UPDATE_PARTITION_STATS(un, bp);
13338
13339 mutex_exit(SD_MUTEX(un));
13340
13341 if (xp->xb_pktp != NULL) {
13342 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
13343 xp->xb_pktp = NULL;
13344 }
13345
13346 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
13347
13348 mutex_enter(SD_MUTEX(un));
13349
13350 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13351 "sd_return_failed_command_no_restart: exit\n");
13352 }
13353
13354
13355 /*
13356 * Function: sd_retry_command
13357 *
13358 * Description: Queue up a command for retry, or (optionally) fail it
13359 * if retry counts are exhausted.
13360 *
13361 * Arguments: un - Pointer to the sd_lun struct for the target.
13362 *
13363 * bp - Pointer to the buf for the command to be retried.
13364 *
13365 * retry_check_flag - Flag to see which (if any) of the retry
13366 * counts should be decremented/checked. If the indicated
13367 * retry count is exhausted, then the command will not be
13368 * retried; it will be failed instead. This should use a
13369 * value equal to one of the following:
13370 *
13371 * SD_RETRIES_NOCHECK
13372 * SD_RETRIES_STANDARD
13373 * SD_RETRIES_VICTIM
13374 *
13375 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
13376 * if the check should be made to see if FLAG_ISOLATE is set
13377 * in the pkt. If FLAG_ISOLATE is set, then the command is
13378 * not retried, it is simply failed.
13379 *
13380 * user_funcp - Ptr to function to call before dispatching the
13381 * command. May be NULL if no action needs to be performed.
13382 * (Primarily intended for printing messages.)
13383 *
13384 * user_arg - Optional argument to be passed along to
13385 * the user_funcp call.
13386 *
13387 * failure_code - errno return code to set in the bp if the
13388 * command is going to be failed.
13389 *
13390 * retry_delay - Retry delay interval in (clock_t) units. May
13391 * be zero, which indicates that the retry should be attempted
13392 * immediately (i.e., without an intervening delay).
13393 *
13394 * statp - Ptr to kstat function to be updated if the command
13395 * is queued for a delayed retry. May be NULL if no kstat
13396 * update is desired.
13397 *
13398 * Context: May be called from interrupt context.
13399 */
13400
13401 static void
13402 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
13403 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
13404 code), void *user_arg, int failure_code, clock_t retry_delay,
13405 void (*statp)(kstat_io_t *))
13406 {
13407 struct sd_xbuf *xp;
13408 struct scsi_pkt *pktp;
13409
13410 ASSERT(un != NULL);
13411 ASSERT(mutex_owned(SD_MUTEX(un)));
13412 ASSERT(bp != NULL);
13413 xp = SD_GET_XBUF(bp);
13414 ASSERT(xp != NULL);
13415 pktp = SD_GET_PKTP(bp);
13416 ASSERT(pktp != NULL);
13417
13418 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
13419 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
13420
13421 /*
13422 * If we are syncing or dumping, fail the command to avoid
13423 * recursively calling back into scsi_transport().
13424 */
13425 if (ddi_in_panic()) {
13426 goto fail_command_no_log;
13427 }
13428
13429 /*
13430 * We should never be retrying a command with FLAG_DIAGNOSE set, so
13431 * log an error and fail the command.
13432 */
13433 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
13434 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
13435 "ERROR, retrying FLAG_DIAGNOSE command.\n");
13436 sd_dump_memory(un, SD_LOG_IO, "CDB",
13437 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
13438 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
13439 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
13440 goto fail_command;
13441 }
13442
13443 /*
13444 * If we are suspended, then put the command onto the head of the
13445 * wait queue since we don't want to start more commands.
13446 */
13447 switch (un->un_state) {
13448 case SD_STATE_SUSPENDED:
13449 case SD_STATE_DUMPING:
13450 bp->av_forw = un->un_waitq_headp;
13451 un->un_waitq_headp = bp;
13452 if (un->un_waitq_tailp == NULL) {
13453 un->un_waitq_tailp = bp;
13454 }
13455 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13456 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
13457 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
13458 return;
13459 default:
13460 break;
13461 }
13462
13463 /*
13464 * If the caller wants us to check FLAG_ISOLATE, then see if that
13465 * is set; if it is then we do not want to retry the command.
13466 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
13467 */
13468 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
13469 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
13470 goto fail_command;
13471 }
13472 }
13473
13474
13475 /*
13476 * If SD_RETRIES_FAILFAST is set, it indicates that either a
13477 * command timeout or a selection timeout has occurred. This means
13478 * that we were unable to establish any kind of communication with
13479 * the target, and subsequent retries and/or commands are likely
13480 * to encounter similar results and take a long time to complete.
13481 *
13482 * If this is a failfast error condition, we need to update the
13483 * failfast state, even if this bp does not have B_FAILFAST set.
13484 */
13485 if (retry_check_flag & SD_RETRIES_FAILFAST) {
13486 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
13487 ASSERT(un->un_failfast_bp == NULL);
13488 /*
13489 * If we are already in the active failfast state, and
13490 * another failfast error condition has been detected,
13491 * then fail this command if it has B_FAILFAST set.
13492 * If B_FAILFAST is clear, then maintain the legacy
13493 * behavior of retrying heroically, even though this will
13494 * take a lot more time to fail the command.
13495 */
13496 if (bp->b_flags & B_FAILFAST) {
13497 goto fail_command;
13498 }
13499 } else {
13500 /*
13501 * We're not in the active failfast state, but we
13502 * have a failfast error condition, so we must begin
13503 * transition to the next state. We do this regardless
13504 * of whether or not this bp has B_FAILFAST set.
13505 */
13506 if (un->un_failfast_bp == NULL) {
13507 /*
13508 * This is the first bp to meet a failfast
13509 * condition so save it on un_failfast_bp &
13510 * do normal retry processing. Do not enter
13511 * active failfast state yet. This marks
13512 * entry into the "failfast pending" state.
13513 */
13514 un->un_failfast_bp = bp;
13515
13516 } else if (un->un_failfast_bp == bp) {
13517 /*
13518 * This is the second time *this* bp has
13519 * encountered a failfast error condition,
13520 * so enter active failfast state & flush
13521 * queues as appropriate.
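 *
 * Hedged summary of the transitions implemented by this block
 * (the "pending" label is ours; it is not a distinct
 * un_failfast_state value, but is encoded by un_failfast_bp
 * being non-NULL while the state is still SD_FAILFAST_INACTIVE):
 *
 *	INACTIVE --(first failfast error)-------> pending
 *	pending  --(same bp fails again)--------> ACTIVE + flush queues
 *	any      --(any command completes OK)---> INACTIVE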
13522 */
13523 un->un_failfast_state = SD_FAILFAST_ACTIVE;
13524 un->un_failfast_bp = NULL;
13525 sd_failfast_flushq(un);
13526
13527 /*
13528 * Fail this bp now if B_FAILFAST set;
13529 * otherwise continue with retries. (It would
13530 * be pretty ironic if this bp succeeded on a
13531 * subsequent retry after we just flushed all
13532 * the queues).
13533 */
13534 if (bp->b_flags & B_FAILFAST) {
13535 goto fail_command;
13536 }
13537
13538 #if !defined(lint) && !defined(__lint)
13539 } else {
13540 /*
13541 * If neither of the preceding conditionals
13542 * was true, it means that there is some
13543 * *other* bp that has met an initial failfast
13544 * condition and is currently either being
13545 * retried or is waiting to be retried. In
13546 * that case we should perform normal retry
13547 * processing on *this* bp, since there is a
13548 * chance that the current failfast condition
13549 * is transient and recoverable. If that does
13550 * not turn out to be the case, then retries
13551 * will be cleared when the wait queue is
13552 * flushed anyway.
13553 */
13554 #endif
13555 }
13556 }
13557 } else {
13558 /*
13559 * SD_RETRIES_FAILFAST is clear, which indicates that we
13560 * likely were able to at least establish some level of
13561 * communication with the target and subsequent commands
13562 * and/or retries are likely to get through to the target.
13563 * In this case we want to be aggressive about clearing
13564 * the failfast state. Note that this does not affect
13565 * the "failfast pending" condition.
13566 */
13567 un->un_failfast_state = SD_FAILFAST_INACTIVE;
13568 }
13569
13570
13571 /*
13572 * Check the specified retry count to see if we can still do
13573 * any retries with this pkt before we should fail it.
13574 */
13575 switch (retry_check_flag & SD_RETRIES_MASK) {
13576 case SD_RETRIES_VICTIM:
13577 /*
13578 * Check the victim retry count. If exhausted, then fall
13579 * thru & check against the standard retry count.
13580 */
13581 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
13582 /* Increment count & proceed with the retry */
13583 xp->xb_victim_retry_count++;
13584 break;
13585 }
13586 /* Victim retries exhausted, fall back to std. retries... */
13587 /* FALLTHRU */
13588
13589 case SD_RETRIES_STANDARD:
13590 if (xp->xb_retry_count >= un->un_retry_count) {
13591 /* Retries exhausted, fail the command */
13592 SD_TRACE(SD_LOG_IO_CORE, un,
13593 "sd_retry_command: retries exhausted!\n");
13594 /*
13595 * update b_resid for failed SCMD_READ & SCMD_WRITE
13596 * commands with nonzero pkt_resid.
13597 */
13598 if ((pktp->pkt_reason == CMD_CMPLT) &&
13599 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
13600 (pktp->pkt_resid != 0)) {
13601 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
13602 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
13603 SD_UPDATE_B_RESID(bp, pktp);
13604 }
13605 }
13606 goto fail_command;
13607 }
13608 xp->xb_retry_count++;
13609 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13610 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
13611 break;
13612
13613 case SD_RETRIES_UA:
13614 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
13615 /* Retries exhausted, fail the command */
13616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13617 "Unit Attention retries exhausted.
" 13618 "Check the target.\n"); 13619 goto fail_command; 13620 } 13621 xp->xb_ua_retry_count++; 13622 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13623 "sd_retry_command: retry count:%d\n", 13624 xp->xb_ua_retry_count); 13625 break; 13626 13627 case SD_RETRIES_BUSY: 13628 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13629 /* Retries exhausted, fail the command */ 13630 SD_TRACE(SD_LOG_IO_CORE, un, 13631 "sd_retry_command: retries exhausted!\n"); 13632 goto fail_command; 13633 } 13634 xp->xb_retry_count++; 13635 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13636 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13637 break; 13638 13639 case SD_RETRIES_NOCHECK: 13640 default: 13641 /* No retry count to check. Just proceed with the retry */ 13642 break; 13643 } 13644 13645 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13646 13647 /* 13648 * If we were given a zero timeout, we must attempt to retry the 13649 * command immediately (ie, without a delay). 13650 */ 13651 if (retry_delay == 0) { 13652 /* 13653 * Check some limiting conditions to see if we can actually 13654 * do the immediate retry. If we cannot, then we must 13655 * fall back to queueing up a delayed retry. 13656 */ 13657 if (un->un_ncmds_in_transport >= un->un_throttle) { 13658 /* 13659 * We are at the throttle limit for the target, 13660 * fall back to delayed retry. 13661 */ 13662 retry_delay = SD_BSY_TIMEOUT; 13663 statp = kstat_waitq_enter; 13664 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13665 "sd_retry_command: immed. retry hit " 13666 "throttle!\n"); 13667 } else { 13668 /* 13669 * We're clear to proceed with the immediate retry. 13670 * First call the user-provided function (if any) 13671 */ 13672 if (user_funcp != NULL) { 13673 (*user_funcp)(un, bp, user_arg, 13674 SD_IMMEDIATE_RETRY_ISSUED); 13675 #ifdef __lock_lint 13676 sd_print_incomplete_msg(un, bp, user_arg, 13677 SD_IMMEDIATE_RETRY_ISSUED); 13678 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13679 SD_IMMEDIATE_RETRY_ISSUED); 13680 sd_print_sense_failed_msg(un, bp, user_arg, 13681 SD_IMMEDIATE_RETRY_ISSUED); 13682 #endif 13683 } 13684 13685 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13686 "sd_retry_command: issuing immediate retry\n"); 13687 13688 /* 13689 * Call sd_start_cmds() to transport the command to 13690 * the target. 13691 */ 13692 sd_start_cmds(un, bp); 13693 13694 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13695 "sd_retry_command exit\n"); 13696 return; 13697 } 13698 } 13699 13700 /* 13701 * Set up to retry the command after a delay. 13702 * First call the user-provided function (if any) 13703 */ 13704 if (user_funcp != NULL) { 13705 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13706 } 13707 13708 sd_set_retry_bp(un, bp, retry_delay, statp); 13709 13710 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13711 return; 13712 13713 fail_command: 13714 13715 if (user_funcp != NULL) { 13716 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13717 } 13718 13719 fail_command_no_log: 13720 13721 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13722 "sd_retry_command: returning failed command\n"); 13723 13724 sd_return_failed_command(un, bp, failure_code); 13725 13726 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13727 } 13728 13729 13730 /* 13731 * Function: sd_set_retry_bp 13732 * 13733 * Description: Set up the given bp for retry. 
13734 * 13735 * Arguments: un - ptr to associated softstate 13736 * bp - ptr to buf(9S) for the command 13737 * retry_delay - time interval before issuing retry (may be 0) 13738 * statp - optional pointer to kstat function 13739 * 13740 * Context: May be called under interrupt context 13741 */ 13742 13743 static void 13744 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13745 void (*statp)(kstat_io_t *)) 13746 { 13747 ASSERT(un != NULL); 13748 ASSERT(mutex_owned(SD_MUTEX(un))); 13749 ASSERT(bp != NULL); 13750 13751 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13752 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13753 13754 /* 13755 * Indicate that the command is being retried. This will not allow any 13756 * other commands on the wait queue to be transported to the target 13757 * until this command has been completed (success or failure). The 13758 * "retry command" is not transported to the target until the given 13759 * time delay expires, unless the user specified a 0 retry_delay. 13760 * 13761 * Note: the timeout(9F) callback routine is what actually calls 13762 * sd_start_cmds() to transport the command, with the exception of a 13763 * zero retry_delay. The only current implementor of a zero retry delay 13764 * is the case where a START_STOP_UNIT is sent to spin-up a device. 13765 */ 13766 if (un->un_retry_bp == NULL) { 13767 ASSERT(un->un_retry_statp == NULL); 13768 un->un_retry_bp = bp; 13769 13770 /* 13771 * If the user has not specified a delay the command should 13772 * be queued and no timeout should be scheduled. 13773 */ 13774 if (retry_delay == 0) { 13775 /* 13776 * Save the kstat pointer that will be used in the 13777 * call to SD_UPDATE_KSTATS() below, so that 13778 * sd_start_cmds() can correctly decrement the waitq 13779 * count when it is time to transport this command. 13780 */ 13781 un->un_retry_statp = statp; 13782 goto done; 13783 } 13784 } 13785 13786 if (un->un_retry_bp == bp) { 13787 /* 13788 * Save the kstat pointer that will be used in the call to 13789 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 13790 * correctly decrement the waitq count when it is time to 13791 * transport this command. 13792 */ 13793 un->un_retry_statp = statp; 13794 13795 /* 13796 * Schedule a timeout if: 13797 * 1) The user has specified a delay. 13798 * 2) There is not a START_STOP_UNIT callback pending. 13799 * 13800 * If no delay has been specified, then it is up to the caller 13801 * to ensure that IO processing continues without stalling. 13802 * Effectively, this means that the caller will issue the 13803 * required call to sd_start_cmds(). The START_STOP_UNIT 13804 * callback does this after the START STOP UNIT command has 13805 * completed. In either of these cases we should not schedule 13806 * a timeout callback here. Also don't schedule the timeout if 13807 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 13808 */ 13809 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 13810 (un->un_direct_priority_timeid == NULL)) { 13811 un->un_retry_timeid = 13812 timeout(sd_start_retry_command, un, retry_delay); 13813 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13814 "sd_set_retry_bp: setting timeout: un: 0x%p" 13815 " bp:0x%p un_retry_timeid:0x%p\n", 13816 un, bp, un->un_retry_timeid); 13817 } 13818 } else { 13819 /* 13820 * We only get in here if there is already another command 13821 * waiting to be retried. 
In this case, we just put the 13822 * given command onto the wait queue, so it can be transported 13823 * after the current retry command has completed. 13824 * 13825 * Also we have to make sure that if the command at the head 13826 * of the wait queue is the un_failfast_bp, that we do not 13827 * put ahead of it any other commands that are to be retried. 13828 */ 13829 if ((un->un_failfast_bp != NULL) && 13830 (un->un_failfast_bp == un->un_waitq_headp)) { 13831 /* 13832 * Enqueue this command AFTER the first command on 13833 * the wait queue (which is also un_failfast_bp). 13834 */ 13835 bp->av_forw = un->un_waitq_headp->av_forw; 13836 un->un_waitq_headp->av_forw = bp; 13837 if (un->un_waitq_headp == un->un_waitq_tailp) { 13838 un->un_waitq_tailp = bp; 13839 } 13840 } else { 13841 /* Enqueue this command at the head of the waitq. */ 13842 bp->av_forw = un->un_waitq_headp; 13843 un->un_waitq_headp = bp; 13844 if (un->un_waitq_tailp == NULL) { 13845 un->un_waitq_tailp = bp; 13846 } 13847 } 13848 13849 if (statp == NULL) { 13850 statp = kstat_waitq_enter; 13851 } 13852 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13853 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 13854 } 13855 13856 done: 13857 if (statp != NULL) { 13858 SD_UPDATE_KSTATS(un, statp, bp); 13859 } 13860 13861 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13862 "sd_set_retry_bp: exit un:0x%p\n", un); 13863 } 13864 13865 13866 /* 13867 * Function: sd_start_retry_command 13868 * 13869 * Description: Start the command that has been waiting on the target's 13870 * retry queue. Called from timeout(9F) context after the 13871 * retry delay interval has expired. 13872 * 13873 * Arguments: arg - pointer to associated softstate for the device. 13874 * 13875 * Context: timeout(9F) thread context. May not sleep. 13876 */ 13877 13878 static void 13879 sd_start_retry_command(void *arg) 13880 { 13881 struct sd_lun *un = arg; 13882 13883 ASSERT(un != NULL); 13884 ASSERT(!mutex_owned(SD_MUTEX(un))); 13885 13886 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13887 "sd_start_retry_command: entry\n"); 13888 13889 mutex_enter(SD_MUTEX(un)); 13890 13891 un->un_retry_timeid = NULL; 13892 13893 if (un->un_retry_bp != NULL) { 13894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13895 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 13896 un, un->un_retry_bp); 13897 sd_start_cmds(un, un->un_retry_bp); 13898 } 13899 13900 mutex_exit(SD_MUTEX(un)); 13901 13902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13903 "sd_start_retry_command: exit\n"); 13904 } 13905 13906 13907 /* 13908 * Function: sd_start_direct_priority_command 13909 * 13910 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 13911 * received TRAN_BUSY when we called scsi_transport() to send it 13912 * to the underlying HBA. This function is called from timeout(9F) 13913 * context after the delay interval has expired. 13914 * 13915 * Arguments: arg - pointer to associated buf(9S) to be restarted. 13916 * 13917 * Context: timeout(9F) thread context. May not sleep. 
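 *
 * Note (hedged): the timeout-id protocol here mirrors the other
 * restart callbacks in this file: the scheduler stores the id (here
 * un_direct_priority_timeid) under SD_MUTEX, and the callback clears
 * it under the same mutex before restarting the command, so a pending
 * callback can be detected by testing the id against NULL.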
13918 */
13919
13920 static void
13921 sd_start_direct_priority_command(void *arg)
13922 {
13923 struct buf *priority_bp = arg;
13924 struct sd_lun *un;
13925
13926 ASSERT(priority_bp != NULL);
13927 un = SD_GET_UN(priority_bp);
13928 ASSERT(un != NULL);
13929 ASSERT(!mutex_owned(SD_MUTEX(un)));
13930
13931 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13932 "sd_start_direct_priority_command: entry\n");
13933
13934 mutex_enter(SD_MUTEX(un));
13935 un->un_direct_priority_timeid = NULL;
13936 sd_start_cmds(un, priority_bp);
13937 mutex_exit(SD_MUTEX(un));
13938
13939 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13940 "sd_start_direct_priority_command: exit\n");
13941 }
13942
13943
13944 /*
13945 * Function: sd_send_request_sense_command
13946 *
13947 * Description: Sends a REQUEST SENSE command to the target
13948 *
13949 * Context: May be called from interrupt context.
13950 */
13951
13952 static void
13953 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
13954 struct scsi_pkt *pktp)
13955 {
13956 ASSERT(bp != NULL);
13957 ASSERT(un != NULL);
13958 ASSERT(mutex_owned(SD_MUTEX(un)));
13959
13960 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
13961 "entry: buf:0x%p\n", bp);
13962
13963 /*
13964 * If we are syncing or dumping, then fail the command to avoid a
13965 * recursive callback into scsi_transport(). Also fail the command
13966 * if we are suspended (legacy behavior).
13967 */
13968 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
13969 (un->un_state == SD_STATE_DUMPING)) {
13970 sd_return_failed_command(un, bp, EIO);
13971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13972 "sd_send_request_sense_command: syncing/dumping, exit\n");
13973 return;
13974 }
13975
13976 /*
13977 * Retry the failed command and don't issue the request sense if:
13978 * 1) the sense buf is busy
13979 * 2) we have 1 or more outstanding commands on the target
13980 * (the sense data will be cleared or invalidated anyway)
13981 *
13982 * Note: There could be an issue with not checking a retry limit here;
13983 * the problem is determining which retry limit to check.
13984 */
13985 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
13986 /* Don't retry if the command is flagged as non-retryable */
13987 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
13988 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
13989 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter);
13990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13991 "sd_send_request_sense_command: "
13992 "at full throttle, retrying exit\n");
13993 } else {
13994 sd_return_failed_command(un, bp, EIO);
13995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13996 "sd_send_request_sense_command: "
13997 "at full throttle, non-retryable exit\n");
13998 }
13999 return;
14000 }
14001
14002 sd_mark_rqs_busy(un, bp);
14003 sd_start_cmds(un, un->un_rqs_bp);
14004
14005 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14006 "sd_send_request_sense_command: exit\n");
14007 }
14008
14009
14010 /*
14011 * Function: sd_mark_rqs_busy
14012 *
14013 * Description: Indicate that the request sense bp for this instance is
14014 * in use.
14015 *
14016 * Context: May be called under interrupt context
14017 */
14018
14019 static void
14020 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
14021 {
14022 struct sd_xbuf *sense_xp;
14023
14024 ASSERT(un != NULL);
14025 ASSERT(bp != NULL);
14026 ASSERT(mutex_owned(SD_MUTEX(un)));
14027 ASSERT(un->un_sense_isbusy == 0);
14028
14029 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
14030 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
14031
14032 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
14033 ASSERT(sense_xp != NULL);
14034
14035 SD_INFO(SD_LOG_IO, un,
14036 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
14037
14038 ASSERT(sense_xp->xb_pktp != NULL);
14039 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
14040 == (FLAG_SENSING | FLAG_HEAD));
14041
14042 un->un_sense_isbusy = 1;
14043 un->un_rqs_bp->b_resid = 0;
14044 sense_xp->xb_pktp->pkt_resid = 0;
14045 sense_xp->xb_pktp->pkt_reason = 0;
14046
14047 /* So we can get back the bp at interrupt time! */
14048 sense_xp->xb_sense_bp = bp;
14049
14050 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
14051
14052 /*
14053 * Mark this buf as awaiting sense data. (This is already set in
14054 * the pkt_flags for the RQS packet.)
14055 */
14056 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
14057
14058 sense_xp->xb_retry_count = 0;
14059 sense_xp->xb_victim_retry_count = 0;
14060 sense_xp->xb_ua_retry_count = 0;
14061 sense_xp->xb_dma_resid = 0;
14062
14063 /* Clean up the fields for auto-request sense */
14064 sense_xp->xb_sense_status = 0;
14065 sense_xp->xb_sense_state = 0;
14066 sense_xp->xb_sense_resid = 0;
14067 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
14068
14069 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
14070 }
14071
14072
14073 /*
14074 * Function: sd_mark_rqs_idle
14075 *
14076 * Description: SD_MUTEX must be held continuously through this routine
14077 * to prevent reuse of the rqs struct before the caller can
14078 * complete its processing.
14079 *
14080 * Return Code: Pointer to the RQS buf
14081 *
14082 * Context: May be called under interrupt context
14083 */
14084
14085 static struct buf *
14086 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
14087 {
14088 struct buf *bp;
14089 ASSERT(un != NULL);
14090 ASSERT(sense_xp != NULL);
14091 ASSERT(mutex_owned(SD_MUTEX(un)));
14092 ASSERT(un->un_sense_isbusy != 0);
14093
14094 un->un_sense_isbusy = 0;
14095 bp = sense_xp->xb_sense_bp;
14096 sense_xp->xb_sense_bp = NULL;
14097
14098 /* This pkt is no longer interested in getting sense data */
14099 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
14100
14101 return (bp);
14102 }
14103
14104
14105
14106 /*
14107 * Function: sd_alloc_rqs
14108 *
14109 * Description: Set up the unit to receive auto request sense data
14110 *
14111 * Return Code: DDI_SUCCESS or DDI_FAILURE
14112 *
14113 * Context: Called under attach(9E) context
14114 */
14115
14116 static int
14117 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
14118 {
14119 struct sd_xbuf *xp;
14120
14121 ASSERT(un != NULL);
14122 ASSERT(!mutex_owned(SD_MUTEX(un)));
14123 ASSERT(un->un_rqs_bp == NULL);
14124 ASSERT(un->un_rqs_pktp == NULL);
14125
14126 /*
14127 * First allocate the required buf and scsi_pkt structs, then set up
14128 * the CDB in the scsi_pkt for a REQUEST SENSE command.
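 *
 * For reference, a hedged sketch of the 6-byte Group 0 REQUEST SENSE
 * CDB that scsi_setup_cdb() builds below:
 *
 *	byte 0:    SCMD_REQUEST_SENSE (0x03)
 *	bytes 1-3: reserved (zero)
 *	byte 4:    allocation length (SENSE_LENGTH)
 *	byte 5:    control (zero)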
14129 */
14130 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
14131 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
14132 if (un->un_rqs_bp == NULL) {
14133 return (DDI_FAILURE);
14134 }
14135
14136 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
14137 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
14138
14139 if (un->un_rqs_pktp == NULL) {
14140 sd_free_rqs(un);
14141 return (DDI_FAILURE);
14142 }
14143
14144 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
14145 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
14146 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
14147
14148 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
14149
14150 /* Set up the other needed members in the ARQ scsi_pkt. */
14151 un->un_rqs_pktp->pkt_comp = sdintr;
14152 un->un_rqs_pktp->pkt_time = sd_io_time;
14153 un->un_rqs_pktp->pkt_flags |=
14154 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
14155
14156 /*
14157 * Allocate & init the sd_xbuf struct for the RQS command. Do not
14158 * provide any initpkt, destroypkt routines as we take care of
14159 * scsi_pkt allocation/freeing here and in sd_free_rqs().
14160 */
14161 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14162 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
14163 xp->xb_pktp = un->un_rqs_pktp;
14164 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14165 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
14166 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
14167
14168 /*
14169 * Save the pointer to the request sense private bp so it can
14170 * be retrieved in sdintr.
14171 */
14172 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
14173 ASSERT(un->un_rqs_bp->b_private == xp);
14174
14175 /*
14176 * See if the HBA supports auto-request sense for the specified
14177 * target/lun. If it does, then try to enable it (if not already
14178 * enabled).
14179 *
14180 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
14181 * failure, while for other HBAs (pln) scsi_ifsetcap will always
14182 * return success. However, in both of these cases ARQ is always
14183 * enabled and scsi_ifgetcap will always return true. The best approach
14184 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
14185 *
14186 * A third case is an HBA (adp) that always returns enabled on
14187 * scsi_ifgetcap() even when ARQ is not enabled; the best approach
14188 * there is to issue a scsi_ifsetcap() followed by a scsi_ifgetcap().
14189 * Note: this case is to circumvent the Adaptec bug. (x86 only)
14190 */
14191
14192 if (un->un_f_is_fibre == TRUE) {
14193 un->un_f_arq_enabled = TRUE;
14194 } else {
14195 #if defined(__i386) || defined(__amd64)
14196 /*
14197 * Circumvent the Adaptec bug, remove this code when
14198 * the bug is fixed
14199 */
14200 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
14201 #endif
14202 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
14203 case 0:
14204 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14205 "sd_alloc_rqs: HBA supports ARQ\n");
14206 /*
14207 * ARQ is supported by this HBA but currently is not
14208 * enabled. Attempt to enable it and if successful then
14209 * mark this instance as ARQ enabled.
14210 */
14211 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
14212 == 1) {
14213 /* Successfully enabled ARQ in the HBA */
14214 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14215 "sd_alloc_rqs: ARQ enabled\n");
14216 un->un_f_arq_enabled = TRUE;
14217 } else {
14218 /* Could not enable ARQ in the HBA */
14219 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14220 "sd_alloc_rqs: failed ARQ enable\n");
14221 un->un_f_arq_enabled = FALSE;
14222 }
14223 break;
14224 case 1:
14225 /*
14226 * ARQ is supported by this HBA and is already enabled.
14227 * Just mark ARQ as enabled for this instance.
14228 */
14229 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14230 "sd_alloc_rqs: ARQ already enabled\n");
14231 un->un_f_arq_enabled = TRUE;
14232 break;
14233 default:
14234 /*
14235 * ARQ is not supported by this HBA; disable it for this
14236 * instance.
14237 */
14238 SD_INFO(SD_LOG_ATTACH_DETACH, un,
14239 "sd_alloc_rqs: HBA does not support ARQ\n");
14240 un->un_f_arq_enabled = FALSE;
14241 break;
14242 }
14243 }
14244
14245 return (DDI_SUCCESS);
14246 }
14247
14248
14249 /*
14250 * Function: sd_free_rqs
14251 *
14252 * Description: Cleanup for the pre-instance RQS command.
14253 *
14254 * Context: Kernel thread context
14255 */
14256
14257 static void
14258 sd_free_rqs(struct sd_lun *un)
14259 {
14260 ASSERT(un != NULL);
14261
14262 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
14263
14264 /*
14265 * If consistent memory is bound to a scsi_pkt, the pkt
14266 * has to be destroyed *before* freeing the consistent memory.
14267 * Don't change the sequence of these operations.
14268 * scsi_destroy_pkt() might access memory, which isn't allowed,
14269 * after it was freed in scsi_free_consistent_buf().
14270 */
14271 if (un->un_rqs_pktp != NULL) {
14272 scsi_destroy_pkt(un->un_rqs_pktp);
14273 un->un_rqs_pktp = NULL;
14274 }
14275
14276 if (un->un_rqs_bp != NULL) {
14277 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
14278 scsi_free_consistent_buf(un->un_rqs_bp);
14279 un->un_rqs_bp = NULL;
14280 }
14281 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
14282 }
14283
14284
14285
14286 /*
14287 * Function: sd_reduce_throttle
14288 *
14289 * Description: Reduces the maximum number of outstanding commands on a
14290 * target to the current number of outstanding commands.
14291 * Queues a timeout(9F) callback to restore the limit
14292 * after a specified interval has elapsed.
14293 * Typically used when we get a TRAN_BUSY return code
14294 * back from scsi_transport().
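 *
 * Worked example (figures illustrative only): with un_throttle at
 * 256 and three commands outstanding when a TRAN_BUSY arrives,
 * un_throttle drops to 3 while un_busy_throttle remembers 256;
 * sd_restore_throttle() later reverts to the remembered value (or,
 * in the QFULL case, ramps the limit back up roughly 10% at a time).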
14295 *
14296 * Arguments: un - ptr to the sd_lun softstate struct
14297 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
14298 *
14299 * Context: May be called from interrupt context
14300 */
14301
14302 static void
14303 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
14304 {
14305 ASSERT(un != NULL);
14306 ASSERT(mutex_owned(SD_MUTEX(un)));
14307 ASSERT(un->un_ncmds_in_transport >= 0);
14308
14309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14310 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
14311 un, un->un_throttle, un->un_ncmds_in_transport);
14312
14313 if (un->un_throttle > 1) {
14314 if (un->un_f_use_adaptive_throttle == TRUE) {
14315 switch (throttle_type) {
14316 case SD_THROTTLE_TRAN_BUSY:
14317 if (un->un_busy_throttle == 0) {
14318 un->un_busy_throttle = un->un_throttle;
14319 }
14320 break;
14321 case SD_THROTTLE_QFULL:
14322 un->un_busy_throttle = 0;
14323 break;
14324 default:
14325 ASSERT(FALSE);
14326 }
14327
14328 if (un->un_ncmds_in_transport > 0) {
14329 un->un_throttle = un->un_ncmds_in_transport;
14330 }
14331
14332 } else {
14333 if (un->un_ncmds_in_transport == 0) {
14334 un->un_throttle = 1;
14335 } else {
14336 un->un_throttle = un->un_ncmds_in_transport;
14337 }
14338 }
14339 }
14340
14341 /* Reschedule the timeout if none is currently active */
14342 if (un->un_reset_throttle_timeid == NULL) {
14343 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
14344 un, SD_THROTTLE_RESET_INTERVAL);
14345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14346 "sd_reduce_throttle: timeout scheduled!\n");
14347 }
14348
14349 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
14350 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14351 }
14352
14353
14354
14355 /*
14356 * Function: sd_restore_throttle
14357 *
14358 * Description: Callback function for timeout(9F). Resets the current
14359 * value of un->un_throttle to its default.
14360 *
14361 * Arguments: arg - pointer to associated softstate for the device.
14362 *
14363 * Context: May be called from interrupt context
14364 */
14365
14366 static void
14367 sd_restore_throttle(void *arg)
14368 {
14369 struct sd_lun *un = arg;
14370
14371 ASSERT(un != NULL);
14372 ASSERT(!mutex_owned(SD_MUTEX(un)));
14373
14374 mutex_enter(SD_MUTEX(un));
14375
14376 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14377 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
14378
14379 un->un_reset_throttle_timeid = NULL;
14380
14381 if (un->un_f_use_adaptive_throttle == TRUE) {
14382 /*
14383 * If un_busy_throttle is nonzero, then it contains the
14384 * value that un_throttle was when we got a TRAN_BUSY back
14385 * from scsi_transport(). We want to revert to this
14386 * value.
14387 *
14388 * In the QFULL case, the throttle limit will incrementally
14389 * increase until it reaches max throttle.
14390 */
14391 if (un->un_busy_throttle > 0) {
14392 un->un_throttle = un->un_busy_throttle;
14393 un->un_busy_throttle = 0;
14394 } else {
14395 /*
14396 * Increase the throttle by 10% to open the gate
14397 * slowly; schedule another restore if the saved
14398 * throttle has not been reached.
14399 */
14400 short throttle;
14401 if (sd_qfull_throttle_enable) {
14402 throttle = un->un_throttle +
14403 max((un->un_throttle / 10), 1);
14404 un->un_throttle =
14405 (throttle < un->un_saved_throttle) ?
14406 throttle : un->un_saved_throttle;
14407 if (un->un_throttle < un->un_saved_throttle) {
14408 un->un_reset_throttle_timeid =
14409 timeout(sd_restore_throttle,
14410 un, SD_QFULL_THROTTLE_RESET_INTERVAL);
14411 }
14412 }
14413 }
14414
14415 /*
14416 * If un_throttle has fallen below the low-water mark, we
14417 * restore the maximum value here (and allow it to ratchet
14418 * down again if necessary).
14419 */
14420 if (un->un_throttle < un->un_min_throttle) {
14421 un->un_throttle = un->un_saved_throttle;
14422 }
14423 } else {
14424 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
14425 "restoring limit from 0x%x to 0x%x\n",
14426 un->un_throttle, un->un_saved_throttle);
14427 un->un_throttle = un->un_saved_throttle;
14428 }
14429
14430 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14431 "sd_restore_throttle: calling sd_start_cmds!\n");
14432
14433 sd_start_cmds(un, NULL);
14434
14435 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14436 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
14437 un, un->un_throttle);
14438
14439 mutex_exit(SD_MUTEX(un));
14440
14441 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
14442 }
14443
14444 /*
14445 * Function: sdrunout
14446 *
14447 * Description: Callback routine for scsi_init_pkt when a resource allocation
14448 * fails.
14449 *
14450 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
14451 * soft state instance.
14452 *
14453 * Return Code: The scsi_init_pkt routine allows for the callback function to
14454 * return a 0 indicating the callback should be rescheduled or a 1
14455 * indicating not to reschedule. This routine always returns 1
14456 * because the driver always provides a callback function to
14457 * scsi_init_pkt. This results in a callback always being scheduled
14458 * (via the scsi_init_pkt callback implementation) if a resource
14459 * failure occurs.
14460 *
14461 * Context: This callback function may not block or call routines that block
14462 *
14463 * Note: Using the scsi_init_pkt callback facility can result in an I/O
14464 * request persisting at the head of the list which cannot be
14465 * satisfied even after multiple retries. In the future the driver
14466 * may implement some kind of maximum runout count before failing
14467 * an I/O.
14468 */
14469
14470 static int
14471 sdrunout(caddr_t arg)
14472 {
14473 struct sd_lun *un = (struct sd_lun *)arg;
14474
14475 ASSERT(un != NULL);
14476 ASSERT(!mutex_owned(SD_MUTEX(un)));
14477
14478 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
14479
14480 mutex_enter(SD_MUTEX(un));
14481 sd_start_cmds(un, NULL);
14482 mutex_exit(SD_MUTEX(un));
14483 /*
14484 * This callback routine always returns 1 (i.e. do not reschedule)
14485 * because we always specify sdrunout as the callback handler for
14486 * scsi_init_pkt inside the call to sd_start_cmds.
14487 */
14488 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
14489 return (1);
14490 }
14491
14492
14493 /*
14494 * Function: sdintr
14495 *
14496 * Description: Completion callback routine for scsi_pkt(9S) structs
14497 * sent to the HBA driver via scsi_transport(9F).
14498 * 14499 * Context: Interrupt context 14500 */ 14501 14502 static void 14503 sdintr(struct scsi_pkt *pktp) 14504 { 14505 struct buf *bp; 14506 struct sd_xbuf *xp; 14507 struct sd_lun *un; 14508 14509 ASSERT(pktp != NULL); 14510 bp = (struct buf *)pktp->pkt_private; 14511 ASSERT(bp != NULL); 14512 xp = SD_GET_XBUF(bp); 14513 ASSERT(xp != NULL); 14514 ASSERT(xp->xb_pktp != NULL); 14515 un = SD_GET_UN(bp); 14516 ASSERT(un != NULL); 14517 ASSERT(!mutex_owned(SD_MUTEX(un))); 14518 14519 #ifdef SD_FAULT_INJECTION 14520 14521 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14522 /* SD FaultInjection */ 14523 sd_faultinjection(pktp); 14524 14525 #endif /* SD_FAULT_INJECTION */ 14526 14527 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14528 " xp:0x%p, un:0x%p\n", bp, xp, un); 14529 14530 mutex_enter(SD_MUTEX(un)); 14531 14532 /* Reduce the count of the #commands currently in transport */ 14533 un->un_ncmds_in_transport--; 14534 ASSERT(un->un_ncmds_in_transport >= 0); 14535 14536 /* Increment counter to indicate that the callback routine is active */ 14537 un->un_in_callback++; 14538 14539 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14540 14541 #ifdef SDDEBUG 14542 if (bp == un->un_retry_bp) { 14543 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14544 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14545 un, un->un_retry_bp, un->un_ncmds_in_transport); 14546 } 14547 #endif 14548 14549 /* 14550 * If pkt_reason is CMD_DEV_GONE, just fail the command 14551 */ 14552 if (pktp->pkt_reason == CMD_DEV_GONE) { 14553 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14554 "Device is gone\n"); 14555 sd_return_failed_command(un, bp, EIO); 14556 goto exit; 14557 } 14558 14559 /* 14560 * First see if the pkt has auto-request sense data with it.... 14561 * Look at the packet state first so we don't take a performance 14562 * hit looking at the arq enabled flag unless absolutely necessary. 14563 */ 14564 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14565 (un->un_f_arq_enabled == TRUE)) { 14566 /* 14567 * The HBA did an auto request sense for this command so check 14568 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14569 * driver command that should not be retried. 14570 */ 14571 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14572 /* 14573 * Save the relevant sense info into the xp for the 14574 * original cmd. 14575 */ 14576 struct scsi_arq_status *asp; 14577 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14578 xp->xb_sense_status = 14579 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14580 xp->xb_sense_state = asp->sts_rqpkt_state; 14581 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14582 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14583 min(sizeof (struct scsi_extended_sense), 14584 SENSE_LENGTH)); 14585 14586 /* fail the command */ 14587 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14588 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14589 sd_return_failed_command(un, bp, EIO); 14590 goto exit; 14591 } 14592 14593 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14594 /* 14595 * We want to either retry or fail this command, so free 14596 * the DMA resources here. If we retry the command then 14597 * the DMA resources will be reallocated in sd_start_cmds(). 14598 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14599 * causes the *entire* transfer to start over again from the 14600 * beginning of the request, even for PARTIAL chunks that 14601 * have already transferred successfully. 
14602 */ 14603 if ((un->un_f_is_fibre == TRUE) && 14604 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14605 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14606 scsi_dmafree(pktp); 14607 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14608 } 14609 #endif 14610 14611 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14612 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14613 14614 sd_handle_auto_request_sense(un, bp, xp, pktp); 14615 goto exit; 14616 } 14617 14618 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14619 if (pktp->pkt_flags & FLAG_SENSING) { 14620 /* This pktp is from the unit's REQUEST_SENSE command */ 14621 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14622 "sdintr: sd_handle_request_sense\n"); 14623 sd_handle_request_sense(un, bp, xp, pktp); 14624 goto exit; 14625 } 14626 14627 /* 14628 * Check to see if the command successfully completed as requested; 14629 * this is the most common case (and also the hot performance path). 14630 * 14631 * Requirements for successful completion are: 14632 * pkt_reason is CMD_CMPLT and packet status is status good. 14633 * In addition: 14634 * - A residual of zero indicates successful completion no matter what 14635 * the command is. 14636 * - If the residual is not zero and the command is not a read or 14637 * write, then it's still defined as successful completion. In other 14638 * words, if the command is a read or write the residual must be 14639 * zero for successful completion. 14640 * - If the residual is not zero and the command is a read or 14641 * write, and it's a USCSICMD, then it's still defined as 14642 * successful completion. 14643 */ 14644 if ((pktp->pkt_reason == CMD_CMPLT) && 14645 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14646 14647 /* 14648 * Since this command is returned with a good status, we 14649 * can reset the count for Sonoma failover. 14650 */ 14651 un->un_sonoma_failure_count = 0; 14652 14653 /* 14654 * Return all USCSI commands on good status 14655 */ 14656 if (pktp->pkt_resid == 0) { 14657 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14658 "sdintr: returning command for resid == 0\n"); 14659 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14660 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14661 SD_UPDATE_B_RESID(bp, pktp); 14662 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14663 "sdintr: returning command for resid != 0\n"); 14664 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14665 SD_UPDATE_B_RESID(bp, pktp); 14666 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14667 "sdintr: returning uscsi command\n"); 14668 } else { 14669 goto not_successful; 14670 } 14671 sd_return_command(un, bp); 14672 14673 /* 14674 * Decrement counter to indicate that the callback routine 14675 * is done. 14676 */ 14677 un->un_in_callback--; 14678 ASSERT(un->un_in_callback >= 0); 14679 mutex_exit(SD_MUTEX(un)); 14680 14681 return; 14682 } 14683 14684 not_successful: 14685 14686 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14687 /* 14688 * The following is based upon knowledge of the underlying transport 14689 * and its use of DMA resources. This code should be removed when 14690 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14691 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14692 * and sd_start_cmds(). 14693 * 14694 * Free any DMA resources associated with this command if there 14695 * is a chance it could be retried or enqueued for later retry. 
14696 * If we keep the DMA binding then mpxio cannot reissue the 14697 * command on another path whenever a path failure occurs. 14698 * 14699 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14700 * causes the *entire* transfer to start over again from the 14701 * beginning of the request, even for PARTIAL chunks that 14702 * have already transferred successfully. 14703 * 14704 * This is only done for non-uscsi commands (and also skipped for the 14705 * driver's internal RQS command). Also just do this for Fibre Channel 14706 * devices as these are the only ones that support mpxio. 14707 */ 14708 if ((un->un_f_is_fibre == TRUE) && 14709 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14710 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14711 scsi_dmafree(pktp); 14712 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14713 } 14714 #endif 14715 14716 /* 14717 * The command did not successfully complete as requested so check 14718 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14719 * driver command that should not be retried so just return. If 14720 * FLAG_DIAGNOSE is not set the error will be processed below. 14721 */ 14722 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14723 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14724 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14725 /* 14726 * Issue a request sense if a check condition caused the error 14727 * (we handle the auto request sense case above), otherwise 14728 * just fail the command. 14729 */ 14730 if ((pktp->pkt_reason == CMD_CMPLT) && 14731 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14732 sd_send_request_sense_command(un, bp, pktp); 14733 } else { 14734 sd_return_failed_command(un, bp, EIO); 14735 } 14736 goto exit; 14737 } 14738 14739 /* 14740 * The command did not successfully complete as requested so process 14741 * the error, retry, and/or attempt recovery. 
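 *
 * As a quick map of the dispatch below (summary only):
 *	CMD_CMPLT	 -> per-pkt_status handler (good, check/terminated,
 *			    busy, reservation conflict, qfull, or fail)
 *	CMD_INCOMPLETE	 -> sd_pkt_reason_cmd_incomplete()
 *	CMD_TRAN_ERR	 -> sd_pkt_reason_cmd_tran_err()
 *	CMD_RESET	 -> sd_pkt_reason_cmd_reset()
 *	CMD_ABORTED	 -> sd_pkt_reason_cmd_aborted()
 *	CMD_TIMEOUT	 -> sd_pkt_reason_cmd_timeout()
 *	CMD_UNX_BUS_FREE -> sd_pkt_reason_cmd_unx_bus_free()
 *	CMD_TAG_REJECT	 -> sd_pkt_reason_cmd_tag_reject()
 *	(anything else)	 -> sd_pkt_reason_default()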
14742 */ 14743 switch (pktp->pkt_reason) { 14744 case CMD_CMPLT: 14745 switch (SD_GET_PKT_STATUS(pktp)) { 14746 case STATUS_GOOD: 14747 /* 14748 * The command completed successfully with a non-zero 14749 * residual 14750 */ 14751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14752 "sdintr: STATUS_GOOD \n"); 14753 sd_pkt_status_good(un, bp, xp, pktp); 14754 break; 14755 14756 case STATUS_CHECK: 14757 case STATUS_TERMINATED: 14758 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14759 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 14760 sd_pkt_status_check_condition(un, bp, xp, pktp); 14761 break; 14762 14763 case STATUS_BUSY: 14764 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14765 "sdintr: STATUS_BUSY\n"); 14766 sd_pkt_status_busy(un, bp, xp, pktp); 14767 break; 14768 14769 case STATUS_RESERVATION_CONFLICT: 14770 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14771 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 14772 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 14773 break; 14774 14775 case STATUS_QFULL: 14776 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14777 "sdintr: STATUS_QFULL\n"); 14778 sd_pkt_status_qfull(un, bp, xp, pktp); 14779 break; 14780 14781 case STATUS_MET: 14782 case STATUS_INTERMEDIATE: 14783 case STATUS_SCSI2: 14784 case STATUS_INTERMEDIATE_MET: 14785 case STATUS_ACA_ACTIVE: 14786 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14787 "Unexpected SCSI status received: 0x%x\n", 14788 SD_GET_PKT_STATUS(pktp)); 14789 sd_return_failed_command(un, bp, EIO); 14790 break; 14791 14792 default: 14793 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14794 "Invalid SCSI status received: 0x%x\n", 14795 SD_GET_PKT_STATUS(pktp)); 14796 sd_return_failed_command(un, bp, EIO); 14797 break; 14798 14799 } 14800 break; 14801 14802 case CMD_INCOMPLETE: 14803 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14804 "sdintr: CMD_INCOMPLETE\n"); 14805 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 14806 break; 14807 case CMD_TRAN_ERR: 14808 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14809 "sdintr: CMD_TRAN_ERR\n"); 14810 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 14811 break; 14812 case CMD_RESET: 14813 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14814 "sdintr: CMD_RESET \n"); 14815 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 14816 break; 14817 case CMD_ABORTED: 14818 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14819 "sdintr: CMD_ABORTED \n"); 14820 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 14821 break; 14822 case CMD_TIMEOUT: 14823 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14824 "sdintr: CMD_TIMEOUT\n"); 14825 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 14826 break; 14827 case CMD_UNX_BUS_FREE: 14828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14829 "sdintr: CMD_UNX_BUS_FREE \n"); 14830 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 14831 break; 14832 case CMD_TAG_REJECT: 14833 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14834 "sdintr: CMD_TAG_REJECT\n"); 14835 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 14836 break; 14837 default: 14838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14839 "sdintr: default\n"); 14840 sd_pkt_reason_default(un, bp, xp, pktp); 14841 break; 14842 } 14843 14844 exit: 14845 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 14846 14847 /* Decrement counter to indicate that the callback routine is done. */ 14848 un->un_in_callback--; 14849 ASSERT(un->un_in_callback >= 0); 14850 14851 /* 14852 * At this point, the pkt has been dispatched, ie, it is either 14853 * being re-tried or has been returned to its caller and should 14854 * not be referenced. 
14855 */ 14856 14857 mutex_exit(SD_MUTEX(un)); 14858 } 14859 14860 14861 /* 14862 * Function: sd_print_incomplete_msg 14863 * 14864 * Description: Prints the error message for a CMD_INCOMPLETE error. 14865 * 14866 * Arguments: un - ptr to associated softstate for the device. 14867 * bp - ptr to the buf(9S) for the command. 14868 * arg - message string ptr 14869 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 14870 * or SD_NO_RETRY_ISSUED. 14871 * 14872 * Context: May be called under interrupt context 14873 */ 14874 14875 static void 14876 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 14877 { 14878 struct scsi_pkt *pktp; 14879 char *msgp; 14880 char *cmdp = arg; 14881 14882 ASSERT(un != NULL); 14883 ASSERT(mutex_owned(SD_MUTEX(un))); 14884 ASSERT(bp != NULL); 14885 ASSERT(arg != NULL); 14886 pktp = SD_GET_PKTP(bp); 14887 ASSERT(pktp != NULL); 14888 14889 switch (code) { 14890 case SD_DELAYED_RETRY_ISSUED: 14891 case SD_IMMEDIATE_RETRY_ISSUED: 14892 msgp = "retrying"; 14893 break; 14894 case SD_NO_RETRY_ISSUED: 14895 default: 14896 msgp = "giving up"; 14897 break; 14898 } 14899 14900 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 14901 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14902 "incomplete %s- %s\n", cmdp, msgp); 14903 } 14904 } 14905 14906 14907 14908 /* 14909 * Function: sd_pkt_status_good 14910 * 14911 * Description: Processing for a STATUS_GOOD code in pkt_status. 14912 * 14913 * Context: May be called under interrupt context 14914 */ 14915 14916 static void 14917 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 14918 struct sd_xbuf *xp, struct scsi_pkt *pktp) 14919 { 14920 char *cmdp; 14921 14922 ASSERT(un != NULL); 14923 ASSERT(mutex_owned(SD_MUTEX(un))); 14924 ASSERT(bp != NULL); 14925 ASSERT(xp != NULL); 14926 ASSERT(pktp != NULL); 14927 ASSERT(pktp->pkt_reason == CMD_CMPLT); 14928 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 14929 ASSERT(pktp->pkt_resid != 0); 14930 14931 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 14932 14933 SD_UPDATE_ERRSTATS(un, sd_harderrs); 14934 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 14935 case SCMD_READ: 14936 cmdp = "read"; 14937 break; 14938 case SCMD_WRITE: 14939 cmdp = "write"; 14940 break; 14941 default: 14942 SD_UPDATE_B_RESID(bp, pktp); 14943 sd_return_command(un, bp); 14944 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 14945 return; 14946 } 14947 14948 /* 14949 * See if we can retry the read/write, preferably immediately. 14950 * If retries are exhausted, then sd_retry_command() will update 14951 * the b_resid count. 14952 */ 14953 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 14954 cmdp, EIO, (clock_t)0, NULL); 14955 14956 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 14957 } 14958 14959 14960 14961 14962 14963 /* 14964 * Function: sd_handle_request_sense 14965 * 14966 * Description: Processing for non-auto Request Sense command.
14967 * 14968 * Arguments: un - ptr to associated softstate 14969 * sense_bp - ptr to buf(9S) for the RQS command 14970 * sense_xp - ptr to the sd_xbuf for the RQS command 14971 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 14972 * 14973 * Context: May be called under interrupt context 14974 */ 14975 14976 static void 14977 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 14978 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 14979 { 14980 struct buf *cmd_bp; /* buf for the original command */ 14981 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 14982 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 14983 14984 ASSERT(un != NULL); 14985 ASSERT(mutex_owned(SD_MUTEX(un))); 14986 ASSERT(sense_bp != NULL); 14987 ASSERT(sense_xp != NULL); 14988 ASSERT(sense_pktp != NULL); 14989 14990 /* 14991 * Note the sense_bp, sense_xp, and sense_pktp here are for the 14992 * RQS command and not the original command. 14993 */ 14994 ASSERT(sense_pktp == un->un_rqs_pktp); 14995 ASSERT(sense_bp == un->un_rqs_bp); 14996 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 14997 (FLAG_SENSING | FLAG_HEAD)); 14998 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 14999 FLAG_SENSING) == FLAG_SENSING); 15000 15001 /* These are the bp, xp, and pktp for the original command */ 15002 cmd_bp = sense_xp->xb_sense_bp; 15003 cmd_xp = SD_GET_XBUF(cmd_bp); 15004 cmd_pktp = SD_GET_PKTP(cmd_bp); 15005 15006 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15007 /* 15008 * The REQUEST SENSE command failed. Release the REQUEST 15009 * SENSE command for re-use, get back the bp for the original 15010 * command, and attempt to re-try the original command if 15011 * FLAG_DIAGNOSE is not set in the original packet. 15012 */ 15013 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15014 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15015 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15016 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15017 NULL, NULL, EIO, (clock_t)0, NULL); 15018 return; 15019 } 15020 } 15021 15022 /* 15023 * Save the relevant sense info into the xp for the original cmd. 15024 * 15025 * Note: if the request sense failed the state info will be zero 15026 * as set in sd_mark_rqs_busy() 15027 */ 15028 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15029 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15030 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15031 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15032 15033 /* 15034 * Free up the RQS command.... 15035 * NOTE: 15036 * Must do this BEFORE calling sd_validate_sense_data! 15037 * sd_validate_sense_data may return the original command in 15038 * which case the pkt will be freed and the flags can no 15039 * longer be touched. 15040 * SD_MUTEX is held through this process until the command 15041 * is dispatched based upon the sense data, so there are 15042 * no race conditions. 15043 */ 15044 (void) sd_mark_rqs_idle(un, sense_xp); 15045 15046 /* 15047 * For a retryable command see if we have valid sense data, if so then 15048 * turn it over to sd_decode_sense() to figure out the right course of 15049 * action. Just fail a non-retryable command. 
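 *
 * For reference, the linkage used above is (an editor's sketch):
 *
 *	cmd_bp   = sense_xp->xb_sense_bp;	original buf(9S)
 *	cmd_xp   = SD_GET_XBUF(cmd_bp);		its sd_xbuf
 *	cmd_pktp = SD_GET_PKTP(cmd_bp);		its scsi_pkt(9S)
 *
 * i.e. the RQS xbuf's xb_sense_bp points back at the command whose
 * CHECK CONDITION triggered the REQUEST SENSE.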
15050 */ 15051 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15052 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15053 SD_SENSE_DATA_IS_VALID) { 15054 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15055 } 15056 } else { 15057 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15058 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15059 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15060 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15061 sd_return_failed_command(un, cmd_bp, EIO); 15062 } 15063 } 15064 15065 15066 15067 15068 /* 15069 * Function: sd_handle_auto_request_sense 15070 * 15071 * Description: Processing for auto-request sense information. 15072 * 15073 * Arguments: un - ptr to associated softstate 15074 * bp - ptr to buf(9S) for the command 15075 * xp - ptr to the sd_xbuf for the command 15076 * pktp - ptr to the scsi_pkt(9S) for the command 15077 * 15078 * Context: May be called under interrupt context 15079 */ 15080 15081 static void 15082 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15083 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15084 { 15085 struct scsi_arq_status *asp; 15086 15087 ASSERT(un != NULL); 15088 ASSERT(mutex_owned(SD_MUTEX(un))); 15089 ASSERT(bp != NULL); 15090 ASSERT(xp != NULL); 15091 ASSERT(pktp != NULL); 15092 ASSERT(pktp != un->un_rqs_pktp); 15093 ASSERT(bp != un->un_rqs_bp); 15094 15095 /* 15096 * For auto-request sense, we get a scsi_arq_status back from 15097 * the HBA, with the sense data in the sts_sensedata member. 15098 * The pkt_scbp of the packet points to this scsi_arq_status. 15099 */ 15100 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15101 15102 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15103 /* 15104 * The auto REQUEST SENSE failed; see if we can re-try 15105 * the original command. 15106 */ 15107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15108 "auto request sense failed (reason=%s)\n", 15109 scsi_rname(asp->sts_rqpkt_reason)); 15110 15111 sd_reset_target(un, pktp); 15112 15113 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15114 NULL, NULL, EIO, (clock_t)0, NULL); 15115 return; 15116 } 15117 15118 /* Save the relevant sense info into the xp for the original cmd. */ 15119 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15120 xp->xb_sense_state = asp->sts_rqpkt_state; 15121 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15122 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15123 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15124 15125 /* 15126 * See if we have valid sense data, if so then turn it over to 15127 * sd_decode_sense() to figure out the right course of action. 15128 */ 15129 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15130 sd_decode_sense(un, bp, xp, pktp); 15131 } 15132 } 15133 15134 15135 /* 15136 * Function: sd_print_sense_failed_msg 15137 * 15138 * Description: Print log message when RQS has failed. 
15139 * 15140 * Arguments: un - ptr to associated softstate 15141 * bp - ptr to buf(9S) for the command 15142 * arg - generic message string ptr 15143 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15144 * or SD_NO_RETRY_ISSUED 15145 * 15146 * Context: May be called from interrupt context 15147 */ 15148 15149 static void 15150 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15151 int code) 15152 { 15153 char *msgp = arg; 15154 15155 ASSERT(un != NULL); 15156 ASSERT(mutex_owned(SD_MUTEX(un))); 15157 ASSERT(bp != NULL); 15158 15159 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15160 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15161 } 15162 } 15163 15164 15165 /* 15166 * Function: sd_validate_sense_data 15167 * 15168 * Description: Check the given sense data for validity. 15169 * If the sense data is not valid, the command will 15170 * be either failed or retried! 15171 * 15172 * Return Code: SD_SENSE_DATA_IS_INVALID 15173 * SD_SENSE_DATA_IS_VALID 15174 * 15175 * Context: May be called from interrupt context 15176 */ 15177 15178 static int 15179 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15180 { 15181 struct scsi_extended_sense *esp; 15182 struct scsi_pkt *pktp; 15183 size_t actual_len; 15184 char *msgp = NULL; 15185 15186 ASSERT(un != NULL); 15187 ASSERT(mutex_owned(SD_MUTEX(un))); 15188 ASSERT(bp != NULL); 15189 ASSERT(bp != un->un_rqs_bp); 15190 ASSERT(xp != NULL); 15191 15192 pktp = SD_GET_PKTP(bp); 15193 ASSERT(pktp != NULL); 15194 15195 /* 15196 * Check the status of the RQS command (auto or manual). 15197 */ 15198 switch (xp->xb_sense_status & STATUS_MASK) { 15199 case STATUS_GOOD: 15200 break; 15201 15202 case STATUS_RESERVATION_CONFLICT: 15203 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15204 return (SD_SENSE_DATA_IS_INVALID); 15205 15206 case STATUS_BUSY: 15207 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15208 "Busy Status on REQUEST SENSE\n"); 15209 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15210 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15211 return (SD_SENSE_DATA_IS_INVALID); 15212 15213 case STATUS_QFULL: 15214 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15215 "QFULL Status on REQUEST SENSE\n"); 15216 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15217 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15218 return (SD_SENSE_DATA_IS_INVALID); 15219 15220 case STATUS_CHECK: 15221 case STATUS_TERMINATED: 15222 msgp = "Check Condition on REQUEST SENSE\n"; 15223 goto sense_failed; 15224 15225 default: 15226 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15227 goto sense_failed; 15228 } 15229 15230 /* 15231 * See if we got the minimum required amount of sense data. 15232 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15233 * or less. 
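 *
 * Worked example (illustrative; assumes a 20-byte SENSE_LENGTH): with
 * an xb_sense_resid of 2, actual_len below is 18. The data is usable
 * only if STATE_XFERRED_DATA is set, actual_len is non-zero, and
 * actual_len is at least SUN_MIN_SENSE_LENGTH.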
15234 */ 15235 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 15236 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15237 (actual_len == 0)) { 15238 msgp = "Request Sense couldn't get sense data\n"; 15239 goto sense_failed; 15240 } 15241 15242 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15243 msgp = "Not enough sense information\n"; 15244 goto sense_failed; 15245 } 15246 15247 /* 15248 * We require the extended sense data 15249 */ 15250 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15251 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15252 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15253 static char tmp[8]; 15254 static char buf[148]; 15255 char *p = (char *)(xp->xb_sense_data); 15256 int i; 15257 15258 mutex_enter(&sd_sense_mutex); 15259 (void) strcpy(buf, "undecodable sense information:"); 15260 for (i = 0; i < actual_len; i++) { 15261 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15262 (void) strcpy(&buf[strlen(buf)], tmp); 15263 } 15264 i = strlen(buf); 15265 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15266 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15267 mutex_exit(&sd_sense_mutex); 15268 } 15269 /* Note: Legacy behavior, fail the command with no retry */ 15270 sd_return_failed_command(un, bp, EIO); 15271 return (SD_SENSE_DATA_IS_INVALID); 15272 } 15273 15274 /* 15275 * Check that es_code is valid (es_class concatenated with es_code 15276 * makes up the "response code" field). es_class will always be 7, so 15277 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the 15278 * format. 15279 */ 15280 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15281 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15282 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15283 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15284 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15285 goto sense_failed; 15286 } 15287 15288 return (SD_SENSE_DATA_IS_VALID); 15289 15290 sense_failed: 15291 /* 15292 * If the request sense failed (for whatever reason), attempt 15293 * to retry the original command. 15294 */ 15295 #if defined(__i386) || defined(__amd64) 15296 /* 15297 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15298 * sddef.h for the Sparc platform, while x86 uses one binary 15299 * for both SCSI and FC. 15300 * The SD_RETRY_DELAY value needs to be adjusted here 15301 * whenever SD_RETRY_DELAY changes in sddef.h. 15302 */ 15303 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15304 sd_print_sense_failed_msg, msgp, EIO, 15305 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15306 #else 15307 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15308 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15309 #endif 15310 15311 return (SD_SENSE_DATA_IS_INVALID); 15312 } 15313 15314 15315 15316 /* 15317 * Function: sd_decode_sense 15318 * 15319 * Description: Take recovery action(s) when SCSI Sense Data is received. 15320 * 15321 * Context: Interrupt context.
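 *
 *		For reference (an editor's sketch): the dispatch below
 *		keys off the DDI sense accessors, which handle both
 *		fixed and descriptor sense formats, e.g.:
 *
 *			uint8_t key  = scsi_sense_key(xp->xb_sense_data);
 *			uint8_t asc  = scsi_sense_asc(xp->xb_sense_data);
 *			uint8_t ascq = scsi_sense_ascq(xp->xb_sense_data);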
15322 */ 15323 15324 static void 15325 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15326 struct scsi_pkt *pktp) 15327 { 15328 uint8_t sense_key; 15329 15330 ASSERT(un != NULL); 15331 ASSERT(mutex_owned(SD_MUTEX(un))); 15332 ASSERT(bp != NULL); 15333 ASSERT(bp != un->un_rqs_bp); 15334 ASSERT(xp != NULL); 15335 ASSERT(pktp != NULL); 15336 15337 sense_key = scsi_sense_key(xp->xb_sense_data); 15338 15339 switch (sense_key) { 15340 case KEY_NO_SENSE: 15341 sd_sense_key_no_sense(un, bp, xp, pktp); 15342 break; 15343 case KEY_RECOVERABLE_ERROR: 15344 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15345 bp, xp, pktp); 15346 break; 15347 case KEY_NOT_READY: 15348 sd_sense_key_not_ready(un, xp->xb_sense_data, 15349 bp, xp, pktp); 15350 break; 15351 case KEY_MEDIUM_ERROR: 15352 case KEY_HARDWARE_ERROR: 15353 sd_sense_key_medium_or_hardware_error(un, 15354 xp->xb_sense_data, bp, xp, pktp); 15355 break; 15356 case KEY_ILLEGAL_REQUEST: 15357 sd_sense_key_illegal_request(un, bp, xp, pktp); 15358 break; 15359 case KEY_UNIT_ATTENTION: 15360 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15361 bp, xp, pktp); 15362 break; 15363 case KEY_WRITE_PROTECT: 15364 case KEY_VOLUME_OVERFLOW: 15365 case KEY_MISCOMPARE: 15366 sd_sense_key_fail_command(un, bp, xp, pktp); 15367 break; 15368 case KEY_BLANK_CHECK: 15369 sd_sense_key_blank_check(un, bp, xp, pktp); 15370 break; 15371 case KEY_ABORTED_COMMAND: 15372 sd_sense_key_aborted_command(un, bp, xp, pktp); 15373 break; 15374 case KEY_VENDOR_UNIQUE: 15375 case KEY_COPY_ABORTED: 15376 case KEY_EQUAL: 15377 case KEY_RESERVED: 15378 default: 15379 sd_sense_key_default(un, xp->xb_sense_data, 15380 bp, xp, pktp); 15381 break; 15382 } 15383 } 15384 15385 15386 /* 15387 * Function: sd_dump_memory 15388 * 15389 * Description: Debug logging routine to print the contents of a user-provided 15390 * buffer. The output of the buffer is broken up into 256 byte 15391 * segments due to a size constraint of the scsi_log 15392 * implementation. 15393 * 15394 * Arguments: un - ptr to softstate 15395 * comp - component mask 15396 * title - "title" string to precede data when printed 15397 * data - ptr to data block to be printed 15398 * len - size of data block to be printed 15399 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15400 * 15401 * Context: May be called from interrupt context 15402 */ 15403 15404 #define SD_DUMP_MEMORY_BUF_SIZE 256 15405 15406 static char *sd_dump_format_string[] = { 15407 " 0x%02x", 15408 " %c" 15409 }; 15410 15411 static void 15412 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15413 int len, int fmt) 15414 { 15415 int i, j; 15416 int avail_count; 15417 int start_offset; 15418 int end_offset; 15419 size_t entry_len; 15420 char *bufp; 15421 char *local_buf; 15422 char *format_string; 15423 15424 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15425 15426 /* 15427 * In the debug version of the driver, this function is called from a 15428 * number of places which are NOPs in the release driver. 15429 * The debug driver therefore has additional methods of filtering 15430 * debug output. 15431 */ 15432 #ifdef SDDEBUG 15433 /* 15434 * In the debug version of the driver we can reduce the amount of debug 15435 * messages by setting sd_error_level to something other than 15436 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15437 * sd_component_mask.
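 *
 * A worked example of the layout math further below (illustrative):
 * with the hex format string " 0x%02x" each entry prints as 5
 * characters, so for a title such as "Sense Data" (10 characters)
 * avail_count = (256 - 10 - 3) / 5 = 48 entries fit on each line.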
15438 */ 15439 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15440 (sd_error_level != SCSI_ERR_ALL)) { 15441 return; 15442 } 15443 if (((sd_component_mask & comp) == 0) || 15444 (sd_error_level != SCSI_ERR_ALL)) { 15445 return; 15446 } 15447 #else 15448 if (sd_error_level != SCSI_ERR_ALL) { 15449 return; 15450 } 15451 #endif 15452 15453 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15454 bufp = local_buf; 15455 /* 15456 * Available length is the length of local_buf[], minus the 15457 * length of the title string, minus one for the ":", minus 15458 * one for the newline, minus one for the NULL terminator. 15459 * This gives the #bytes available for holding the printed 15460 * values from the given data buffer. 15461 */ 15462 if (fmt == SD_LOG_HEX) { 15463 format_string = sd_dump_format_string[0]; 15464 } else /* SD_LOG_CHAR */ { 15465 format_string = sd_dump_format_string[1]; 15466 } 15467 /* 15468 * Available count is the number of elements from the given 15469 * data buffer that we can fit into the available length. 15470 * This is based upon the size of the format string used. 15471 * Make one entry and find its size. 15472 */ 15473 (void) sprintf(bufp, format_string, data[0]); 15474 entry_len = strlen(bufp); 15475 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15476 15477 j = 0; 15478 while (j < len) { 15479 bufp = local_buf; 15480 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15481 start_offset = j; 15482 15483 end_offset = start_offset + avail_count; 15484 15485 (void) sprintf(bufp, "%s:", title); 15486 bufp += strlen(bufp); 15487 for (i = start_offset; ((i < end_offset) && (j < len)); 15488 i++, j++) { 15489 (void) sprintf(bufp, format_string, data[i]); 15490 bufp += entry_len; 15491 } 15492 (void) sprintf(bufp, "\n"); 15493 15494 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15495 } 15496 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15497 } 15498 15499 /* 15500 * Function: sd_print_sense_msg 15501 * 15502 * Description: Log a message based upon the given sense data.
15503 * 15504 * Arguments: un - ptr to associated softstate 15505 * bp - ptr to buf(9S) for the command 15506 * arg - ptr to the associated sd_sense_info struct 15507 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15508 * or SD_NO_RETRY_ISSUED 15509 * 15510 * Context: May be called from interrupt context 15511 */ 15512 15513 static void 15514 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15515 { 15516 struct sd_xbuf *xp; 15517 struct scsi_pkt *pktp; 15518 uint8_t *sensep; 15519 daddr_t request_blkno; 15520 diskaddr_t err_blkno; 15521 int severity; 15522 int pfa_flag; 15523 extern struct scsi_key_strings scsi_cmds[]; 15524 15525 ASSERT(un != NULL); 15526 ASSERT(mutex_owned(SD_MUTEX(un))); 15527 ASSERT(bp != NULL); 15528 xp = SD_GET_XBUF(bp); 15529 ASSERT(xp != NULL); 15530 pktp = SD_GET_PKTP(bp); 15531 ASSERT(pktp != NULL); 15532 ASSERT(arg != NULL); 15533 15534 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15535 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15536 15537 if ((code == SD_DELAYED_RETRY_ISSUED) || 15538 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15539 severity = SCSI_ERR_RETRYABLE; 15540 } 15541 15542 /* Use absolute block number for the request block number */ 15543 request_blkno = xp->xb_blkno; 15544 15545 /* 15546 * Now try to get the error block number from the sense data 15547 */ 15548 sensep = xp->xb_sense_data; 15549 15550 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15551 (uint64_t *)&err_blkno)) { 15552 /* 15553 * We retrieved the error block number from the information 15554 * portion of the sense data. 15555 * 15556 * For USCSI commands we are better off using the error 15557 * block no. as the requested block no. (This is the best 15558 * we can estimate.) 15559 */ 15560 if ((SD_IS_BUFIO(xp) == FALSE) && 15561 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15562 request_blkno = err_blkno; 15563 } 15564 } else { 15565 /* 15566 * Without the es_valid bit set (for fixed format) or an 15567 * information descriptor (for descriptor format) we cannot 15568 * be certain of the error blkno, so just use the 15569 * request_blkno. 15570 */ 15571 err_blkno = (diskaddr_t)request_blkno; 15572 } 15573 15574 /* 15575 * The following will log the buffer contents for the release driver 15576 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15577 * level is set to verbose.
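 *
 * (Editor's note, hedged: for fixed-format sense data the error LBA
 * comes from the information field, bytes 3-6, and is meaningful only
 * when the es_valid bit is set; scsi_sense_info_uint64() above
 * performs that extraction for both fixed and descriptor formats.)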
15578 */ 15579 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15580 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15581 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15582 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15583 15584 if (pfa_flag == FALSE) { 15585 /* This is normally only set for USCSI */ 15586 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15587 return; 15588 } 15589 15590 if ((SD_IS_BUFIO(xp) == TRUE) && 15591 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15592 (severity < sd_error_level))) { 15593 return; 15594 } 15595 } 15596 15597 /* 15598 * Check for Sonoma Failover and keep a count of how many failed I/O's 15599 */ 15600 if ((SD_IS_LSI(un)) && 15601 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15602 (scsi_sense_asc(sensep) == 0x94) && 15603 (scsi_sense_ascq(sensep) == 0x01)) { 15604 un->un_sonoma_failure_count++; 15605 if (un->un_sonoma_failure_count > 1) { 15606 return; 15607 } 15608 } 15609 15610 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15611 request_blkno, err_blkno, scsi_cmds, 15612 (struct scsi_extended_sense *)sensep, 15613 un->un_additional_codes, NULL); 15614 } 15615 15616 /* 15617 * Function: sd_sense_key_no_sense 15618 * 15619 * Description: Recovery action when sense data was not received. 15620 * 15621 * Context: May be called from interrupt context 15622 */ 15623 15624 static void 15625 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15626 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15627 { 15628 struct sd_sense_info si; 15629 15630 ASSERT(un != NULL); 15631 ASSERT(mutex_owned(SD_MUTEX(un))); 15632 ASSERT(bp != NULL); 15633 ASSERT(xp != NULL); 15634 ASSERT(pktp != NULL); 15635 15636 si.ssi_severity = SCSI_ERR_FATAL; 15637 si.ssi_pfa_flag = FALSE; 15638 15639 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15640 15641 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15642 &si, EIO, (clock_t)0, NULL); 15643 } 15644 15645 15646 /* 15647 * Function: sd_sense_key_recoverable_error 15648 * 15649 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15650 * 15651 * Context: May be called from interrupt context 15652 */ 15653 15654 static void 15655 sd_sense_key_recoverable_error(struct sd_lun *un, 15656 uint8_t *sense_datap, 15657 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15658 { 15659 struct sd_sense_info si; 15660 uint8_t asc = scsi_sense_asc(sense_datap); 15661 15662 ASSERT(un != NULL); 15663 ASSERT(mutex_owned(SD_MUTEX(un))); 15664 ASSERT(bp != NULL); 15665 ASSERT(xp != NULL); 15666 ASSERT(pktp != NULL); 15667 15668 /* 15669 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15670 */ 15671 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15672 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15673 si.ssi_severity = SCSI_ERR_INFO; 15674 si.ssi_pfa_flag = TRUE; 15675 } else { 15676 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15677 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15678 si.ssi_severity = SCSI_ERR_RECOVERED; 15679 si.ssi_pfa_flag = FALSE; 15680 } 15681 15682 if (pktp->pkt_resid == 0) { 15683 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15684 sd_return_command(un, bp); 15685 return; 15686 } 15687 15688 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15689 &si, EIO, (clock_t)0, NULL); 15690 } 15691 15692 15693 15694 15695 /* 15696 * Function: sd_sense_key_not_ready 15697 * 15698 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
15699 * 15700 * Context: May be called from interrupt context 15701 */ 15702 15703 static void 15704 sd_sense_key_not_ready(struct sd_lun *un, 15705 uint8_t *sense_datap, 15706 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15707 { 15708 struct sd_sense_info si; 15709 uint8_t asc = scsi_sense_asc(sense_datap); 15710 uint8_t ascq = scsi_sense_ascq(sense_datap); 15711 15712 ASSERT(un != NULL); 15713 ASSERT(mutex_owned(SD_MUTEX(un))); 15714 ASSERT(bp != NULL); 15715 ASSERT(xp != NULL); 15716 ASSERT(pktp != NULL); 15717 15718 si.ssi_severity = SCSI_ERR_FATAL; 15719 si.ssi_pfa_flag = FALSE; 15720 15721 /* 15722 * Update error stats after first NOT READY error. Disks may have 15723 * been powered down and may need to be restarted. For CDROMs, 15724 * report NOT READY errors only if media is present. 15725 */ 15726 if ((ISCD(un) && (asc == 0x3A)) || 15727 (xp->xb_retry_count > 0)) { 15728 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15729 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 15730 } 15731 15732 /* 15733 * Just fail if the "not ready" retry limit has been reached. 15734 */ 15735 if (xp->xb_retry_count >= un->un_notready_retry_count) { 15736 /* Special check for error message printing for removables. */ 15737 if (un->un_f_has_removable_media && (asc == 0x04) && 15738 (ascq >= 0x04)) { 15739 si.ssi_severity = SCSI_ERR_ALL; 15740 } 15741 goto fail_command; 15742 } 15743 15744 /* 15745 * Check the ASC and ASCQ in the sense data as needed, to determine 15746 * what to do. 15747 */ 15748 switch (asc) { 15749 case 0x04: /* LOGICAL UNIT NOT READY */ 15750 /* 15751 * Disk drives that don't spin up result in a very long delay 15752 * in format without warning messages. We will log a message 15753 * if the error level is set to verbose. 15754 */ 15755 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15756 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15757 "logical unit not ready, resetting disk\n"); 15758 } 15759 15760 /* 15761 * There are different requirements for CDROMs and disks for 15762 * the number of retries. If a CD-ROM is giving this, it is 15763 * probably reading TOC and is in the process of getting 15764 * ready, so we should keep on trying for a long time to make 15765 * sure that all types of media are taken into account (for 15766 * some media the drive takes a long time to read TOC). For 15767 * disks we do not want to retry this too many times as this 15768 * can cause a long hang in format when the drive refuses to 15769 * spin up (a very common failure). 15770 */ 15771 switch (ascq) { 15772 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 15773 /* 15774 * Disk drives frequently refuse to spin up which 15775 * results in a very long hang in format without 15776 * warning messages. 15777 * 15778 * Note: This code preserves the legacy behavior of 15779 * comparing xb_retry_count against zero for fibre 15780 * channel targets instead of comparing against the 15781 * un_reset_retry_count value. The reason for this 15782 * discrepancy has been so utterly lost beneath the 15783 * Sands of Time that even Indiana Jones could not 15784 * find it.
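 *
 * In tabular form (an editor's summary of the code below):
 *	fibre:	   reset the target when SD_LOGMASK_DIAG is set or
 *		   xb_retry_count > 0, and no start/stop is pending
 *	non-fibre: reset the target when SD_LOGMASK_DIAG is set or
 *		   xb_retry_count > un_reset_retry_count, and no
 *		   start/stop is pending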
15785 */ 15786 if (un->un_f_is_fibre == TRUE) { 15787 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15788 (xp->xb_retry_count > 0)) && 15789 (un->un_startstop_timeid == NULL)) { 15790 scsi_log(SD_DEVINFO(un), sd_label, 15791 CE_WARN, "logical unit not ready, " 15792 "resetting disk\n"); 15793 sd_reset_target(un, pktp); 15794 } 15795 } else { 15796 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15797 (xp->xb_retry_count > 15798 un->un_reset_retry_count)) && 15799 (un->un_startstop_timeid == NULL)) { 15800 scsi_log(SD_DEVINFO(un), sd_label, 15801 CE_WARN, "logical unit not ready, " 15802 "resetting disk\n"); 15803 sd_reset_target(un, pktp); 15804 } 15805 } 15806 break; 15807 15808 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 15809 /* 15810 * If the target is in the process of becoming 15811 * ready, just proceed with the retry. This can 15812 * happen with CD-ROMs that take a long time to 15813 * read TOC after a power cycle or reset. 15814 */ 15815 goto do_retry; 15816 15817 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 15818 break; 15819 15820 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 15821 /* 15822 * Retries cannot help here so just fail right away. 15823 */ 15824 goto fail_command; 15825 15826 case 0x88: 15827 /* 15828 * Vendor-unique code for T3/T4: it indicates a 15829 * path problem in a multipathed config, but as far as 15830 * the target driver is concerned it equates to a fatal 15831 * error, so we should just fail the command right away 15832 * (without printing anything to the console). If this 15833 * is not a T3/T4, fall thru to the default recovery 15834 * action. 15835 * T3/T4 is FC only, don't need to check is_fibre 15836 */ 15837 if (SD_IS_T3(un) || SD_IS_T4(un)) { 15838 sd_return_failed_command(un, bp, EIO); 15839 return; 15840 } 15841 /* FALLTHRU */ 15842 15843 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 15844 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 15845 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 15846 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 15847 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 15848 default: /* Possible future codes in SCSI spec? */ 15849 /* 15850 * For removable-media devices, do not retry if 15851 * ASCQ > 2 as these result mostly from USCSI commands 15852 * on MMC devices issued to check status of an 15853 * operation initiated in immediate mode. Also for 15854 * ASCQ >= 4 do not print console messages as these 15855 * mainly represent a user-initiated operation 15856 * instead of a system failure. 15857 */ 15858 if (un->un_f_has_removable_media) { 15859 si.ssi_severity = SCSI_ERR_ALL; 15860 goto fail_command; 15861 } 15862 break; 15863 } 15864 15865 /* 15866 * As part of our recovery attempt for the NOT READY 15867 * condition, we issue a START STOP UNIT command. However 15868 * we want to wait for a short delay before attempting this 15869 * as there may still be more commands coming back from the 15870 * target with the check condition. To do this we use 15871 * timeout(9F) to call sd_start_stop_unit_callback() after 15872 * the delay interval expires. (sd_start_stop_unit_callback() 15873 * dispatches sd_start_stop_unit_task(), which will issue 15874 * the actual START STOP UNIT command.) The delay interval 15875 * is one-half of the delay that we will use to retry the 15876 * command that generated the NOT READY condition.
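 *
 * (Illustratively: the NOT READY retry delay used elsewhere in
 * this function is SD_BSY_TIMEOUT, so the START STOP UNIT
 * callback below is armed at one-half of that, SD_BSY_TIMEOUT / 2,
 * giving the unit a head start before the command is re-queued.)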
15877 * 15878 * Note that we could just dispatch sd_start_stop_unit_task() 15879 * from here and allow it to sleep for the delay interval, 15880 * but then we would be tying up the taskq thread 15881 * unnecessarily for the duration of the delay. 15882 * 15883 * Do not issue the START STOP UNIT if the current command 15884 * is already a START STOP UNIT. 15885 */ 15886 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 15887 break; 15888 } 15889 15890 /* 15891 * Do not schedule the timeout if one is already pending. 15892 */ 15893 if (un->un_startstop_timeid != NULL) { 15894 SD_INFO(SD_LOG_ERROR, un, 15895 "sd_sense_key_not_ready: restart already issued to" 15896 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 15897 ddi_get_instance(SD_DEVINFO(un))); 15898 break; 15899 } 15900 15901 /* 15902 * Schedule the START STOP UNIT command, then queue the command 15903 * for a retry. 15904 * 15905 * Note: A timeout is not scheduled for this retry because we 15906 * want the retry to be serial with the START_STOP_UNIT. The 15907 * retry will be started when the START_STOP_UNIT is completed 15908 * in sd_start_stop_unit_task. 15909 */ 15910 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 15911 un, SD_BSY_TIMEOUT / 2); 15912 xp->xb_retry_count++; 15913 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 15914 return; 15915 15916 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 15917 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15918 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15919 "unit does not respond to selection\n"); 15920 } 15921 break; 15922 15923 case 0x3A: /* MEDIUM NOT PRESENT */ 15924 if (sd_error_level >= SCSI_ERR_FATAL) { 15925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15926 "Caddy not inserted in drive\n"); 15927 } 15928 15929 sr_ejected(un); 15930 un->un_mediastate = DKIO_EJECTED; 15931 /* The state has changed, inform the media watch routines */ 15932 cv_broadcast(&un->un_state_cv); 15933 /* Just fail if no media is present in the drive. */ 15934 goto fail_command; 15935 15936 default: 15937 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15938 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15939 "Unit not Ready. Additional sense code 0x%x\n", 15940 asc); 15941 } 15942 break; 15943 } 15944 15945 do_retry: 15946 15947 /* 15948 * Retry the command, as some targets may report NOT READY for 15949 * several seconds after being reset. 15950 */ 15951 xp->xb_retry_count++; 15952 si.ssi_severity = SCSI_ERR_RETRYABLE; 15953 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 15954 &si, EIO, SD_BSY_TIMEOUT, NULL); 15955 15956 return; 15957 15958 fail_command: 15959 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15960 sd_return_failed_command(un, bp, EIO); 15961 } 15962 15963 15964 15965 /* 15966 * Function: sd_sense_key_medium_or_hardware_error 15967 * 15968 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 15969 * sense key.
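 *
 *		Editor's sketch of the recovery sequence coded below:
 *		once xb_retry_count reaches un_reset_retry_count, and
 *		if bus/device resets are allowed, issue
 *		scsi_reset(SD_ADDRESS(un), RESET_LUN) when LUN resets
 *		are enabled, falling back to RESET_TARGET if that
 *		fails; an LSI array reporting ASC 0x84 (LUN dead) is
 *		trusted, and the reset is skipped entirely.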
15970 * 15971 * Context: May be called from interrupt context 15972 */ 15973 15974 static void 15975 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 15976 uint8_t *sense_datap, 15977 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15978 { 15979 struct sd_sense_info si; 15980 uint8_t sense_key = scsi_sense_key(sense_datap); 15981 uint8_t asc = scsi_sense_asc(sense_datap); 15982 15983 ASSERT(un != NULL); 15984 ASSERT(mutex_owned(SD_MUTEX(un))); 15985 ASSERT(bp != NULL); 15986 ASSERT(xp != NULL); 15987 ASSERT(pktp != NULL); 15988 15989 si.ssi_severity = SCSI_ERR_FATAL; 15990 si.ssi_pfa_flag = FALSE; 15991 15992 if (sense_key == KEY_MEDIUM_ERROR) { 15993 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 15994 } 15995 15996 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15997 15998 if ((un->un_reset_retry_count != 0) && 15999 (xp->xb_retry_count == un->un_reset_retry_count)) { 16000 mutex_exit(SD_MUTEX(un)); 16001 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16002 if (un->un_f_allow_bus_device_reset == TRUE) { 16003 16004 boolean_t try_resetting_target = B_TRUE; 16005 16006 /* 16007 * We need to be able to handle specific ASC when we are 16008 * handling a KEY_HARDWARE_ERROR. In particular 16009 * taking the default action of resetting the target may 16010 * not be the appropriate way to attempt recovery. 16011 * Resetting a target because of a single LUN failure 16012 * victimizes all LUNs on that target. 16013 * 16014 * This is true for the LSI arrays, if an LSI 16015 * array controller returns an ASC of 0x84 (LUN Dead) we 16016 * should trust it. 16017 */ 16018 16019 if (sense_key == KEY_HARDWARE_ERROR) { 16020 switch (asc) { 16021 case 0x84: 16022 if (SD_IS_LSI(un)) { 16023 try_resetting_target = B_FALSE; 16024 } 16025 break; 16026 default: 16027 break; 16028 } 16029 } 16030 16031 if (try_resetting_target == B_TRUE) { 16032 int reset_retval = 0; 16033 if (un->un_f_lun_reset_enabled == TRUE) { 16034 SD_TRACE(SD_LOG_IO_CORE, un, 16035 "sd_sense_key_medium_or_hardware_" 16036 "error: issuing RESET_LUN\n"); 16037 reset_retval = 16038 scsi_reset(SD_ADDRESS(un), 16039 RESET_LUN); 16040 } 16041 if (reset_retval == 0) { 16042 SD_TRACE(SD_LOG_IO_CORE, un, 16043 "sd_sense_key_medium_or_hardware_" 16044 "error: issuing RESET_TARGET\n"); 16045 (void) scsi_reset(SD_ADDRESS(un), 16046 RESET_TARGET); 16047 } 16048 } 16049 } 16050 mutex_enter(SD_MUTEX(un)); 16051 } 16052 16053 /* 16054 * This really ought to be a fatal error, but we will retry anyway 16055 * as some drives report this as a spurious error. 16056 */ 16057 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16058 &si, EIO, (clock_t)0, NULL); 16059 } 16060 16061 16062 16063 /* 16064 * Function: sd_sense_key_illegal_request 16065 * 16066 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
16067 * 16068 * Context: May be called from interrupt context 16069 */ 16070 16071 static void 16072 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16073 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16074 { 16075 struct sd_sense_info si; 16076 16077 ASSERT(un != NULL); 16078 ASSERT(mutex_owned(SD_MUTEX(un))); 16079 ASSERT(bp != NULL); 16080 ASSERT(xp != NULL); 16081 ASSERT(pktp != NULL); 16082 16083 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16084 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16085 16086 si.ssi_severity = SCSI_ERR_INFO; 16087 si.ssi_pfa_flag = FALSE; 16088 16089 /* Pointless to retry if the target thinks it's an illegal request */ 16090 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16091 sd_return_failed_command(un, bp, EIO); 16092 } 16093 16094 16095 16096 16097 /* 16098 * Function: sd_sense_key_unit_attention 16099 * 16100 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16101 * 16102 * Context: May be called from interrupt context 16103 */ 16104 16105 static void 16106 sd_sense_key_unit_attention(struct sd_lun *un, 16107 uint8_t *sense_datap, 16108 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16109 { 16110 /* 16111 * For UNIT ATTENTION we allow retries for one minute. Devices 16112 * like Sonoma can return UNIT ATTENTION close to a minute 16113 * under certain conditions. 16114 */ 16115 int retry_check_flag = SD_RETRIES_UA; 16116 boolean_t kstat_updated = B_FALSE; 16117 struct sd_sense_info si; 16118 uint8_t asc = scsi_sense_asc(sense_datap); 16119 16120 ASSERT(un != NULL); 16121 ASSERT(mutex_owned(SD_MUTEX(un))); 16122 ASSERT(bp != NULL); 16123 ASSERT(xp != NULL); 16124 ASSERT(pktp != NULL); 16125 16126 si.ssi_severity = SCSI_ERR_INFO; 16127 si.ssi_pfa_flag = FALSE; 16128 16129 16130 switch (asc) { 16131 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16132 if (sd_report_pfa != 0) { 16133 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16134 si.ssi_pfa_flag = TRUE; 16135 retry_check_flag = SD_RETRIES_STANDARD; 16136 goto do_retry; 16137 } 16138 16139 break; 16140 16141 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16142 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16143 un->un_resvd_status |= 16144 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16145 } 16146 #ifdef _LP64 16147 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16148 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16149 un, KM_NOSLEEP) == 0) { 16150 /* 16151 * If we can't dispatch the task we'll just 16152 * live without descriptor sense. We can 16153 * try again on the next "unit attention" 16154 */ 16155 SD_ERROR(SD_LOG_ERROR, un, 16156 "sd_sense_key_unit_attention: " 16157 "Could not dispatch " 16158 "sd_reenable_dsense_task\n"); 16159 } 16160 } 16161 #endif /* _LP64 */ 16162 /* FALLTHRU */ 16163 16164 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16165 if (!un->un_f_has_removable_media) { 16166 break; 16167 } 16168 16169 /* 16170 * When we get a unit attention from a removable-media device, 16171 * it may be in a state that will take a long time to recover 16172 * (e.g., from a reset). Since we are executing in interrupt 16173 * context here, we cannot wait around for the device to come 16174 * back. So hand this command off to sd_media_change_task() 16175 * for deferred processing under taskq thread context. (Note 16176 * that the command still may be failed if a problem is 16177 * encountered at a later time.) 
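 *
 * The dispatch pattern used below is roughly (sketch):
 *
 *	if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
 *	    KM_NOSLEEP) == 0) {
 *		... could not queue the task; fail the command ...
 *	}
 *
 * KM_NOSLEEP is required because we are executing in interrupt
 * context and must not block waiting for taskq resources.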
16178 */ 16179 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16180 KM_NOSLEEP) == 0) { 16181 /* 16182 * Cannot dispatch the request so fail the command. 16183 */ 16184 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16185 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16186 si.ssi_severity = SCSI_ERR_FATAL; 16187 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16188 sd_return_failed_command(un, bp, EIO); 16189 } 16190 16191 /* 16192 * If we failed to dispatch sd_media_change_task(), we have 16193 * already updated the kstat above. If the dispatch succeeded, 16194 * the kstat will be updated later if an error is encountered. 16195 * Either way, set the kstat_updated flag here. 16196 */ 16197 kstat_updated = B_TRUE; 16198 16199 /* 16200 * Either the command has been successfully dispatched to a 16201 * task Q for retrying, or the dispatch failed. In either case 16202 * do NOT retry again by calling sd_retry_command. This sets up 16203 * two retries of the same command and when one completes and 16204 * frees the resources, the other will access freed memory, 16205 * which is a bad thing. 16206 */ 16207 return; 16208 16209 default: 16210 break; 16211 } 16212 16213 /* 16214 * Update kstat if we haven't done that. 16215 */ 16216 if (!kstat_updated) { 16217 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16218 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16219 } 16220 16221 do_retry: 16222 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16223 EIO, SD_UA_RETRY_DELAY, NULL); 16224 } 16225 16226 16227 16228 /* 16229 * Function: sd_sense_key_fail_command 16230 * 16231 * Description: Used to fail a command when we don't like the sense key that 16232 * was returned. 16233 * 16234 * Context: May be called from interrupt context 16235 */ 16236 16237 static void 16238 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16239 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16240 { 16241 struct sd_sense_info si; 16242 16243 ASSERT(un != NULL); 16244 ASSERT(mutex_owned(SD_MUTEX(un))); 16245 ASSERT(bp != NULL); 16246 ASSERT(xp != NULL); 16247 ASSERT(pktp != NULL); 16248 16249 si.ssi_severity = SCSI_ERR_FATAL; 16250 si.ssi_pfa_flag = FALSE; 16251 16252 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16253 sd_return_failed_command(un, bp, EIO); 16254 } 16255 16256 16257 16258 /* 16259 * Function: sd_sense_key_blank_check 16260 * 16261 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16262 * Has no monetary connotation. 16263 * 16264 * Context: May be called from interrupt context 16265 */ 16266 16267 static void 16268 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16269 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16270 { 16271 struct sd_sense_info si; 16272 16273 ASSERT(un != NULL); 16274 ASSERT(mutex_owned(SD_MUTEX(un))); 16275 ASSERT(bp != NULL); 16276 ASSERT(xp != NULL); 16277 ASSERT(pktp != NULL); 16278 16279 /* 16280 * Blank check is not fatal for removable devices, therefore 16281 * it does not require a console message. 16282 */ 16283 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16284 SCSI_ERR_FATAL; 16285 si.ssi_pfa_flag = FALSE; 16286 16287 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16288 sd_return_failed_command(un, bp, EIO); 16289 } 16290 16291 16292 16293 16294 /* 16295 * Function: sd_sense_key_aborted_command 16296 * 16297 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16298 * 16299 * Context: May be called from interrupt context 16300 */ 16301 16302 static void 16303 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16304 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16305 { 16306 struct sd_sense_info si; 16307 16308 ASSERT(un != NULL); 16309 ASSERT(mutex_owned(SD_MUTEX(un))); 16310 ASSERT(bp != NULL); 16311 ASSERT(xp != NULL); 16312 ASSERT(pktp != NULL); 16313 16314 si.ssi_severity = SCSI_ERR_FATAL; 16315 si.ssi_pfa_flag = FALSE; 16316 16317 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16318 16319 /* 16320 * This really ought to be a fatal error, but we will retry anyway 16321 * as some drives report this as a spurious error. 16322 */ 16323 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16324 &si, EIO, (clock_t)0, NULL); 16325 } 16326 16327 16328 16329 /* 16330 * Function: sd_sense_key_default 16331 * 16332 * Description: Default recovery action for several SCSI sense keys (basically 16333 * attempts a retry). 16334 * 16335 * Context: May be called from interrupt context 16336 */ 16337 16338 static void 16339 sd_sense_key_default(struct sd_lun *un, 16340 uint8_t *sense_datap, 16341 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16342 { 16343 struct sd_sense_info si; 16344 uint8_t sense_key = scsi_sense_key(sense_datap); 16345 16346 ASSERT(un != NULL); 16347 ASSERT(mutex_owned(SD_MUTEX(un))); 16348 ASSERT(bp != NULL); 16349 ASSERT(xp != NULL); 16350 ASSERT(pktp != NULL); 16351 16352 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16353 16354 /* 16355 * Undecoded sense key. Attempt retries and hope that will fix 16356 * the problem. Otherwise, we're dead. 16357 */ 16358 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16359 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16360 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16361 } 16362 16363 si.ssi_severity = SCSI_ERR_FATAL; 16364 si.ssi_pfa_flag = FALSE; 16365 16366 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16367 &si, EIO, (clock_t)0, NULL); 16368 } 16369 16370 16371 16372 /* 16373 * Function: sd_print_retry_msg 16374 * 16375 * Description: Print a message indicating the retry action being taken. 16376 * 16377 * Arguments: un - ptr to associated softstate 16378 * bp - ptr to buf(9S) for the command 16379 * arg - not used. 16380 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16381 * or SD_NO_RETRY_ISSUED 16382 * 16383 * Context: May be called from interrupt context 16384 */ 16385 /* ARGSUSED */ 16386 static void 16387 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16388 { 16389 struct sd_xbuf *xp; 16390 struct scsi_pkt *pktp; 16391 char *reasonp; 16392 char *msgp; 16393 16394 ASSERT(un != NULL); 16395 ASSERT(mutex_owned(SD_MUTEX(un))); 16396 ASSERT(bp != NULL); 16397 pktp = SD_GET_PKTP(bp); 16398 ASSERT(pktp != NULL); 16399 xp = SD_GET_XBUF(bp); 16400 ASSERT(xp != NULL); 16401 16402 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16403 mutex_enter(&un->un_pm_mutex); 16404 if ((un->un_state == SD_STATE_SUSPENDED) || 16405 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16406 (pktp->pkt_flags & FLAG_SILENT)) { 16407 mutex_exit(&un->un_pm_mutex); 16408 goto update_pkt_reason; 16409 } 16410 mutex_exit(&un->un_pm_mutex); 16411 16412 /* 16413 * Suppress messages if they are all the same pkt_reason; with 16414 * TQ, many (up to 256) are returned with the same pkt_reason. 16415 * If we are in panic, then suppress the retry messages. 
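 * (Duplicate suppression matters here because a single transport fault
 * can complete an entire queue of commands, each carrying the same
 * pkt_reason, and each would otherwise log its own warning.)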
16416 */ 16417 switch (flag) { 16418 case SD_NO_RETRY_ISSUED: 16419 msgp = "giving up"; 16420 break; 16421 case SD_IMMEDIATE_RETRY_ISSUED: 16422 case SD_DELAYED_RETRY_ISSUED: 16423 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16424 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16425 (sd_error_level != SCSI_ERR_ALL))) { 16426 return; 16427 } 16428 msgp = "retrying command"; 16429 break; 16430 default: 16431 goto update_pkt_reason; 16432 } 16433 16434 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16435 scsi_rname(pktp->pkt_reason)); 16436 16437 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16438 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16439 16440 update_pkt_reason: 16441 /* 16442 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16443 * This is to prevent multiple console messages for the same failure 16444 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16445 * when the command is retried successfully because there still may be 16446 * more commands coming back with the same value of pktp->pkt_reason. 16447 */ 16448 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16449 un->un_last_pkt_reason = pktp->pkt_reason; 16450 } 16451 } 16452 16453 16454 /* 16455 * Function: sd_print_cmd_incomplete_msg 16456 * 16457 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16458 * 16459 * Arguments: un - ptr to associated softstate 16460 * bp - ptr to buf(9S) for the command 16461 * arg - passed to sd_print_retry_msg() 16462 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16463 * or SD_NO_RETRY_ISSUED 16464 * 16465 * Context: May be called from interrupt context 16466 */ 16467 16468 static void 16469 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16470 int code) 16471 { 16472 dev_info_t *dip; 16473 16474 ASSERT(un != NULL); 16475 ASSERT(mutex_owned(SD_MUTEX(un))); 16476 ASSERT(bp != NULL); 16477 16478 switch (code) { 16479 case SD_NO_RETRY_ISSUED: 16480 /* Command was failed. Someone turned off this target? */ 16481 if (un->un_state != SD_STATE_OFFLINE) { 16482 /* 16483 * Suppress message if we are detaching and 16484 * device has been disconnected 16485 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16486 * private interface and not part of the DDI 16487 */ 16488 dip = un->un_sd->sd_dev; 16489 if (!(DEVI_IS_DETACHING(dip) && 16490 DEVI_IS_DEVICE_REMOVED(dip))) { 16491 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16492 "disk not responding to selection\n"); 16493 } 16494 New_state(un, SD_STATE_OFFLINE); 16495 } 16496 break; 16497 16498 case SD_DELAYED_RETRY_ISSUED: 16499 case SD_IMMEDIATE_RETRY_ISSUED: 16500 default: 16501 /* Command was successfully queued for retry */ 16502 sd_print_retry_msg(un, bp, arg, code); 16503 break; 16504 } 16505 } 16506 16507 16508 /* 16509 * Function: sd_pkt_reason_cmd_incomplete 16510 * 16511 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
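 *		CMD_INCOMPLETE generally indicates that the transport gave
 *		up before the command completed, most often because the
 *		target never responded to selection.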
16512 * 16513 * Context: May be called from interrupt context 16514 */ 16515 16516 static void 16517 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16518 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16519 { 16520 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16521 16522 ASSERT(un != NULL); 16523 ASSERT(mutex_owned(SD_MUTEX(un))); 16524 ASSERT(bp != NULL); 16525 ASSERT(xp != NULL); 16526 ASSERT(pktp != NULL); 16527 16528 /* Do not do a reset if selection did not complete */ 16529 /* Note: Should this not just check the bit? */ 16530 if (pktp->pkt_state != STATE_GOT_BUS) { 16531 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16532 sd_reset_target(un, pktp); 16533 } 16534 16535 /* 16536 * If the target was not successfully selected, then set 16537 * SD_RETRIES_FAILFAST to indicate that we lost communication 16538 * with the target, and further retries and/or commands are 16539 * likely to take a long time. 16540 */ 16541 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16542 flag |= SD_RETRIES_FAILFAST; 16543 } 16544 16545 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16546 16547 sd_retry_command(un, bp, flag, 16548 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16549 } 16550 16551 16552 16553 /* 16554 * Function: sd_pkt_reason_cmd_tran_err 16555 * 16556 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16557 * 16558 * Context: May be called from interrupt context 16559 */ 16560 16561 static void 16562 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16563 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16564 { 16565 ASSERT(un != NULL); 16566 ASSERT(mutex_owned(SD_MUTEX(un))); 16567 ASSERT(bp != NULL); 16568 ASSERT(xp != NULL); 16569 ASSERT(pktp != NULL); 16570 16571 /* 16572 * Do not reset if we got a parity error, or if 16573 * selection did not complete. 16574 */ 16575 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16576 /* Note: Should this not just check the bit for pkt_state? */ 16577 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16578 (pktp->pkt_state != STATE_GOT_BUS)) { 16579 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16580 sd_reset_target(un, pktp); 16581 } 16582 16583 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16584 16585 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16586 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16587 } 16588 16589 16590 16591 /* 16592 * Function: sd_pkt_reason_cmd_reset 16593 * 16594 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16595 * 16596 * Context: May be called from interrupt context 16597 */ 16598 16599 static void 16600 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16601 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16602 { 16603 ASSERT(un != NULL); 16604 ASSERT(mutex_owned(SD_MUTEX(un))); 16605 ASSERT(bp != NULL); 16606 ASSERT(xp != NULL); 16607 ASSERT(pktp != NULL); 16608 16609 /* The target may still be running the command, so try to reset. */ 16610 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16611 sd_reset_target(un, pktp); 16612 16613 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16614 16615 /* 16616 * If pkt_reason is CMD_RESET chances are that this pkt got 16617 * reset because another target on this bus caused it. The target 16618 * that caused it should get CMD_TIMEOUT with pkt_statistics 16619 * of STAT_TIMEOUT/STAT_DEV_RESET. 
16620 */ 16621 16622 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16623 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16624 } 16625 16626 16627 16628 16629 /* 16630 * Function: sd_pkt_reason_cmd_aborted 16631 * 16632 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16633 * 16634 * Context: May be called from interrupt context 16635 */ 16636 16637 static void 16638 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16639 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16640 { 16641 ASSERT(un != NULL); 16642 ASSERT(mutex_owned(SD_MUTEX(un))); 16643 ASSERT(bp != NULL); 16644 ASSERT(xp != NULL); 16645 ASSERT(pktp != NULL); 16646 16647 /* The target may still be running the command, so try to reset. */ 16648 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16649 sd_reset_target(un, pktp); 16650 16651 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16652 16653 /* 16654 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16655 * aborted because another target on this bus caused it. The target 16656 * that caused it should get CMD_TIMEOUT with pkt_statistics 16657 * of STAT_TIMEOUT/STAT_DEV_RESET. 16658 */ 16659 16660 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16661 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16662 } 16663 16664 16665 16666 /* 16667 * Function: sd_pkt_reason_cmd_timeout 16668 * 16669 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16670 * 16671 * Context: May be called from interrupt context 16672 */ 16673 16674 static void 16675 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16676 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16677 { 16678 ASSERT(un != NULL); 16679 ASSERT(mutex_owned(SD_MUTEX(un))); 16680 ASSERT(bp != NULL); 16681 ASSERT(xp != NULL); 16682 ASSERT(pktp != NULL); 16683 16684 16685 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16686 sd_reset_target(un, pktp); 16687 16688 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16689 16690 /* 16691 * A command timeout indicates that we could not establish 16692 * communication with the target, so set SD_RETRIES_FAILFAST 16693 * as further retries/commands are likely to take a long time. 16694 */ 16695 sd_retry_command(un, bp, 16696 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16697 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16698 } 16699 16700 16701 16702 /* 16703 * Function: sd_pkt_reason_cmd_unx_bus_free 16704 * 16705 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16706 * 16707 * Context: May be called from interrupt context 16708 */ 16709 16710 static void 16711 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16712 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16713 { 16714 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16715 16716 ASSERT(un != NULL); 16717 ASSERT(mutex_owned(SD_MUTEX(un))); 16718 ASSERT(bp != NULL); 16719 ASSERT(xp != NULL); 16720 ASSERT(pktp != NULL); 16721 16722 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16723 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16724 16725 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16726 sd_print_retry_msg : NULL; 16727 16728 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16729 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16730 } 16731 16732 16733 /* 16734 * Function: sd_pkt_reason_cmd_tag_reject 16735 * 16736 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
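 *		The target has rejected a tagged command, so the handler
 *		below disables tagged queuing (the "tagged-qing"
 *		capability) and drops the throttle before retrying.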
16737 * 16738 * Context: May be called from interrupt context 16739 */ 16740 16741 static void 16742 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16743 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16744 { 16745 ASSERT(un != NULL); 16746 ASSERT(mutex_owned(SD_MUTEX(un))); 16747 ASSERT(bp != NULL); 16748 ASSERT(xp != NULL); 16749 ASSERT(pktp != NULL); 16750 16751 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16752 pktp->pkt_flags = 0; 16753 un->un_tagflags = 0; 16754 if (un->un_f_opt_queueing == TRUE) { 16755 un->un_throttle = min(un->un_throttle, 3); 16756 } else { 16757 un->un_throttle = 1; 16758 } 16759 mutex_exit(SD_MUTEX(un)); 16760 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16761 mutex_enter(SD_MUTEX(un)); 16762 16763 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16764 16765 /* Legacy behavior not to check retry counts here. */ 16766 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16767 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16768 } 16769 16770 16771 /* 16772 * Function: sd_pkt_reason_default 16773 * 16774 * Description: Default recovery actions for SCSA pkt_reason values that 16775 * do not have more explicit recovery actions. 16776 * 16777 * Context: May be called from interrupt context 16778 */ 16779 16780 static void 16781 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16782 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16783 { 16784 ASSERT(un != NULL); 16785 ASSERT(mutex_owned(SD_MUTEX(un))); 16786 ASSERT(bp != NULL); 16787 ASSERT(xp != NULL); 16788 ASSERT(pktp != NULL); 16789 16790 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16791 sd_reset_target(un, pktp); 16792 16793 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16794 16795 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16796 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16797 } 16798 16799 16800 16801 /* 16802 * Function: sd_pkt_status_check_condition 16803 * 16804 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16805 * 16806 * Context: May be called from interrupt context 16807 */ 16808 16809 static void 16810 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16811 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16812 { 16813 ASSERT(un != NULL); 16814 ASSERT(mutex_owned(SD_MUTEX(un))); 16815 ASSERT(bp != NULL); 16816 ASSERT(xp != NULL); 16817 ASSERT(pktp != NULL); 16818 16819 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16820 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16821 16822 /* 16823 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16824 * command will be retried after the request sense). Otherwise, retry 16825 * the command. Note: we are issuing the request sense even though the 16826 * retry limit may have been reached for the failed command. 
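 * (With ARQ enabled the sense data was fetched automatically along
 * with the failed command, so no separate REQUEST SENSE is needed.)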
16827	 */
16828	if (un->un_f_arq_enabled == FALSE) {
16829		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16830		    "no ARQ, sending request sense command\n");
16831		sd_send_request_sense_command(un, bp, pktp);
16832	} else {
16833		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
16834		    "ARQ, retrying request sense command\n");
16835 #if defined(__i386) || defined(__amd64)
16836		/*
16837		 * The SD_RETRY_DELAY value needs to be adjusted here
16838		 * when SD_RETRY_DELAY changes in sddef.h
16839		 */
16840		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
16841		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
16842		    NULL);
16843 #else
16844		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
16845		    EIO, SD_RETRY_DELAY, NULL);
16846 #endif
16847	}
16848
16849	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
16850 }
16851
16852
16853 /*
16854  * Function: sd_pkt_status_busy
16855  *
16856  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
16857  *
16858  * Context: May be called from interrupt context
16859  */
16860
16861 static void
16862 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
16863	struct scsi_pkt *pktp)
16864 {
16865	ASSERT(un != NULL);
16866	ASSERT(mutex_owned(SD_MUTEX(un)));
16867	ASSERT(bp != NULL);
16868	ASSERT(xp != NULL);
16869	ASSERT(pktp != NULL);
16870
16871	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16872	    "sd_pkt_status_busy: entry\n");
16873
16874	/* If retries are exhausted, just fail the command. */
16875	if (xp->xb_retry_count >= un->un_busy_retry_count) {
16876		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16877		    "device busy too long\n");
16878		sd_return_failed_command(un, bp, EIO);
16879		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16880		    "sd_pkt_status_busy: exit\n");
16881		return;
16882	}
16883	xp->xb_retry_count++;
16884
16885	/*
16886	 * Try to reset the target. However, we do not want to perform
16887	 * more than one reset if the device continues to fail. The reset
16888	 * will be performed when the retry count reaches the reset
16889	 * threshold. This threshold should be set such that at least
16890	 * one retry is issued before the reset is performed.
16891	 */
16892	if (xp->xb_retry_count ==
16893	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
16894		int rval = 0;
16895		mutex_exit(SD_MUTEX(un));
16896		if (un->un_f_allow_bus_device_reset == TRUE) {
16897			/*
16898			 * First try to reset the LUN; if we cannot then
16899			 * try to reset the target.
16900			 */
16901			if (un->un_f_lun_reset_enabled == TRUE) {
16902				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16903				    "sd_pkt_status_busy: RESET_LUN\n");
16904				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
16905			}
16906			if (rval == 0) {
16907				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16908				    "sd_pkt_status_busy: RESET_TARGET\n");
16909				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
16910			}
16911		}
16912		if (rval == 0) {
16913			/*
16914			 * If the RESET_LUN and/or RESET_TARGET failed,
16915			 * try RESET_ALL
16916			 */
16917			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16918			    "sd_pkt_status_busy: RESET_ALL\n");
16919			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
16920		}
16921		mutex_enter(SD_MUTEX(un));
16922		if (rval == 0) {
16923			/*
16924			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
16925			 * At this point we give up & fail the command.
16926 */ 16927 sd_return_failed_command(un, bp, EIO); 16928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16929 "sd_pkt_status_busy: exit (failed cmd)\n"); 16930 return; 16931 } 16932 } 16933 16934 /* 16935 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 16936 * we have already checked the retry counts above. 16937 */ 16938 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 16939 EIO, SD_BSY_TIMEOUT, NULL); 16940 16941 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16942 "sd_pkt_status_busy: exit\n"); 16943 } 16944 16945 16946 /* 16947 * Function: sd_pkt_status_reservation_conflict 16948 * 16949 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 16950 * command status. 16951 * 16952 * Context: May be called from interrupt context 16953 */ 16954 16955 static void 16956 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 16957 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16958 { 16959 ASSERT(un != NULL); 16960 ASSERT(mutex_owned(SD_MUTEX(un))); 16961 ASSERT(bp != NULL); 16962 ASSERT(xp != NULL); 16963 ASSERT(pktp != NULL); 16964 16965 /* 16966 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 16967 * conflict could be due to various reasons like incorrect keys, not 16968 * registered or not reserved etc. So, we return EACCES to the caller. 16969 */ 16970 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 16971 int cmd = SD_GET_PKT_OPCODE(pktp); 16972 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 16973 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 16974 sd_return_failed_command(un, bp, EACCES); 16975 return; 16976 } 16977 } 16978 16979 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 16980 16981 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 16982 if (sd_failfast_enable != 0) { 16983 /* By definition, we must panic here.... */ 16984 sd_panic_for_res_conflict(un); 16985 /*NOTREACHED*/ 16986 } 16987 SD_ERROR(SD_LOG_IO, un, 16988 "sd_handle_resv_conflict: Disk Reserved\n"); 16989 sd_return_failed_command(un, bp, EACCES); 16990 return; 16991 } 16992 16993 /* 16994 * 1147670: retry only if sd_retry_on_reservation_conflict 16995 * property is set (default is 1). Retries will not succeed 16996 * on a disk reserved by another initiator. HA systems 16997 * may reset this via sd.conf to avoid these retries. 16998 * 16999 * Note: The legacy return code for this failure is EIO, however EACCES 17000 * seems more appropriate for a reservation conflict. 17001 */ 17002 if (sd_retry_on_reservation_conflict == 0) { 17003 SD_ERROR(SD_LOG_IO, un, 17004 "sd_handle_resv_conflict: Device Reserved\n"); 17005 sd_return_failed_command(un, bp, EIO); 17006 return; 17007 } 17008 17009 /* 17010 * Retry the command if we can. 17011 * 17012 * Note: The legacy return code for this failure is EIO, however EACCES 17013 * seems more appropriate for a reservation conflict. 17014 */ 17015 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17016 (clock_t)2, NULL); 17017 } 17018 17019 17020 17021 /* 17022 * Function: sd_pkt_status_qfull 17023 * 17024 * Description: Handle a QUEUE FULL condition from the target. This can 17025 * occur if the HBA does not handle the queue full condition. 17026 * (Basically this means third-party HBAs as Sun HBAs will 17027 * handle the queue full condition.) Note that if there are 17028 * some commands already in the transport, then the queue full 17029 * has occurred because the queue for this nexus is actually 17030 * full. 
If there are no commands in the transport, then the 17031 * queue full is resulting from some other initiator or lun 17032 * consuming all the resources at the target. 17033 * 17034 * Context: May be called from interrupt context 17035 */ 17036 17037 static void 17038 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17039 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17040 { 17041 ASSERT(un != NULL); 17042 ASSERT(mutex_owned(SD_MUTEX(un))); 17043 ASSERT(bp != NULL); 17044 ASSERT(xp != NULL); 17045 ASSERT(pktp != NULL); 17046 17047 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17048 "sd_pkt_status_qfull: entry\n"); 17049 17050 /* 17051 * Just lower the QFULL throttle and retry the command. Note that 17052 * we do not limit the number of retries here. 17053 */ 17054 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17055 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17056 SD_RESTART_TIMEOUT, NULL); 17057 17058 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17059 "sd_pkt_status_qfull: exit\n"); 17060 } 17061 17062 17063 /* 17064 * Function: sd_reset_target 17065 * 17066 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17067 * RESET_TARGET, or RESET_ALL. 17068 * 17069 * Context: May be called under interrupt context. 17070 */ 17071 17072 static void 17073 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17074 { 17075 int rval = 0; 17076 17077 ASSERT(un != NULL); 17078 ASSERT(mutex_owned(SD_MUTEX(un))); 17079 ASSERT(pktp != NULL); 17080 17081 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17082 17083 /* 17084 * No need to reset if the transport layer has already done so. 17085 */ 17086 if ((pktp->pkt_statistics & 17087 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17088 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17089 "sd_reset_target: no reset\n"); 17090 return; 17091 } 17092 17093 mutex_exit(SD_MUTEX(un)); 17094 17095 if (un->un_f_allow_bus_device_reset == TRUE) { 17096 if (un->un_f_lun_reset_enabled == TRUE) { 17097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17098 "sd_reset_target: RESET_LUN\n"); 17099 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17100 } 17101 if (rval == 0) { 17102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17103 "sd_reset_target: RESET_TARGET\n"); 17104 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17105 } 17106 } 17107 17108 if (rval == 0) { 17109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17110 "sd_reset_target: RESET_ALL\n"); 17111 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17112 } 17113 17114 mutex_enter(SD_MUTEX(un)); 17115 17116 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17117 } 17118 17119 17120 /* 17121 * Function: sd_media_change_task 17122 * 17123 * Description: Recovery action for CDROM to become available. 
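 *		Runs sd_handle_mchange() in a retry loop; the retry limit
 *		starts at SD_UNIT_ATTENTION_RETRY/10 and is raised to
 *		SD_UNIT_ATTENTION_RETRY once EAGAIN shows the drive is
 *		still becoming ready, with a 0.5 sec delay per attempt.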
17124 * 17125 * Context: Executes in a taskq() thread context 17126 */ 17127 17128 static void 17129 sd_media_change_task(void *arg) 17130 { 17131 struct scsi_pkt *pktp = arg; 17132 struct sd_lun *un; 17133 struct buf *bp; 17134 struct sd_xbuf *xp; 17135 int err = 0; 17136 int retry_count = 0; 17137 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17138 struct sd_sense_info si; 17139 17140 ASSERT(pktp != NULL); 17141 bp = (struct buf *)pktp->pkt_private; 17142 ASSERT(bp != NULL); 17143 xp = SD_GET_XBUF(bp); 17144 ASSERT(xp != NULL); 17145 un = SD_GET_UN(bp); 17146 ASSERT(un != NULL); 17147 ASSERT(!mutex_owned(SD_MUTEX(un))); 17148 ASSERT(un->un_f_monitor_media_state); 17149 17150 si.ssi_severity = SCSI_ERR_INFO; 17151 si.ssi_pfa_flag = FALSE; 17152 17153 /* 17154 * When a reset is issued on a CDROM, it takes a long time to 17155 * recover. First few attempts to read capacity and other things 17156 * related to handling unit attention fail (with a ASC 0x4 and 17157 * ASCQ 0x1). In that case we want to do enough retries and we want 17158 * to limit the retries in other cases of genuine failures like 17159 * no media in drive. 17160 */ 17161 while (retry_count++ < retry_limit) { 17162 if ((err = sd_handle_mchange(un)) == 0) { 17163 break; 17164 } 17165 if (err == EAGAIN) { 17166 retry_limit = SD_UNIT_ATTENTION_RETRY; 17167 } 17168 /* Sleep for 0.5 sec. & try again */ 17169 delay(drv_usectohz(500000)); 17170 } 17171 17172 /* 17173 * Dispatch (retry or fail) the original command here, 17174 * along with appropriate console messages.... 17175 * 17176 * Must grab the mutex before calling sd_retry_command, 17177 * sd_print_sense_msg and sd_return_failed_command. 17178 */ 17179 mutex_enter(SD_MUTEX(un)); 17180 if (err != SD_CMD_SUCCESS) { 17181 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17182 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17183 si.ssi_severity = SCSI_ERR_FATAL; 17184 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17185 sd_return_failed_command(un, bp, EIO); 17186 } else { 17187 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17188 &si, EIO, (clock_t)0, NULL); 17189 } 17190 mutex_exit(SD_MUTEX(un)); 17191 } 17192 17193 17194 17195 /* 17196 * Function: sd_handle_mchange 17197 * 17198 * Description: Perform geometry validation & other recovery when CDROM 17199 * has been removed from drive. 
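 *		Re-reads the capacity, updates the capacity kstat,
 *		revalidates the cmlb label, and finally attempts to
 *		lock the door again.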
17200 * 17201 * Return Code: 0 for success 17202 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17203 * sd_send_scsi_READ_CAPACITY() 17204 * 17205 * Context: Executes in a taskq() thread context 17206 */ 17207 17208 static int 17209 sd_handle_mchange(struct sd_lun *un) 17210 { 17211 uint64_t capacity; 17212 uint32_t lbasize; 17213 int rval; 17214 17215 ASSERT(!mutex_owned(SD_MUTEX(un))); 17216 ASSERT(un->un_f_monitor_media_state); 17217 17218 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17219 SD_PATH_DIRECT_PRIORITY)) != 0) { 17220 return (rval); 17221 } 17222 17223 mutex_enter(SD_MUTEX(un)); 17224 sd_update_block_info(un, lbasize, capacity); 17225 17226 if (un->un_errstats != NULL) { 17227 struct sd_errstats *stp = 17228 (struct sd_errstats *)un->un_errstats->ks_data; 17229 stp->sd_capacity.value.ui64 = (uint64_t) 17230 ((uint64_t)un->un_blockcount * 17231 (uint64_t)un->un_tgt_blocksize); 17232 } 17233 17234 17235 /* 17236 * Check if the media in the device is writable or not 17237 */ 17238 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17239 17240 /* 17241 * Note: Maybe let the strategy/partitioning chain worry about getting 17242 * valid geometry. 17243 */ 17244 mutex_exit(SD_MUTEX(un)); 17245 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17246 17247 17248 if (cmlb_validate(un->un_cmlbhandle, 0, 17249 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17250 return (EIO); 17251 } else { 17252 if (un->un_f_pkstats_enabled) { 17253 sd_set_pstats(un); 17254 SD_TRACE(SD_LOG_IO_PARTITION, un, 17255 "sd_handle_mchange: un:0x%p pstats created and " 17256 "set\n", un); 17257 } 17258 } 17259 17260 17261 /* 17262 * Try to lock the door 17263 */ 17264 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17265 SD_PATH_DIRECT_PRIORITY)); 17266 } 17267 17268 17269 /* 17270 * Function: sd_send_scsi_DOORLOCK 17271 * 17272 * Description: Issue the scsi DOOR LOCK command 17273 * 17274 * Arguments: un - pointer to driver soft state (unit) structure for 17275 * this target. 17276 * flag - SD_REMOVAL_ALLOW 17277 * SD_REMOVAL_PREVENT 17278 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17279 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17280 * to use the USCSI "direct" chain and bypass the normal 17281 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17282 * command is issued as part of an error recovery action. 17283 * 17284 * Return Code: 0 - Success 17285 * errno return code from sd_send_scsi_cmd() 17286 * 17287 * Context: Can sleep. 17288 */ 17289 17290 static int 17291 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17292 { 17293 union scsi_cdb cdb; 17294 struct uscsi_cmd ucmd_buf; 17295 struct scsi_extended_sense sense_buf; 17296 int status; 17297 17298 ASSERT(un != NULL); 17299 ASSERT(!mutex_owned(SD_MUTEX(un))); 17300 17301 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17302 17303 /* already determined doorlock is not supported, fake success */ 17304 if (un->un_f_doorlock_supported == FALSE) { 17305 return (0); 17306 } 17307 17308 /* 17309 * If we are ejecting and see an SD_REMOVAL_PREVENT 17310 * ignore the command so we can complete the eject 17311 * operation. 
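 * (The check below returns EAGAIN for SD_REMOVAL_PREVENT while
 * un_f_ejecting is set.)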
17312 */ 17313 if (flag == SD_REMOVAL_PREVENT) { 17314 mutex_enter(SD_MUTEX(un)); 17315 if (un->un_f_ejecting == TRUE) { 17316 mutex_exit(SD_MUTEX(un)); 17317 return (EAGAIN); 17318 } 17319 mutex_exit(SD_MUTEX(un)); 17320 } 17321 17322 bzero(&cdb, sizeof (cdb)); 17323 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17324 17325 cdb.scc_cmd = SCMD_DOORLOCK; 17326 cdb.cdb_opaque[4] = (uchar_t)flag; 17327 17328 ucmd_buf.uscsi_cdb = (char *)&cdb; 17329 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17330 ucmd_buf.uscsi_bufaddr = NULL; 17331 ucmd_buf.uscsi_buflen = 0; 17332 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17333 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17334 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17335 ucmd_buf.uscsi_timeout = 15; 17336 17337 SD_TRACE(SD_LOG_IO, un, 17338 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17339 17340 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17341 UIO_SYSSPACE, path_flag); 17342 17343 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17344 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17345 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17346 /* fake success and skip subsequent doorlock commands */ 17347 un->un_f_doorlock_supported = FALSE; 17348 return (0); 17349 } 17350 17351 return (status); 17352 } 17353 17354 /* 17355 * Function: sd_send_scsi_READ_CAPACITY 17356 * 17357 * Description: This routine uses the scsi READ CAPACITY command to determine 17358 * the device capacity in number of blocks and the device native 17359 * block size. If this function returns a failure, then the 17360 * values in *capp and *lbap are undefined. If the capacity 17361 * returned is 0xffffffff then the lun is too large for a 17362 * normal READ CAPACITY command and the results of a 17363 * READ CAPACITY 16 will be used instead. 17364 * 17365 * Arguments: un - ptr to soft state struct for the target 17366 * capp - ptr to unsigned 64-bit variable to receive the 17367 * capacity value from the command. 17368 * lbap - ptr to unsigned 32-bit varaible to receive the 17369 * block size value from the command 17370 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17371 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17372 * to use the USCSI "direct" chain and bypass the normal 17373 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17374 * command is issued as part of an error recovery action. 17375 * 17376 * Return Code: 0 - Success 17377 * EIO - IO error 17378 * EACCES - Reservation conflict detected 17379 * EAGAIN - Device is becoming ready 17380 * errno return code from sd_send_scsi_cmd() 17381 * 17382 * Context: Can sleep. Blocks until command completes. 17383 */ 17384 17385 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17386 17387 static int 17388 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17389 int path_flag) 17390 { 17391 struct scsi_extended_sense sense_buf; 17392 struct uscsi_cmd ucmd_buf; 17393 union scsi_cdb cdb; 17394 uint32_t *capacity_buf; 17395 uint64_t capacity; 17396 uint32_t lbasize; 17397 int status; 17398 17399 ASSERT(un != NULL); 17400 ASSERT(!mutex_owned(SD_MUTEX(un))); 17401 ASSERT(capp != NULL); 17402 ASSERT(lbap != NULL); 17403 17404 SD_TRACE(SD_LOG_IO, un, 17405 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17406 17407 /* 17408 * First send a READ_CAPACITY command to the target. 17409 * (This command is mandatory under SCSI-2.) 17410 * 17411 * Set up the CDB for the READ_CAPACITY command. 
The Partial 17412 * Medium Indicator bit is cleared. The address field must be 17413 * zero if the PMI bit is zero. 17414 */ 17415 bzero(&cdb, sizeof (cdb)); 17416 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17417 17418 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17419 17420 cdb.scc_cmd = SCMD_READ_CAPACITY; 17421 17422 ucmd_buf.uscsi_cdb = (char *)&cdb; 17423 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17424 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17425 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17426 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17427 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17428 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17429 ucmd_buf.uscsi_timeout = 60; 17430 17431 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17432 UIO_SYSSPACE, path_flag); 17433 17434 switch (status) { 17435 case 0: 17436 /* Return failure if we did not get valid capacity data. */ 17437 if (ucmd_buf.uscsi_resid != 0) { 17438 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17439 return (EIO); 17440 } 17441 17442 /* 17443 * Read capacity and block size from the READ CAPACITY 10 data. 17444 * This data may be adjusted later due to device specific 17445 * issues. 17446 * 17447 * According to the SCSI spec, the READ CAPACITY 10 17448 * command returns the following: 17449 * 17450 * bytes 0-3: Maximum logical block address available. 17451 * (MSB in byte:0 & LSB in byte:3) 17452 * 17453 * bytes 4-7: Block length in bytes 17454 * (MSB in byte:4 & LSB in byte:7) 17455 * 17456 */ 17457 capacity = BE_32(capacity_buf[0]); 17458 lbasize = BE_32(capacity_buf[1]); 17459 17460 /* 17461 * Done with capacity_buf 17462 */ 17463 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17464 17465 /* 17466 * if the reported capacity is set to all 0xf's, then 17467 * this disk is too large and requires SBC-2 commands. 17468 * Reissue the request using READ CAPACITY 16. 17469 */ 17470 if (capacity == 0xffffffff) { 17471 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17472 &lbasize, path_flag); 17473 if (status != 0) { 17474 return (status); 17475 } 17476 } 17477 break; /* Success! */ 17478 case EIO: 17479 switch (ucmd_buf.uscsi_status) { 17480 case STATUS_RESERVATION_CONFLICT: 17481 status = EACCES; 17482 break; 17483 case STATUS_CHECK: 17484 /* 17485 * Check condition; look for ASC/ASCQ of 0x04/0x01 17486 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17487 */ 17488 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17489 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17490 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17491 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17492 return (EAGAIN); 17493 } 17494 break; 17495 default: 17496 break; 17497 } 17498 /* FALLTHRU */ 17499 default: 17500 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17501 return (status); 17502 } 17503 17504 /* 17505 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17506 * (2352 and 0 are common) so for these devices always force the value 17507 * to 2048 as required by the ATAPI specs. 17508 */ 17509 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17510 lbasize = 2048; 17511 } 17512 17513 /* 17514 * Get the maximum LBA value from the READ CAPACITY data. 17515 * Here we assume that the Partial Medium Indicator (PMI) bit 17516 * was cleared when issuing the command. This means that the LBA 17517 * returned from the device is the LBA of the last logical block 17518 * on the logical unit. The actual logical block count will be 17519 * this value plus one. 
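 * For example, a device reporting a maximum LBA of 0x1FFFFF with a
 * 2048-byte block length and a 512-byte un_sys_blocksize yields
 * (0x1FFFFF + 1) * (2048 / 512) = 0x800000 system blocks.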
17520	 *
17521	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17522	 * so scale the capacity value to reflect this.
17523	 */
17524	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17525
17526	/*
17527	 * Copy the values from the READ CAPACITY command into the space
17528	 * provided by the caller.
17529	 */
17530	*capp = capacity;
17531	*lbap = lbasize;
17532
17533	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17534	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17535
17536	/*
17537	 * Both the lbasize and capacity from the device must be nonzero,
17538	 * otherwise we assume that the values are not valid and return
17539	 * failure to the caller. (4203735)
17540	 */
17541	if ((capacity == 0) || (lbasize == 0)) {
17542		return (EIO);
17543	}
17544
17545	return (0);
17546 }
17547
17548 /*
17549  * Function: sd_send_scsi_READ_CAPACITY_16
17550  *
17551  * Description: This routine uses the scsi READ CAPACITY 16 command to
17552  *		determine the device capacity in number of blocks and the
17553  *		device native block size. If this function returns a failure,
17554  *		then the values in *capp and *lbap are undefined.
17555  *		This routine should always be called by
17556  *		sd_send_scsi_READ_CAPACITY which will apply any device
17557  *		specific adjustments to capacity and lbasize.
17558  *
17559  * Arguments: un - ptr to soft state struct for the target
17560  *		capp - ptr to unsigned 64-bit variable to receive the
17561  *			capacity value from the command.
17562  *		lbap - ptr to unsigned 32-bit variable to receive the
17563  *			block size value from the command
17564  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17565  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17566  *			to use the USCSI "direct" chain and bypass the normal
17567  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
17568  *			this command is issued as part of an error recovery
17569  *			action.
17570  *
17571  * Return Code: 0 - Success
17572  *		EIO - IO error
17573  *		EACCES - Reservation conflict detected
17574  *		EAGAIN - Device is becoming ready
17575  *		errno return code from sd_send_scsi_cmd()
17576  *
17577  * Context: Can sleep. Blocks until command completes.
17578  */
17579
17580 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
17581
17582 static int
17583 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17584	uint32_t *lbap, int path_flag)
17585 {
17586	struct scsi_extended_sense	sense_buf;
17587	struct uscsi_cmd	ucmd_buf;
17588	union scsi_cdb		cdb;
17589	uint64_t		*capacity16_buf;
17590	uint64_t		capacity;
17591	uint32_t		lbasize;
17592	int			status;
17593
17594	ASSERT(un != NULL);
17595	ASSERT(!mutex_owned(SD_MUTEX(un)));
17596	ASSERT(capp != NULL);
17597	ASSERT(lbap != NULL);
17598
17599	SD_TRACE(SD_LOG_IO, un,
17600	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17601
17602	/*
17603	 * First send a READ_CAPACITY_16 command to the target.
17604	 *
17605	 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
17606	 * Medium Indicator bit is cleared. The address field must be
17607	 * zero if the PMI bit is zero.
17608	 */
17609	bzero(&cdb, sizeof (cdb));
17610	bzero(&ucmd_buf, sizeof (ucmd_buf));
17611
17612	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17613
17614	ucmd_buf.uscsi_cdb	= (char *)&cdb;
17615	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
17616	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
17617	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
17618	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
17619	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
17620	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
17621	ucmd_buf.uscsi_timeout	= 60;
17622
17623	/*
17624	 * Read Capacity (16) is a Service Action In command. One
17625	 * command byte (0x9E) is overloaded for multiple operations,
17626	 * with the second CDB byte specifying the desired operation.
17627	 */
17628	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
17629	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
17630
17631	/*
17632	 * Fill in allocation length field
17633	 */
17634	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
17635
17636	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17637	    UIO_SYSSPACE, path_flag);
17638
17639	switch (status) {
17640	case 0:
17641		/* Return failure if we did not get valid capacity data. */
17642		if (ucmd_buf.uscsi_resid > 20) {
17643			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17644			return (EIO);
17645		}
17646
17647		/*
17648		 * Read capacity and block size from the READ CAPACITY 16 data.
17649		 * This data may be adjusted later due to device specific
17650		 * issues.
17651		 *
17652		 * According to the SCSI spec, the READ CAPACITY 16
17653		 * command returns the following:
17654		 *
17655		 *  bytes 0-7: Maximum logical block address available.
17656		 *		(MSB in byte:0 & LSB in byte:7)
17657		 *
17658		 *  bytes 8-11: Block length in bytes
17659		 *		(MSB in byte:8 & LSB in byte:11)
17660		 *
17661		 */
17662		capacity = BE_64(capacity16_buf[0]);
17663		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
17664
17665		/*
17666		 * Done with capacity16_buf
17667		 */
17668		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17669
17670		/*
17671		 * if the reported capacity is set to all 0xf's, then
17672		 * this disk is too large. This could only happen with
17673		 * a device that supports LBAs larger than 64 bits which
17674		 * are not defined by any current T10 standards.
17675		 */
17676		if (capacity == 0xffffffffffffffff) {
17677			return (EIO);
17678		}
17679		break;	/* Success! */
17680	case EIO:
17681		switch (ucmd_buf.uscsi_status) {
17682		case STATUS_RESERVATION_CONFLICT:
17683			status = EACCES;
17684			break;
17685		case STATUS_CHECK:
17686			/*
17687			 * Check condition; look for ASC/ASCQ of 0x04/0x01
17688			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
17689			 */
17690			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17691			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
17692			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
17693				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17694				return (EAGAIN);
17695			}
17696			break;
17697		default:
17698			break;
17699		}
17700		/* FALLTHRU */
17701	default:
17702		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
17703		return (status);
17704	}
17705
17706	*capp = capacity;
17707	*lbap = lbasize;
17708
17709	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
17710	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
17711
17712	return (0);
17713 }
17714
17715
17716 /*
17717  * Function: sd_send_scsi_START_STOP_UNIT
17718  *
17719  * Description: Issue a scsi START STOP UNIT command to the target.
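 *		The flag argument is written directly into byte 4 of the
 *		CDB below, so the SD_TARGET_* values select the START and
 *		LOEJ bits of the command.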
17720 * 17721 * Arguments: un - pointer to driver soft state (unit) structure for 17722 * this target. 17723 * flag - SD_TARGET_START 17724 * SD_TARGET_STOP 17725 * SD_TARGET_EJECT 17726 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17727 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17728 * to use the USCSI "direct" chain and bypass the normal 17729 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17730 * command is issued as part of an error recovery action. 17731 * 17732 * Return Code: 0 - Success 17733 * EIO - IO error 17734 * EACCES - Reservation conflict detected 17735 * ENXIO - Not Ready, medium not present 17736 * errno return code from sd_send_scsi_cmd() 17737 * 17738 * Context: Can sleep. 17739 */ 17740 17741 static int 17742 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17743 { 17744 struct scsi_extended_sense sense_buf; 17745 union scsi_cdb cdb; 17746 struct uscsi_cmd ucmd_buf; 17747 int status; 17748 17749 ASSERT(un != NULL); 17750 ASSERT(!mutex_owned(SD_MUTEX(un))); 17751 17752 SD_TRACE(SD_LOG_IO, un, 17753 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17754 17755 if (un->un_f_check_start_stop && 17756 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17757 (un->un_f_start_stop_supported != TRUE)) { 17758 return (0); 17759 } 17760 17761 /* 17762 * If we are performing an eject operation and 17763 * we receive any command other than SD_TARGET_EJECT 17764 * we should immediately return. 17765 */ 17766 if (flag != SD_TARGET_EJECT) { 17767 mutex_enter(SD_MUTEX(un)); 17768 if (un->un_f_ejecting == TRUE) { 17769 mutex_exit(SD_MUTEX(un)); 17770 return (EAGAIN); 17771 } 17772 mutex_exit(SD_MUTEX(un)); 17773 } 17774 17775 bzero(&cdb, sizeof (cdb)); 17776 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17777 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17778 17779 cdb.scc_cmd = SCMD_START_STOP; 17780 cdb.cdb_opaque[4] = (uchar_t)flag; 17781 17782 ucmd_buf.uscsi_cdb = (char *)&cdb; 17783 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17784 ucmd_buf.uscsi_bufaddr = NULL; 17785 ucmd_buf.uscsi_buflen = 0; 17786 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17787 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17788 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17789 ucmd_buf.uscsi_timeout = 200; 17790 17791 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17792 UIO_SYSSPACE, path_flag); 17793 17794 switch (status) { 17795 case 0: 17796 break; /* Success! */ 17797 case EIO: 17798 switch (ucmd_buf.uscsi_status) { 17799 case STATUS_RESERVATION_CONFLICT: 17800 status = EACCES; 17801 break; 17802 case STATUS_CHECK: 17803 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17804 switch (scsi_sense_key( 17805 (uint8_t *)&sense_buf)) { 17806 case KEY_ILLEGAL_REQUEST: 17807 status = ENOTSUP; 17808 break; 17809 case KEY_NOT_READY: 17810 if (scsi_sense_asc( 17811 (uint8_t *)&sense_buf) 17812 == 0x3A) { 17813 status = ENXIO; 17814 } 17815 break; 17816 default: 17817 break; 17818 } 17819 } 17820 break; 17821 default: 17822 break; 17823 } 17824 break; 17825 default: 17826 break; 17827 } 17828 17829 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17830 17831 return (status); 17832 } 17833 17834 17835 /* 17836 * Function: sd_start_stop_unit_callback 17837 * 17838 * Description: timeout(9F) callback to begin recovery process for a 17839 * device that has spun down. 17840 * 17841 * Arguments: arg - pointer to associated softstate struct. 
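 *
 * Note: A timeout(9F) callback must not block, so this routine simply
 *	dispatches sd_start_stop_unit_task() onto the sd_tq taskq.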
17842 * 17843 * Context: Executes in a timeout(9F) thread context 17844 */ 17845 17846 static void 17847 sd_start_stop_unit_callback(void *arg) 17848 { 17849 struct sd_lun *un = arg; 17850 ASSERT(un != NULL); 17851 ASSERT(!mutex_owned(SD_MUTEX(un))); 17852 17853 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17854 17855 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17856 } 17857 17858 17859 /* 17860 * Function: sd_start_stop_unit_task 17861 * 17862 * Description: Recovery procedure when a drive is spun down. 17863 * 17864 * Arguments: arg - pointer to associated softstate struct. 17865 * 17866 * Context: Executes in a taskq() thread context 17867 */ 17868 17869 static void 17870 sd_start_stop_unit_task(void *arg) 17871 { 17872 struct sd_lun *un = arg; 17873 17874 ASSERT(un != NULL); 17875 ASSERT(!mutex_owned(SD_MUTEX(un))); 17876 17877 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17878 17879 /* 17880 * Some unformatted drives report not ready error, no need to 17881 * restart if format has been initiated. 17882 */ 17883 mutex_enter(SD_MUTEX(un)); 17884 if (un->un_f_format_in_progress == TRUE) { 17885 mutex_exit(SD_MUTEX(un)); 17886 return; 17887 } 17888 mutex_exit(SD_MUTEX(un)); 17889 17890 /* 17891 * When a START STOP command is issued from here, it is part of a 17892 * failure recovery operation and must be issued before any other 17893 * commands, including any pending retries. Thus it must be sent 17894 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17895 * succeeds or not, we will start I/O after the attempt. 17896 */ 17897 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17898 SD_PATH_DIRECT_PRIORITY); 17899 17900 /* 17901 * The above call blocks until the START_STOP_UNIT command completes. 17902 * Now that it has completed, we must re-try the original IO that 17903 * received the NOT READY condition in the first place. There are 17904 * three possible conditions here: 17905 * 17906 * (1) The original IO is on un_retry_bp. 17907 * (2) The original IO is on the regular wait queue, and un_retry_bp 17908 * is NULL. 17909 * (3) The original IO is on the regular wait queue, and un_retry_bp 17910 * points to some other, unrelated bp. 17911 * 17912 * For each case, we must call sd_start_cmds() with un_retry_bp 17913 * as the argument. If un_retry_bp is NULL, this will initiate 17914 * processing of the regular wait queue. If un_retry_bp is not NULL, 17915 * then this will process the bp on un_retry_bp. That may or may not 17916 * be the original IO, but that does not matter: the important thing 17917 * is to keep the IO processing going at this point. 17918 * 17919 * Note: This is a very specific error recovery sequence associated 17920 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 17921 * serialize the I/O with completion of the spin-up. 17922 */ 17923 mutex_enter(SD_MUTEX(un)); 17924 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17925 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 17926 un, un->un_retry_bp); 17927 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 17928 sd_start_cmds(un, un->un_retry_bp); 17929 mutex_exit(SD_MUTEX(un)); 17930 17931 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 17932 } 17933 17934 17935 /* 17936 * Function: sd_send_scsi_INQUIRY 17937 * 17938 * Description: Issue the scsi INQUIRY command. 
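 *		Handles both standard INQUIRY data and EVPD pages; the
 *		evpd and page_code arguments are copied directly into
 *		CDB bytes 1 and 2 below.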
17939 * 17940 * Arguments: un 17941 * bufaddr 17942 * buflen 17943 * evpd 17944 * page_code 17945 * page_length 17946 * 17947 * Return Code: 0 - Success 17948 * errno return code from sd_send_scsi_cmd() 17949 * 17950 * Context: Can sleep. Does not return until command is completed. 17951 */ 17952 17953 static int 17954 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 17955 uchar_t evpd, uchar_t page_code, size_t *residp) 17956 { 17957 union scsi_cdb cdb; 17958 struct uscsi_cmd ucmd_buf; 17959 int status; 17960 17961 ASSERT(un != NULL); 17962 ASSERT(!mutex_owned(SD_MUTEX(un))); 17963 ASSERT(bufaddr != NULL); 17964 17965 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 17966 17967 bzero(&cdb, sizeof (cdb)); 17968 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17969 bzero(bufaddr, buflen); 17970 17971 cdb.scc_cmd = SCMD_INQUIRY; 17972 cdb.cdb_opaque[1] = evpd; 17973 cdb.cdb_opaque[2] = page_code; 17974 FORMG0COUNT(&cdb, buflen); 17975 17976 ucmd_buf.uscsi_cdb = (char *)&cdb; 17977 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17978 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 17979 ucmd_buf.uscsi_buflen = buflen; 17980 ucmd_buf.uscsi_rqbuf = NULL; 17981 ucmd_buf.uscsi_rqlen = 0; 17982 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 17983 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 17984 17985 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17986 UIO_SYSSPACE, SD_PATH_DIRECT); 17987 17988 if ((status == 0) && (residp != NULL)) { 17989 *residp = ucmd_buf.uscsi_resid; 17990 } 17991 17992 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 17993 17994 return (status); 17995 } 17996 17997 17998 /* 17999 * Function: sd_send_scsi_TEST_UNIT_READY 18000 * 18001 * Description: Issue the scsi TEST UNIT READY command. 18002 * This routine can be told to set the flag USCSI_DIAGNOSE to 18003 * prevent retrying failed commands. Use this when the intent 18004 * is either to check for device readiness, to clear a Unit 18005 * Attention, or to clear any outstanding sense data. 18006 * However under specific conditions the expected behavior 18007 * is for retries to bring a device ready, so use the flag 18008 * with caution. 18009 * 18010 * Arguments: un 18011 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18012 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18013 * 0: dont check for media present, do retries on cmd. 18014 * 18015 * Return Code: 0 - Success 18016 * EIO - IO error 18017 * EACCES - Reservation conflict detected 18018 * ENXIO - Not Ready, medium not present 18019 * errno return code from sd_send_scsi_cmd() 18020 * 18021 * Context: Can sleep. Does not return until command is completed. 18022 */ 18023 18024 static int 18025 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18026 { 18027 struct scsi_extended_sense sense_buf; 18028 union scsi_cdb cdb; 18029 struct uscsi_cmd ucmd_buf; 18030 int status; 18031 18032 ASSERT(un != NULL); 18033 ASSERT(!mutex_owned(SD_MUTEX(un))); 18034 18035 SD_TRACE(SD_LOG_IO, un, 18036 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18037 18038 /* 18039 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18040 * timeouts when they receive a TUR and the queue is not empty. Check 18041 * the configuration flag set during attach (indicating the drive has 18042 * this firmware bug) and un_ncmds_in_transport before issuing the 18043 * TUR. If there are 18044 * pending commands return success, this is a bit arbitrary but is ok 18045 * for non-removables (i.e. 
the eliteI disks) and non-clustering 18046 * configurations. 18047 */ 18048 if (un->un_f_cfg_tur_check == TRUE) { 18049 mutex_enter(SD_MUTEX(un)); 18050 if (un->un_ncmds_in_transport != 0) { 18051 mutex_exit(SD_MUTEX(un)); 18052 return (0); 18053 } 18054 mutex_exit(SD_MUTEX(un)); 18055 } 18056 18057 bzero(&cdb, sizeof (cdb)); 18058 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18059 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18060 18061 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18062 18063 ucmd_buf.uscsi_cdb = (char *)&cdb; 18064 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18065 ucmd_buf.uscsi_bufaddr = NULL; 18066 ucmd_buf.uscsi_buflen = 0; 18067 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18068 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18069 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18070 18071 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18072 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18073 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18074 } 18075 ucmd_buf.uscsi_timeout = 60; 18076 18077 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18078 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18079 SD_PATH_STANDARD)); 18080 18081 switch (status) { 18082 case 0: 18083 break; /* Success! */ 18084 case EIO: 18085 switch (ucmd_buf.uscsi_status) { 18086 case STATUS_RESERVATION_CONFLICT: 18087 status = EACCES; 18088 break; 18089 case STATUS_CHECK: 18090 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18091 break; 18092 } 18093 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18094 (scsi_sense_key((uint8_t *)&sense_buf) == 18095 KEY_NOT_READY) && 18096 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18097 status = ENXIO; 18098 } 18099 break; 18100 default: 18101 break; 18102 } 18103 break; 18104 default: 18105 break; 18106 } 18107 18108 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18109 18110 return (status); 18111 } 18112 18113 18114 /* 18115 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18116 * 18117 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18118 * 18119 * Arguments: un 18120 * 18121 * Return Code: 0 - Success 18122 * EACCES 18123 * ENOTSUP 18124 * errno return code from sd_send_scsi_cmd() 18125 * 18126 * Context: Can sleep. Does not return until command is completed. 
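 *
 * Note: usr_cmd selects the PERSISTENT RESERVE IN service action
 *	(SD_READ_KEYS or SD_READ_RESV); it is placed into CDB byte 1
 *	and the returned data lands in data_bufp.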
18127 */ 18128 18129 static int 18130 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18131 uint16_t data_len, uchar_t *data_bufp) 18132 { 18133 struct scsi_extended_sense sense_buf; 18134 union scsi_cdb cdb; 18135 struct uscsi_cmd ucmd_buf; 18136 int status; 18137 int no_caller_buf = FALSE; 18138 18139 ASSERT(un != NULL); 18140 ASSERT(!mutex_owned(SD_MUTEX(un))); 18141 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18142 18143 SD_TRACE(SD_LOG_IO, un, 18144 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18145 18146 bzero(&cdb, sizeof (cdb)); 18147 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18148 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18149 if (data_bufp == NULL) { 18150 /* Allocate a default buf if the caller did not give one */ 18151 ASSERT(data_len == 0); 18152 data_len = MHIOC_RESV_KEY_SIZE; 18153 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18154 no_caller_buf = TRUE; 18155 } 18156 18157 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18158 cdb.cdb_opaque[1] = usr_cmd; 18159 FORMG1COUNT(&cdb, data_len); 18160 18161 ucmd_buf.uscsi_cdb = (char *)&cdb; 18162 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18163 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18164 ucmd_buf.uscsi_buflen = data_len; 18165 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18166 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18167 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18168 ucmd_buf.uscsi_timeout = 60; 18169 18170 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18171 UIO_SYSSPACE, SD_PATH_STANDARD); 18172 18173 switch (status) { 18174 case 0: 18175 break; /* Success! */ 18176 case EIO: 18177 switch (ucmd_buf.uscsi_status) { 18178 case STATUS_RESERVATION_CONFLICT: 18179 status = EACCES; 18180 break; 18181 case STATUS_CHECK: 18182 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18183 (scsi_sense_key((uint8_t *)&sense_buf) == 18184 KEY_ILLEGAL_REQUEST)) { 18185 status = ENOTSUP; 18186 } 18187 break; 18188 default: 18189 break; 18190 } 18191 break; 18192 default: 18193 break; 18194 } 18195 18196 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18197 18198 if (no_caller_buf == TRUE) { 18199 kmem_free(data_bufp, data_len); 18200 } 18201 18202 return (status); 18203 } 18204 18205 18206 /* 18207 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18208 * 18209 * Description: This routine is the driver entry point for handling the 18210 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 18211 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT and 18212 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT commands to the device. 18213 * 18214 * Arguments: un - Pointer to soft state struct for the target. 18215 * usr_cmd SCSI-3 reservation facility command (one of 18216 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18217 * SD_SCSI3_PREEMPTANDABORT or SD_SCSI3_REGISTERANDIGNOREKEY) 18218 * usr_bufp - user provided pointer to a register, reserve descriptor or 18219 * preempt and abort structure (mhioc_register_t, 18220 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18221 * 18222 * Return Code: 0 - Success 18223 * EACCES 18224 * ENOTSUP 18225 * errno return code from sd_send_scsi_cmd() 18226 * 18227 * Context: Can sleep. Does not return until command is completed.
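 * * For illustration only (a hedged sketch, not driver code): a SD_SCSI3_REGISTER request is built from an mhioc_register_t, e.g. with hypothetical oldkey/newkey values: * * mhioc_register_t reg; * bzero(&reg, sizeof (reg)); * bcopy(oldkey, reg.oldkey.key, MHIOC_RESV_KEY_SIZE); * bcopy(newkey, reg.newkey.key, MHIOC_RESV_KEY_SIZE); * rc = sd_send_scsi_PERSISTENT_RESERVE_OUT(un, SD_SCSI3_REGISTER, * (uchar_t *)&reg);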
18228 */ 18229 18230 static int 18231 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18232 uchar_t *usr_bufp) 18233 { 18234 struct scsi_extended_sense sense_buf; 18235 union scsi_cdb cdb; 18236 struct uscsi_cmd ucmd_buf; 18237 int status; 18238 uchar_t data_len = sizeof (sd_prout_t); 18239 sd_prout_t *prp; 18240 18241 ASSERT(un != NULL); 18242 ASSERT(!mutex_owned(SD_MUTEX(un))); 18243 ASSERT(data_len == 24); /* required by scsi spec */ 18244 18245 SD_TRACE(SD_LOG_IO, un, 18246 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18247 18248 if (usr_bufp == NULL) { 18249 return (EINVAL); 18250 } 18251 18252 bzero(&cdb, sizeof (cdb)); 18253 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18254 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18255 prp = kmem_zalloc(data_len, KM_SLEEP); 18256 18257 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18258 cdb.cdb_opaque[1] = usr_cmd; 18259 FORMG1COUNT(&cdb, data_len); 18260 18261 ucmd_buf.uscsi_cdb = (char *)&cdb; 18262 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18263 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18264 ucmd_buf.uscsi_buflen = data_len; 18265 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18266 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18267 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18268 ucmd_buf.uscsi_timeout = 60; 18269 18270 switch (usr_cmd) { 18271 case SD_SCSI3_REGISTER: { 18272 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18273 18274 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18275 bcopy(ptr->newkey.key, prp->service_key, 18276 MHIOC_RESV_KEY_SIZE); 18277 prp->aptpl = ptr->aptpl; 18278 break; 18279 } 18280 case SD_SCSI3_RESERVE: 18281 case SD_SCSI3_RELEASE: { 18282 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18283 18284 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18285 prp->scope_address = BE_32(ptr->scope_specific_addr); 18286 cdb.cdb_opaque[2] = ptr->type; 18287 break; 18288 } 18289 case SD_SCSI3_PREEMPTANDABORT: { 18290 mhioc_preemptandabort_t *ptr = 18291 (mhioc_preemptandabort_t *)usr_bufp; 18292 18293 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18294 bcopy(ptr->victim_key.key, prp->service_key, 18295 MHIOC_RESV_KEY_SIZE); 18296 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18297 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18298 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18299 break; 18300 } 18301 case SD_SCSI3_REGISTERANDIGNOREKEY: 18302 { 18303 mhioc_registerandignorekey_t *ptr; 18304 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18305 bcopy(ptr->newkey.key, 18306 prp->service_key, MHIOC_RESV_KEY_SIZE); 18307 prp->aptpl = ptr->aptpl; 18308 break; 18309 } 18310 default: 18311 ASSERT(FALSE); 18312 break; 18313 } 18314 18315 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18316 UIO_SYSSPACE, SD_PATH_STANDARD); 18317 18318 switch (status) { 18319 case 0: 18320 break; /* Success! 
*/ 18321 case EIO: 18322 switch (ucmd_buf.uscsi_status) { 18323 case STATUS_RESERVATION_CONFLICT: 18324 status = EACCES; 18325 break; 18326 case STATUS_CHECK: 18327 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18328 (scsi_sense_key((uint8_t *)&sense_buf) == 18329 KEY_ILLEGAL_REQUEST)) { 18330 status = ENOTSUP; 18331 } 18332 break; 18333 default: 18334 break; 18335 } 18336 break; 18337 default: 18338 break; 18339 } 18340 18341 kmem_free(prp, data_len); 18342 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18343 return (status); 18344 } 18345 18346 18347 /* 18348 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18349 * 18350 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target. 18351 * 18352 * Arguments: un - pointer to the target's soft state struct 18353 * dkc - callback descriptor for an async request, or NULL 18354 * Return Code: 0 - success 18355 * errno-type error code 18356 * 18357 * Context: kernel thread context only. 18358 */ 18359 18360 static int 18361 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18362 { 18363 struct sd_uscsi_info *uip; 18364 struct uscsi_cmd *uscmd; 18365 union scsi_cdb *cdb; 18366 struct buf *bp; 18367 int rval = 0; 18368 18369 SD_TRACE(SD_LOG_IO, un, 18370 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18371 18372 ASSERT(un != NULL); 18373 ASSERT(!mutex_owned(SD_MUTEX(un))); 18374 18375 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18376 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18377 18378 /* 18379 * First get some memory for the uscsi_cmd struct and cdb 18380 * and initialize for SYNCHRONIZE_CACHE cmd. 18381 */ 18382 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18383 uscmd->uscsi_cdblen = CDB_GROUP1; 18384 uscmd->uscsi_cdb = (caddr_t)cdb; 18385 uscmd->uscsi_bufaddr = NULL; 18386 uscmd->uscsi_buflen = 0; 18387 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18388 uscmd->uscsi_rqlen = SENSE_LENGTH; 18389 uscmd->uscsi_rqresid = SENSE_LENGTH; 18390 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18391 uscmd->uscsi_timeout = sd_io_time; 18392 18393 /* 18394 * Allocate an sd_uscsi_info struct and fill it with the info 18395 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18396 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18397 * since we allocate the buf here in this function, we do not 18398 * need to preserve the prior contents of b_private. 18399 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18400 */ 18401 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18402 uip->ui_flags = SD_PATH_DIRECT; 18403 uip->ui_cmdp = uscmd; 18404 18405 bp = getrbuf(KM_SLEEP); 18406 bp->b_private = uip; 18407 18408 /* 18409 * Set up the buffer to carry the uscsi request. 18410 */ 18411 bp->b_flags = B_BUSY; 18412 bp->b_bcount = 0; 18413 bp->b_blkno = 0; 18414 18415 if (dkc != NULL) { 18416 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18417 uip->ui_dkc = *dkc; 18418 } 18419 18420 bp->b_edev = SD_GET_DEV(un); 18421 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18422 18423 (void) sd_uscsi_strategy(bp); 18424 18425 /* 18426 * If this is a synchronous request, wait for completion. 18427 * If async, just return and let the b_iodone callback 18428 * clean up. 18429 * NOTE: On return, un_ncmds_in_driver will be decremented, 18430 * but it was also incremented in sd_uscsi_strategy(), so 18431 * we should be ok.
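 * * For illustration only (a hedged sketch): an asynchronous flush with a completion callback, where my_done and my_cookie are hypothetical: * * struct dk_callback dkc; * dkc.dkc_callback = my_done; * dkc.dkc_cookie = my_cookie; * (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc); * * my_done() is later invoked as (*dkc_callback)(dkc_cookie, status) from sd_send_scsi_SYNCHRONIZE_CACHE_biodone().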
18432 */ 18433 if (dkc == NULL) { 18434 (void) biowait(bp); 18435 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18436 } 18437 18438 return (rval); 18439 } 18440 18441 18442 static int 18443 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18444 { 18445 struct sd_uscsi_info *uip; 18446 struct uscsi_cmd *uscmd; 18447 uint8_t *sense_buf; 18448 struct sd_lun *un; 18449 int status; 18450 18451 uip = (struct sd_uscsi_info *)(bp->b_private); 18452 ASSERT(uip != NULL); 18453 18454 uscmd = uip->ui_cmdp; 18455 ASSERT(uscmd != NULL); 18456 18457 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18458 ASSERT(sense_buf != NULL); 18459 18460 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18461 ASSERT(un != NULL); 18462 18463 status = geterror(bp); 18464 switch (status) { 18465 case 0: 18466 break; /* Success! */ 18467 case EIO: 18468 switch (uscmd->uscsi_status) { 18469 case STATUS_RESERVATION_CONFLICT: 18470 /* Ignore reservation conflict */ 18471 status = 0; 18472 goto done; 18473 18474 case STATUS_CHECK: 18475 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18476 (scsi_sense_key(sense_buf) == 18477 KEY_ILLEGAL_REQUEST)) { 18478 /* Ignore Illegal Request error */ 18479 mutex_enter(SD_MUTEX(un)); 18480 un->un_f_sync_cache_supported = FALSE; 18481 mutex_exit(SD_MUTEX(un)); 18482 status = ENOTSUP; 18483 goto done; 18484 } 18485 break; 18486 default: 18487 break; 18488 } 18489 /* FALLTHRU */ 18490 default: 18491 /* 18492 * Don't log an error message if this device 18493 * has removable media. 18494 */ 18495 if (!un->un_f_has_removable_media) { 18496 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18497 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18498 } 18499 break; 18500 } 18501 18502 done: 18503 if (uip->ui_dkc.dkc_callback != NULL) { 18504 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18505 } 18506 18507 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18508 freerbuf(bp); 18509 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18510 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18511 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18512 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18513 18514 return (status); 18515 } 18516 18517 18518 /* 18519 * Function: sd_send_scsi_GET_CONFIGURATION 18520 * 18521 * Description: Issues the get configuration command to the device. 18522 * Called from sd_check_for_writable_cd & sd_get_media_info; 18523 * the caller must ensure that buflen = SD_PROFILE_HEADER_LEN. 18524 * Arguments: un 18525 * ucmdbuf 18526 * rqbuf 18527 * rqbuflen 18528 * bufaddr 18529 * buflen 18530 * path_flag 18531 * 18532 * Return Code: 0 - Success 18533 * errno return code from sd_send_scsi_cmd() 18534 * 18535 * Context: Can sleep. Does not return until command is completed. 18536 * 18537 */ 18538 18539 static int 18540 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18541 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18542 int path_flag) 18543 { 18544 char cdb[CDB_GROUP1]; 18545 int status; 18546 18547 ASSERT(un != NULL); 18548 ASSERT(!mutex_owned(SD_MUTEX(un))); 18549 ASSERT(bufaddr != NULL); 18550 ASSERT(ucmdbuf != NULL); 18551 ASSERT(rqbuf != NULL); 18552 18553 SD_TRACE(SD_LOG_IO, un, 18554 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18555 18556 bzero(cdb, sizeof (cdb)); 18557 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18558 bzero(rqbuf, rqbuflen); 18559 bzero(bufaddr, buflen); 18560 18561 /* 18562 * Set up cdb field for the get configuration command.
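 * The Group 1 CDB below is laid out per the MMC GET CONFIGURATION command (RT semantics per the MMC spec): * cdb[0] - opcode (SCMD_GET_CONFIGURATION) * cdb[1] - RT field; 0x02 requests the feature header plus a single feature descriptor * cdb[2-3] - starting feature number (left at zero here) * cdb[7-8] - allocation length (SD_PROFILE_HEADER_LEN)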
18563 */ 18564 cdb[0] = SCMD_GET_CONFIGURATION; 18565 cdb[1] = 0x02; /* Requested Type */ 18566 cdb[8] = SD_PROFILE_HEADER_LEN; 18567 ucmdbuf->uscsi_cdb = cdb; 18568 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18569 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18570 ucmdbuf->uscsi_buflen = buflen; 18571 ucmdbuf->uscsi_timeout = sd_io_time; 18572 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18573 ucmdbuf->uscsi_rqlen = rqbuflen; 18574 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18575 18576 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18577 UIO_SYSSPACE, path_flag); 18578 18579 switch (status) { 18580 case 0: 18581 break; /* Success! */ 18582 case EIO: 18583 switch (ucmdbuf->uscsi_status) { 18584 case STATUS_RESERVATION_CONFLICT: 18585 status = EACCES; 18586 break; 18587 default: 18588 break; 18589 } 18590 break; 18591 default: 18592 break; 18593 } 18594 18595 if (status == 0) { 18596 SD_DUMP_MEMORY(un, SD_LOG_IO, 18597 "sd_send_scsi_GET_CONFIGURATION: data", 18598 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18599 } 18600 18601 SD_TRACE(SD_LOG_IO, un, 18602 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18603 18604 return (status); 18605 } 18606 18607 /* 18608 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18609 * 18610 * Description: Issues the get configuration command to the device to 18611 * retrieve a specific feature. Called from 18612 * sd_check_for_writable_cd & sd_set_mmc_caps. 18613 * Arguments: un 18614 * ucmdbuf 18615 * rqbuf 18616 * rqbuflen 18617 * bufaddr 18618 * buflen 18619 * feature 18620 * path_flag 18621 * Return Code: 0 - Success 18622 * errno return code from sd_send_scsi_cmd() 18623 * 18624 * Context: Can sleep. Does not return until command is completed. 18625 * 18626 */ 18627 static int 18628 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18629 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18630 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18631 { 18632 char cdb[CDB_GROUP1]; 18633 int status; 18634 18635 ASSERT(un != NULL); 18636 ASSERT(!mutex_owned(SD_MUTEX(un))); 18637 ASSERT(bufaddr != NULL); 18638 ASSERT(ucmdbuf != NULL); 18639 ASSERT(rqbuf != NULL); 18640 18641 SD_TRACE(SD_LOG_IO, un, 18642 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18643 18644 bzero(cdb, sizeof (cdb)); 18645 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18646 bzero(rqbuf, rqbuflen); 18647 bzero(bufaddr, buflen); 18648 18649 /* 18650 * Set up cdb field for the get configuration command. 18651 */ 18652 cdb[0] = SCMD_GET_CONFIGURATION; 18653 cdb[1] = 0x02; /* Requested Type */ 18654 cdb[3] = feature; 18655 cdb[8] = buflen; 18656 ucmdbuf->uscsi_cdb = cdb; 18657 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18658 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18659 ucmdbuf->uscsi_buflen = buflen; 18660 ucmdbuf->uscsi_timeout = sd_io_time; 18661 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18662 ucmdbuf->uscsi_rqlen = rqbuflen; 18663 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18664 18665 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18666 UIO_SYSSPACE, path_flag); 18667 18668 switch (status) { 18669 case 0: 18670 break; /* Success!
*/ 18671 case EIO: 18672 switch (ucmdbuf->uscsi_status) { 18673 case STATUS_RESERVATION_CONFLICT: 18674 status = EACCES; 18675 break; 18676 default: 18677 break; 18678 } 18679 break; 18680 default: 18681 break; 18682 } 18683 18684 if (status == 0) { 18685 SD_DUMP_MEMORY(un, SD_LOG_IO, 18686 "sd_send_scsi_feature_GET_CONFIGURATION: data", 18687 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18688 } 18689 18690 SD_TRACE(SD_LOG_IO, un, 18691 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 18692 18693 return (status); 18694 } 18695 18696 18697 /* 18698 * Function: sd_send_scsi_MODE_SENSE 18699 * 18700 * Description: Utility function for issuing a scsi MODE SENSE command. 18701 * Note: This routine uses a consistent implementation for Group0, 18702 * Group1, and Group2 commands across all platforms. ATAPI devices 18703 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 18704 * 18705 * Arguments: un - pointer to the softstate struct for the target. 18706 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 18707 * CDB_GROUP[1|2] (10 byte)). 18708 * bufaddr - buffer for page data retrieved from the target. 18709 * buflen - size of page to be retrieved. 18710 * page_code - page code of data to be retrieved from the target. 18711 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18712 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18713 * to use the USCSI "direct" chain and bypass the normal 18714 * command waitq. 18715 * 18716 * Return Code: 0 - Success 18717 * errno return code from sd_send_scsi_cmd() 18718 * 18719 * Context: Can sleep. Does not return until command is completed. 18720 */ 18721 18722 static int 18723 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18724 size_t buflen, uchar_t page_code, int path_flag) 18725 { 18726 struct scsi_extended_sense sense_buf; 18727 union scsi_cdb cdb; 18728 struct uscsi_cmd ucmd_buf; 18729 int status; 18730 int headlen; 18731 18732 ASSERT(un != NULL); 18733 ASSERT(!mutex_owned(SD_MUTEX(un))); 18734 ASSERT(bufaddr != NULL); 18735 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18736 (cdbsize == CDB_GROUP2)); 18737 18738 SD_TRACE(SD_LOG_IO, un, 18739 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 18740 18741 bzero(&cdb, sizeof (cdb)); 18742 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18743 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18744 bzero(bufaddr, buflen); 18745 18746 if (cdbsize == CDB_GROUP0) { 18747 cdb.scc_cmd = SCMD_MODE_SENSE; 18748 cdb.cdb_opaque[2] = page_code; 18749 FORMG0COUNT(&cdb, buflen); 18750 headlen = MODE_HEADER_LENGTH; 18751 } else { 18752 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 18753 cdb.cdb_opaque[2] = page_code; 18754 FORMG1COUNT(&cdb, buflen); 18755 headlen = MODE_HEADER_LENGTH_GRP2; 18756 } 18757 18758 ASSERT(headlen <= buflen); 18759 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18760 18761 ucmd_buf.uscsi_cdb = (char *)&cdb; 18762 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18763 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18764 ucmd_buf.uscsi_buflen = buflen; 18765 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18766 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18767 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18768 ucmd_buf.uscsi_timeout = 60; 18769 18770 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18771 UIO_SYSSPACE, path_flag); 18772 18773 switch (status) { 18774 case 0: 18775 /* 18776 * sr_check_wp() uses the 0x3f page code and checks the header of 18777 * the mode page to determine if the target device is
write-protected. 18778 * But some USB devices return 0 bytes for the 0x3f page code. In 18779 * this case, make sure that at least the mode page header is 18780 * returned. 18781 */ 18782 if (buflen - ucmd_buf.uscsi_resid < headlen) 18783 status = EIO; 18784 break; /* Success! */ 18785 case EIO: 18786 switch (ucmd_buf.uscsi_status) { 18787 case STATUS_RESERVATION_CONFLICT: 18788 status = EACCES; 18789 break; 18790 default: 18791 break; 18792 } 18793 break; 18794 default: 18795 break; 18796 } 18797 18798 if (status == 0) { 18799 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 18800 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 18801 } 18802 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 18803 18804 return (status); 18805 } 18806 18807 18808 /* 18809 * Function: sd_send_scsi_MODE_SELECT 18810 * 18811 * Description: Utility function for issuing a scsi MODE SELECT command. 18812 * Note: This routine uses a consistent implementation for Group0, 18813 * Group1, and Group2 commands across all platforms. ATAPI devices 18814 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 18815 * 18816 * Arguments: un - pointer to the softstate struct for the target. 18817 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 18818 * CDB_GROUP[1|2] (10 byte)). 18819 * bufaddr - buffer containing the page data to be sent to the target. 18820 * buflen - size of the page data to be transferred. 18821 * save_page - boolean to determine if SP bit should be set. 18822 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18823 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18824 * to use the USCSI "direct" chain and bypass the normal 18825 * command waitq. 18826 * 18827 * Return Code: 0 - Success 18828 * errno return code from sd_send_scsi_cmd() 18829 * 18830 * Context: Can sleep. Does not return until command is completed.
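 * * For illustration only (a hedged sketch): a typical caller first fetches the page with sd_send_scsi_MODE_SENSE(), patches it in place, and writes it back, e.g. with a hypothetical pagebuf/pagelen: * * if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, pagebuf, pagelen, * SD_SAVE_PAGE, SD_PATH_DIRECT) != 0) * ... EACCES on reservation conflict, else errno ...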
18831 */ 18832 18833 static int 18834 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18835 size_t buflen, uchar_t save_page, int path_flag) 18836 { 18837 struct scsi_extended_sense sense_buf; 18838 union scsi_cdb cdb; 18839 struct uscsi_cmd ucmd_buf; 18840 int status; 18841 18842 ASSERT(un != NULL); 18843 ASSERT(!mutex_owned(SD_MUTEX(un))); 18844 ASSERT(bufaddr != NULL); 18845 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18846 (cdbsize == CDB_GROUP2)); 18847 18848 SD_TRACE(SD_LOG_IO, un, 18849 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 18850 18851 bzero(&cdb, sizeof (cdb)); 18852 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18853 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18854 18855 /* Set the PF bit for many third party drives */ 18856 cdb.cdb_opaque[1] = 0x10; 18857 18858 /* Set the savepage (SP) bit if given */ 18859 if (save_page == SD_SAVE_PAGE) { 18860 cdb.cdb_opaque[1] |= 0x01; 18861 } 18862 18863 if (cdbsize == CDB_GROUP0) { 18864 cdb.scc_cmd = SCMD_MODE_SELECT; 18865 FORMG0COUNT(&cdb, buflen); 18866 } else { 18867 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 18868 FORMG1COUNT(&cdb, buflen); 18869 } 18870 18871 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18872 18873 ucmd_buf.uscsi_cdb = (char *)&cdb; 18874 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18875 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18876 ucmd_buf.uscsi_buflen = buflen; 18877 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18878 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18879 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18880 ucmd_buf.uscsi_timeout = 60; 18881 18882 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18883 UIO_SYSSPACE, path_flag); 18884 18885 switch (status) { 18886 case 0: 18887 break; /* Success! */ 18888 case EIO: 18889 switch (ucmd_buf.uscsi_status) { 18890 case STATUS_RESERVATION_CONFLICT: 18891 status = EACCES; 18892 break; 18893 default: 18894 break; 18895 } 18896 break; 18897 default: 18898 break; 18899 } 18900 18901 if (status == 0) { 18902 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 18903 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 18904 } 18905 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 18906 18907 return (status); 18908 } 18909 18910 18911 /* 18912 * Function: sd_send_scsi_RDWR 18913 * 18914 * Description: Issue a scsi READ or WRITE command with the given parameters. 18915 * 18916 * Arguments: un: Pointer to the sd_lun struct for the target. 18917 * cmd: SCMD_READ or SCMD_WRITE 18918 * bufaddr: Address of the caller's buffer holding or receiving 18919 * the RDWR data. buflen: Length of the caller's buffer. 18920 * start_block: Block number for the start of the RDWR operation. 18921 * (Assumes target-native block size.) 18922 * (Note: no residual is returned to the caller; the uscsi 18923 * residual from the transfer is discarded by this routine.) 18924 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18925 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18926 * to use the USCSI "direct" chain and bypass the normal 18927 * command waitq. 18928 * 18929 * Return Code: 0 - Success 18930 * errno return code from sd_send_scsi_cmd() 18931 * 18932 * Context: Can sleep. Does not return until command is completed.
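 * * For illustration only (a hedged sketch): reading the first target-native block into a scratch buffer: * * char *buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP); * int rc = sd_send_scsi_RDWR(un, SCMD_READ, buf, * un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT);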
18933 */ 18934 18935 static int 18936 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 18937 size_t buflen, daddr_t start_block, int path_flag) 18938 { 18939 struct scsi_extended_sense sense_buf; 18940 union scsi_cdb cdb; 18941 struct uscsi_cmd ucmd_buf; 18942 uint32_t block_count; 18943 int status; 18944 int cdbsize; 18945 uchar_t flag; 18946 18947 ASSERT(un != NULL); 18948 ASSERT(!mutex_owned(SD_MUTEX(un))); 18949 ASSERT(bufaddr != NULL); 18950 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 18951 18952 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 18953 18954 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 18955 return (EINVAL); 18956 } 18957 18958 mutex_enter(SD_MUTEX(un)); 18959 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 18960 mutex_exit(SD_MUTEX(un)); 18961 18962 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 18963 18964 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 18965 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 18966 bufaddr, buflen, start_block, block_count); 18967 18968 bzero(&cdb, sizeof (cdb)); 18969 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18970 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18971 18972 /* Compute CDB size to use */ 18973 if (start_block > 0xffffffff) 18974 cdbsize = CDB_GROUP4; 18975 else if ((start_block & 0xFFE00000) || 18976 (un->un_f_cfg_is_atapi == TRUE)) 18977 cdbsize = CDB_GROUP1; 18978 else 18979 cdbsize = CDB_GROUP0; 18980 18981 switch (cdbsize) { 18982 case CDB_GROUP0: /* 6-byte CDBs */ 18983 cdb.scc_cmd = cmd; 18984 FORMG0ADDR(&cdb, start_block); 18985 FORMG0COUNT(&cdb, block_count); 18986 break; 18987 case CDB_GROUP1: /* 10-byte CDBs */ 18988 cdb.scc_cmd = cmd | SCMD_GROUP1; 18989 FORMG1ADDR(&cdb, start_block); 18990 FORMG1COUNT(&cdb, block_count); 18991 break; 18992 case CDB_GROUP4: /* 16-byte CDBs */ 18993 cdb.scc_cmd = cmd | SCMD_GROUP4; 18994 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 18995 FORMG4COUNT(&cdb, block_count); 18996 break; 18997 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 18998 default: 18999 /* All others reserved */ 19000 return (EINVAL); 19001 } 19002 19003 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19004 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19005 19006 ucmd_buf.uscsi_cdb = (char *)&cdb; 19007 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19008 ucmd_buf.uscsi_bufaddr = bufaddr; 19009 ucmd_buf.uscsi_buflen = buflen; 19010 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19011 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19012 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19013 ucmd_buf.uscsi_timeout = 60; 19014 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19015 UIO_SYSSPACE, path_flag); 19016 switch (status) { 19017 case 0: 19018 break; /* Success! */ 19019 case EIO: 19020 switch (ucmd_buf.uscsi_status) { 19021 case STATUS_RESERVATION_CONFLICT: 19022 status = EACCES; 19023 break; 19024 default: 19025 break; 19026 } 19027 break; 19028 default: 19029 break; 19030 } 19031 19032 if (status == 0) { 19033 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19034 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19035 } 19036 19037 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19038 19039 return (status); 19040 } 19041 19042 19043 /* 19044 * Function: sd_send_scsi_LOG_SENSE 19045 * 19046 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19047 * 19048 * Arguments: un: Pointer to the sd_lun struct for the target. 
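 * bufaddr: buffer to receive the log page data. * buflen: allocation length for the LOG SENSE data. * page_code: log page to be retrieved (e.g. TEMPERATURE_PAGE). * page_control: page control field (placed in bits 6-7 of CDB byte 2). * param_ptr: parameter pointer (CDB bytes 5-6). * path_flag: SD_PATH_* chain selection, as for the routines above.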
19049 * 19050 * Return Code: 0 - Success 19051 * errno return code from sd_send_scsi_cmd() 19052 * 19053 * Context: Can sleep. Does not return until command is completed. 19054 */ 19055 19056 static int 19057 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19058 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19059 int path_flag) 19060 19061 { 19062 struct scsi_extended_sense sense_buf; 19063 union scsi_cdb cdb; 19064 struct uscsi_cmd ucmd_buf; 19065 int status; 19066 19067 ASSERT(un != NULL); 19068 ASSERT(!mutex_owned(SD_MUTEX(un))); 19069 19070 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19071 19072 bzero(&cdb, sizeof (cdb)); 19073 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19074 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19075 19076 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19077 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19078 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19079 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19080 FORMG1COUNT(&cdb, buflen); 19081 19082 ucmd_buf.uscsi_cdb = (char *)&cdb; 19083 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19084 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19085 ucmd_buf.uscsi_buflen = buflen; 19086 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19087 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19088 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19089 ucmd_buf.uscsi_timeout = 60; 19090 19091 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19092 UIO_SYSSPACE, path_flag); 19093 19094 switch (status) { 19095 case 0: 19096 break; 19097 case EIO: 19098 switch (ucmd_buf.uscsi_status) { 19099 case STATUS_RESERVATION_CONFLICT: 19100 status = EACCES; 19101 break; 19102 case STATUS_CHECK: 19103 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19104 (scsi_sense_key((uint8_t *)&sense_buf) == 19105 KEY_ILLEGAL_REQUEST) && 19106 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19107 /* 19108 * ASC 0x24: INVALID FIELD IN CDB 19109 */ 19110 switch (page_code) { 19111 case START_STOP_CYCLE_PAGE: 19112 /* 19113 * The start stop cycle counter is 19114 * implemented as page 0x31 in earlier 19115 * generation disks. In new generation 19116 * disks the start stop cycle counter is 19117 * implemented as page 0xE. To properly 19118 * handle this case if an attempt for 19119 * log page 0xE is made and fails we 19120 * will try again using page 0x31. 19121 * 19122 * Network storage BU committed to 19123 * maintain the page 0x31 for this 19124 * purpose and will not have any other 19125 * page implemented with page code 0x31 19126 * until all disks transition to the 19127 * standard page. 
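 * * (The retry below reuses the same ucmd_buf; only CDB byte 2 is rewritten to select the vendor-unique page 0x31 before the command is reissued.)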
19128 */ 19129 mutex_enter(SD_MUTEX(un)); 19130 un->un_start_stop_cycle_page = 19131 START_STOP_CYCLE_VU_PAGE; 19132 cdb.cdb_opaque[2] = 19133 (char)(page_control << 6) | 19134 un->un_start_stop_cycle_page; 19135 mutex_exit(SD_MUTEX(un)); 19136 status = sd_send_scsi_cmd( 19137 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19138 UIO_SYSSPACE, path_flag); 19139 19140 break; 19141 case TEMPERATURE_PAGE: 19142 status = ENOTTY; 19143 break; 19144 default: 19145 break; 19146 } 19147 } 19148 break; 19149 default: 19150 break; 19151 } 19152 break; 19153 default: 19154 break; 19155 } 19156 19157 if (status == 0) { 19158 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19159 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19160 } 19161 19162 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19163 19164 return (status); 19165 } 19166 19167 19168 /* 19169 * Function: sdioctl 19170 * 19171 * Description: Driver's ioctl(9e) entry point function. 19172 * 19173 * Arguments: dev - device number 19174 * cmd - ioctl operation to be performed 19175 * arg - user argument, contains data to be set or reference 19176 * parameter for get 19177 * flag - bit flag, indicating open settings, 32/64 bit type 19178 * cred_p - user credential pointer 19179 * rval_p - calling process return value (OPT) 19180 * 19181 * Return Code: EINVAL 19182 * ENOTTY 19183 * ENXIO 19184 * EIO 19185 * EFAULT 19186 * ENOTSUP 19187 * EPERM 19188 * 19189 * Context: Called from the device switch at normal priority. 19190 */ 19191 19192 static int 19193 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19194 { 19195 struct sd_lun *un = NULL; 19196 int err = 0; 19197 int i = 0; 19198 cred_t *cr; 19199 int tmprval = EINVAL; 19200 int is_valid; 19201 19202 /* 19203 * All device accesses go thru sdstrategy, where we check the suspend 19204 * status. 19205 */ 19206 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19207 return (ENXIO); 19208 } 19209 19210 ASSERT(!mutex_owned(SD_MUTEX(un))); 19211 19212 19213 is_valid = SD_IS_VALID_LABEL(un); 19214 19215 /* 19216 * Moved this wait from sd_uscsi_strategy to here for 19217 * reasons of deadlock prevention. Internal driver commands, 19218 * specifically those to change a device's power level, result 19219 * in a call to sd_uscsi_strategy. 19220 */ 19221 mutex_enter(SD_MUTEX(un)); 19222 while ((un->un_state == SD_STATE_SUSPENDED) || 19223 (un->un_state == SD_STATE_PM_CHANGING)) { 19224 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19225 } 19226 /* 19227 * Twiddling the counter here protects commands from now 19228 * through to the top of sd_uscsi_strategy. Without the 19229 * counter increment, a power down, for example, could slip in 19230 * after the above state check is made and before 19231 * execution gets to the top of sd_uscsi_strategy. 19232 * That would cause problems.
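 * * (The matching decrement is at the bottom of sdioctl(); every early-return path below must drop the count itself before releasing the mutex.)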
19233 */ 19234 un->un_ncmds_in_driver++; 19235 19236 if (!is_valid && 19237 (flag & (FNDELAY | FNONBLOCK))) { 19238 switch (cmd) { 19239 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19240 case DKIOCGVTOC: 19241 case DKIOCGAPART: 19242 case DKIOCPARTINFO: 19243 case DKIOCSGEOM: 19244 case DKIOCSAPART: 19245 case DKIOCGETEFI: 19246 case DKIOCPARTITION: 19247 case DKIOCSVTOC: 19248 case DKIOCSETEFI: 19249 case DKIOCGMBOOT: 19250 case DKIOCSMBOOT: 19251 case DKIOCG_PHYGEOM: 19252 case DKIOCG_VIRTGEOM: 19253 /* let cmlb handle it */ 19254 goto skip_ready_valid; 19255 19256 case CDROMPAUSE: 19257 case CDROMRESUME: 19258 case CDROMPLAYMSF: 19259 case CDROMPLAYTRKIND: 19260 case CDROMREADTOCHDR: 19261 case CDROMREADTOCENTRY: 19262 case CDROMSTOP: 19263 case CDROMSTART: 19264 case CDROMVOLCTRL: 19265 case CDROMSUBCHNL: 19266 case CDROMREADMODE2: 19267 case CDROMREADMODE1: 19268 case CDROMREADOFFSET: 19269 case CDROMSBLKMODE: 19270 case CDROMGBLKMODE: 19271 case CDROMGDRVSPEED: 19272 case CDROMSDRVSPEED: 19273 case CDROMCDDA: 19274 case CDROMCDXA: 19275 case CDROMSUBCODE: 19276 if (!ISCD(un)) { 19277 un->un_ncmds_in_driver--; 19278 ASSERT(un->un_ncmds_in_driver >= 0); 19279 mutex_exit(SD_MUTEX(un)); 19280 return (ENOTTY); 19281 } 19282 break; 19283 case FDEJECT: 19284 case DKIOCEJECT: 19285 case CDROMEJECT: 19286 if (!un->un_f_eject_media_supported) { 19287 un->un_ncmds_in_driver--; 19288 ASSERT(un->un_ncmds_in_driver >= 0); 19289 mutex_exit(SD_MUTEX(un)); 19290 return (ENOTTY); 19291 } 19292 break; 19293 case DKIOCFLUSHWRITECACHE: 19294 mutex_exit(SD_MUTEX(un)); 19295 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19296 if (err != 0) { 19297 mutex_enter(SD_MUTEX(un)); 19298 un->un_ncmds_in_driver--; 19299 ASSERT(un->un_ncmds_in_driver >= 0); 19300 mutex_exit(SD_MUTEX(un)); 19301 return (EIO); 19302 } 19303 mutex_enter(SD_MUTEX(un)); 19304 /* FALLTHROUGH */ 19305 case DKIOCREMOVABLE: 19306 case DKIOCHOTPLUGGABLE: 19307 case DKIOCINFO: 19308 case DKIOCGMEDIAINFO: 19309 case MHIOCENFAILFAST: 19310 case MHIOCSTATUS: 19311 case MHIOCTKOWN: 19312 case MHIOCRELEASE: 19313 case MHIOCGRP_INKEYS: 19314 case MHIOCGRP_INRESV: 19315 case MHIOCGRP_REGISTER: 19316 case MHIOCGRP_RESERVE: 19317 case MHIOCGRP_PREEMPTANDABORT: 19318 case MHIOCGRP_REGISTERANDIGNOREKEY: 19319 case CDROMCLOSETRAY: 19320 case USCSICMD: 19321 goto skip_ready_valid; 19322 default: 19323 break; 19324 } 19325 19326 mutex_exit(SD_MUTEX(un)); 19327 err = sd_ready_and_valid(un); 19328 mutex_enter(SD_MUTEX(un)); 19329 19330 if (err != SD_READY_VALID) { 19331 switch (cmd) { 19332 case DKIOCSTATE: 19333 case CDROMGDRVSPEED: 19334 case CDROMSDRVSPEED: 19335 case FDEJECT: /* for eject command */ 19336 case DKIOCEJECT: 19337 case CDROMEJECT: 19338 case DKIOCREMOVABLE: 19339 case DKIOCHOTPLUGGABLE: 19340 break; 19341 default: 19342 if (un->un_f_has_removable_media) { 19343 err = ENXIO; 19344 } else { 19345 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19346 if (err == SD_RESERVED_BY_OTHERS) { 19347 err = EACCES; 19348 } else { 19349 err = EIO; 19350 } 19351 } 19352 un->un_ncmds_in_driver--; 19353 ASSERT(un->un_ncmds_in_driver >= 0); 19354 mutex_exit(SD_MUTEX(un)); 19355 return (err); 19356 } 19357 } 19358 } 19359 19360 skip_ready_valid: 19361 mutex_exit(SD_MUTEX(un)); 19362 19363 switch (cmd) { 19364 case DKIOCINFO: 19365 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19366 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19367 break; 19368 19369 case DKIOCGMEDIAINFO: 19370 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19371 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19372 break; 19373 19374 case DKIOCGGEOM: 19375 case DKIOCGVTOC: 19376 case DKIOCGAPART: 19377 case DKIOCPARTINFO: 19378 case DKIOCSGEOM: 19379 case DKIOCSAPART: 19380 case DKIOCGETEFI: 19381 case DKIOCPARTITION: 19382 case DKIOCSVTOC: 19383 case DKIOCSETEFI: 19384 case DKIOCGMBOOT: 19385 case DKIOCSMBOOT: 19386 case DKIOCG_PHYGEOM: 19387 case DKIOCG_VIRTGEOM: 19388 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19389 19390 /* TUR should spin up */ 19391 19392 if (un->un_f_has_removable_media) 19393 err = sd_send_scsi_TEST_UNIT_READY(un, 19394 SD_CHECK_FOR_MEDIA); 19395 else 19396 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19397 19398 if (err != 0) 19399 break; 19400 19401 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19402 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19403 19404 if ((err == 0) && 19405 ((cmd == DKIOCSETEFI) || 19406 (un->un_f_pkstats_enabled) && 19407 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19408 19409 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19410 (void *)SD_PATH_DIRECT); 19411 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19412 sd_set_pstats(un); 19413 SD_TRACE(SD_LOG_IO_PARTITION, un, 19414 "sd_ioctl: un:0x%p pstats created and " 19415 "set\n", un); 19416 } 19417 } 19418 19419 if ((cmd == DKIOCSVTOC) || 19420 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19421 19422 mutex_enter(SD_MUTEX(un)); 19423 if (un->un_f_devid_supported && 19424 (un->un_f_opt_fab_devid == TRUE)) { 19425 if (un->un_devid == NULL) { 19426 sd_register_devid(un, SD_DEVINFO(un), 19427 SD_TARGET_IS_UNRESERVED); 19428 } else { 19429 /* 19430 * The device id for this disk 19431 * has been fabricated. The 19432 * device id must be preserved 19433 * by writing it back out to 19434 * disk. 19435 */ 19436 if (sd_write_deviceid(un) != 0) { 19437 ddi_devid_free(un->un_devid); 19438 un->un_devid = NULL; 19439 } 19440 } 19441 } 19442 mutex_exit(SD_MUTEX(un)); 19443 } 19444 19445 break; 19446 19447 case DKIOCLOCK: 19448 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19449 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19450 SD_PATH_STANDARD); 19451 break; 19452 19453 case DKIOCUNLOCK: 19454 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19455 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19456 SD_PATH_STANDARD); 19457 break; 19458 19459 case DKIOCSTATE: { 19460 enum dkio_state state; 19461 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19462 19463 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19464 err = EFAULT; 19465 } else { 19466 err = sd_check_media(dev, state); 19467 if (err == 0) { 19468 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19469 sizeof (int), flag) != 0) 19470 err = EFAULT; 19471 } 19472 } 19473 break; 19474 } 19475 19476 case DKIOCREMOVABLE: 19477 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19478 i = un->un_f_has_removable_media ? 1 : 0; 19479 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19480 err = EFAULT; 19481 } else { 19482 err = 0; 19483 } 19484 break; 19485 19486 case DKIOCHOTPLUGGABLE: 19487 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19488 i = un->un_f_is_hotpluggable ? 
1 : 0; 19489 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19490 err = EFAULT; 19491 } else { 19492 err = 0; 19493 } 19494 break; 19495 19496 case DKIOCGTEMPERATURE: 19497 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19498 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19499 break; 19500 19501 case MHIOCENFAILFAST: 19502 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19503 if ((err = drv_priv(cred_p)) == 0) { 19504 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19505 } 19506 break; 19507 19508 case MHIOCTKOWN: 19509 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19510 if ((err = drv_priv(cred_p)) == 0) { 19511 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19512 } 19513 break; 19514 19515 case MHIOCRELEASE: 19516 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19517 if ((err = drv_priv(cred_p)) == 0) { 19518 err = sd_mhdioc_release(dev); 19519 } 19520 break; 19521 19522 case MHIOCSTATUS: 19523 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19524 if ((err = drv_priv(cred_p)) == 0) { 19525 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19526 case 0: 19527 err = 0; 19528 break; 19529 case EACCES: 19530 *rval_p = 1; 19531 err = 0; 19532 break; 19533 default: 19534 err = EIO; 19535 break; 19536 } 19537 } 19538 break; 19539 19540 case MHIOCQRESERVE: 19541 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19542 if ((err = drv_priv(cred_p)) == 0) { 19543 err = sd_reserve_release(dev, SD_RESERVE); 19544 } 19545 break; 19546 19547 case MHIOCREREGISTERDEVID: 19548 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19549 if (drv_priv(cred_p) == EPERM) { 19550 err = EPERM; 19551 } else if (!un->un_f_devid_supported) { 19552 err = ENOTTY; 19553 } else { 19554 err = sd_mhdioc_register_devid(dev); 19555 } 19556 break; 19557 19558 case MHIOCGRP_INKEYS: 19559 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19560 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19561 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19562 err = ENOTSUP; 19563 } else { 19564 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19565 flag); 19566 } 19567 } 19568 break; 19569 19570 case MHIOCGRP_INRESV: 19571 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19572 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19573 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19574 err = ENOTSUP; 19575 } else { 19576 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19577 } 19578 } 19579 break; 19580 19581 case MHIOCGRP_REGISTER: 19582 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19583 if ((err = drv_priv(cred_p)) != EPERM) { 19584 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19585 err = ENOTSUP; 19586 } else if (arg != NULL) { 19587 mhioc_register_t reg; 19588 if (ddi_copyin((void *)arg, ®, 19589 sizeof (mhioc_register_t), flag) != 0) { 19590 err = EFAULT; 19591 } else { 19592 err = 19593 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19594 un, SD_SCSI3_REGISTER, 19595 (uchar_t *)®); 19596 } 19597 } 19598 } 19599 break; 19600 19601 case MHIOCGRP_RESERVE: 19602 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19603 if ((err = drv_priv(cred_p)) != EPERM) { 19604 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19605 err = ENOTSUP; 19606 } else if (arg != NULL) { 19607 mhioc_resv_desc_t resv_desc; 19608 if (ddi_copyin((void *)arg, &resv_desc, 19609 sizeof (mhioc_resv_desc_t), flag) != 0) { 19610 err = EFAULT; 19611 } else { 19612 err = 19613 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19614 un, SD_SCSI3_RESERVE, 19615 (uchar_t *)&resv_desc); 19616 } 19617 } 19618 } 19619 break; 19620 19621 
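/* * For illustration only (a hedged sketch, not driver code): from userland the group reservation ioctls above are driven with the mhioc structures, e.g. with a hypothetical fd and mykey buffer: * * mhioc_resv_desc_t rd; * bzero(&rd, sizeof (rd)); * bcopy(mykey, rd.key.key, MHIOC_RESV_KEY_SIZE); * if (ioctl(fd, MHIOCGRP_RESERVE, &rd) != 0) * ... EPERM without privilege, ENOTSUP with a SCSI-2 reservation type ... */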
case MHIOCGRP_PREEMPTANDABORT: 19622 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 19623 if ((err = drv_priv(cred_p)) != EPERM) { 19624 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19625 err = ENOTSUP; 19626 } else if (arg != NULL) { 19627 mhioc_preemptandabort_t preempt_abort; 19628 if (ddi_copyin((void *)arg, &preempt_abort, 19629 sizeof (mhioc_preemptandabort_t), 19630 flag) != 0) { 19631 err = EFAULT; 19632 } else { 19633 err = 19634 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19635 un, SD_SCSI3_PREEMPTANDABORT, 19636 (uchar_t *)&preempt_abort); 19637 } 19638 } 19639 } 19640 break; 19641 19642 case MHIOCGRP_REGISTERANDIGNOREKEY: 19643 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 19644 if ((err = drv_priv(cred_p)) != EPERM) { 19645 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19646 err = ENOTSUP; 19647 } else if (arg != NULL) { 19648 mhioc_registerandignorekey_t r_and_i; 19649 if (ddi_copyin((void *)arg, (void *)&r_and_i, 19650 sizeof (mhioc_registerandignorekey_t), 19651 flag) != 0) { 19652 err = EFAULT; 19653 } else { 19654 err = 19655 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19656 un, SD_SCSI3_REGISTERANDIGNOREKEY, 19657 (uchar_t *)&r_and_i); 19658 } 19659 } 19660 } 19661 break; 19662 19663 case USCSICMD: 19664 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 19665 cr = ddi_get_cred(); 19666 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 19667 err = EPERM; 19668 } else { 19669 enum uio_seg uioseg; 19670 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 19671 UIO_USERSPACE; 19672 if (un->un_f_format_in_progress == TRUE) { 19673 err = EAGAIN; 19674 break; 19675 } 19676 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 19677 flag, uioseg, SD_PATH_STANDARD); 19678 } 19679 break; 19680 19681 case CDROMPAUSE: 19682 case CDROMRESUME: 19683 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 19684 if (!ISCD(un)) { 19685 err = ENOTTY; 19686 } else { 19687 err = sr_pause_resume(dev, cmd); 19688 } 19689 break; 19690 19691 case CDROMPLAYMSF: 19692 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 19693 if (!ISCD(un)) { 19694 err = ENOTTY; 19695 } else { 19696 err = sr_play_msf(dev, (caddr_t)arg, flag); 19697 } 19698 break; 19699 19700 case CDROMPLAYTRKIND: 19701 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 19702 #if defined(__i386) || defined(__amd64) 19703 /* 19704 * Not supported on ATAPI CD drives; use CDROMPLAYMSF instead. 19705 */ 19706 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19707 #else 19708 if (!ISCD(un)) { 19709 #endif 19710 err = ENOTTY; 19711 } else { 19712 err = sr_play_trkind(dev, (caddr_t)arg, flag); 19713 } 19714 break; 19715 19716 case CDROMREADTOCHDR: 19717 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 19718 if (!ISCD(un)) { 19719 err = ENOTTY; 19720 } else { 19721 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 19722 } 19723 break; 19724 19725 case CDROMREADTOCENTRY: 19726 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 19727 if (!ISCD(un)) { 19728 err = ENOTTY; 19729 } else { 19730 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 19731 } 19732 break; 19733 19734 case CDROMSTOP: 19735 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 19736 if (!ISCD(un)) { 19737 err = ENOTTY; 19738 } else { 19739 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 19740 SD_PATH_STANDARD); 19741 } 19742 break; 19743 19744 case CDROMSTART: 19745 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 19746 if (!ISCD(un)) { 19747 err = ENOTTY; 19748 } else { 19749 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19750 SD_PATH_STANDARD); 19751 } 19752 break; 19753
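/* * For illustration only (a hedged sketch, not driver code): the USCSICMD case above is what uscsi(7I) consumers reach; e.g. a userland TEST UNIT READY with a hypothetical fd: * * struct uscsi_cmd uc; * union scsi_cdb cdb; * bzero(&uc, sizeof (uc)); * bzero(&cdb, sizeof (cdb)); * cdb.scc_cmd = SCMD_TEST_UNIT_READY; * uc.uscsi_cdb = (caddr_t)&cdb; * uc.uscsi_cdblen = CDB_GROUP0; * uc.uscsi_flags = USCSI_SILENT; * if (ioctl(fd, USCSICMD, &uc) != 0) * ... EPERM without sufficient privilege ... */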
19754 case CDROMCLOSETRAY: 19755 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 19756 if (!ISCD(un)) { 19757 err = ENOTTY; 19758 } else { 19759 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 19760 SD_PATH_STANDARD); 19761 } 19762 break; 19763 19764 case FDEJECT: /* for eject command */ 19765 case DKIOCEJECT: 19766 case CDROMEJECT: 19767 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 19768 if (!un->un_f_eject_media_supported) { 19769 err = ENOTTY; 19770 } else { 19771 err = sr_eject(dev); 19772 } 19773 break; 19774 19775 case CDROMVOLCTRL: 19776 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 19777 if (!ISCD(un)) { 19778 err = ENOTTY; 19779 } else { 19780 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 19781 } 19782 break; 19783 19784 case CDROMSUBCHNL: 19785 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 19786 if (!ISCD(un)) { 19787 err = ENOTTY; 19788 } else { 19789 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 19790 } 19791 break; 19792 19793 case CDROMREADMODE2: 19794 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 19795 if (!ISCD(un)) { 19796 err = ENOTTY; 19797 } else if (un->un_f_cfg_is_atapi == TRUE) { 19798 /* 19799 * If the drive supports READ CD, use that instead of 19800 * switching the LBA size via a MODE SELECT 19801 * Block Descriptor. 19802 */ 19803 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 19804 } else { 19805 err = sr_read_mode2(dev, (caddr_t)arg, flag); 19806 } 19807 break; 19808 19809 case CDROMREADMODE1: 19810 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 19811 if (!ISCD(un)) { 19812 err = ENOTTY; 19813 } else { 19814 err = sr_read_mode1(dev, (caddr_t)arg, flag); 19815 } 19816 break; 19817 19818 case CDROMREADOFFSET: 19819 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 19820 if (!ISCD(un)) { 19821 err = ENOTTY; 19822 } else { 19823 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 19824 flag); 19825 } 19826 break; 19827 19828 case CDROMSBLKMODE: 19829 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 19830 /* 19831 * There is no means of changing the block size on ATAPI 19832 * drives, so return ENOTTY if the drive type is ATAPI. 19833 */ 19834 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19835 err = ENOTTY; 19836 } else if (un->un_f_mmc_cap == TRUE) { 19837 19838 /* 19839 * MMC devices do not support changing the 19840 * logical block size. 19841 * 19842 * Note: EINVAL is being returned instead of ENOTTY to 19843 * maintain consistency with the original mmc 19844 * driver update. 19845 */ 19846 err = EINVAL; 19847 } else { 19848 mutex_enter(SD_MUTEX(un)); 19849 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 19850 (un->un_ncmds_in_transport > 0)) { 19851 mutex_exit(SD_MUTEX(un)); 19852 err = EINVAL; 19853 } else { 19854 mutex_exit(SD_MUTEX(un)); 19855 err = sr_change_blkmode(dev, cmd, arg, flag); 19856 } 19857 } 19858 break; 19859 19860 case CDROMGBLKMODE: 19861 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 19862 if (!ISCD(un)) { 19863 err = ENOTTY; 19864 } else if ((un->un_f_cfg_is_atapi != FALSE) && 19865 (un->un_f_blockcount_is_valid != FALSE)) { 19866 /* 19867 * Drive is an ATAPI drive so return target block 19868 * size for ATAPI drives since we cannot change the 19869 * blocksize on ATAPI drives. Used primarily to detect 19870 * if an ATAPI cdrom is present. 19871 */ 19872 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 19873 sizeof (int), flag) != 0) { 19874 err = EFAULT; 19875 } else { 19876 err = 0; 19877 } 19878 19879 } else { 19880 /* 19881 * Drive supports changing block sizes via a Mode 19882 * Select.
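 * (sr_change_blkmode() is expected to perform the MODE SELECT with a block descriptor carrying the requested logical block size.)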
19883 */ 19884 err = sr_change_blkmode(dev, cmd, arg, flag); 19885 } 19886 break; 19887 19888 case CDROMGDRVSPEED: 19889 case CDROMSDRVSPEED: 19890 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 19891 if (!ISCD(un)) { 19892 err = ENOTTY; 19893 } else if (un->un_f_mmc_cap == TRUE) { 19894 /* 19895 * Note: In the future the driver implementation 19896 * for getting and 19897 * setting cd speed should entail: 19898 * 1) If non-mmc try the Toshiba mode page 19899 * (sr_change_speed) 19900 * 2) If mmc but no support for Real Time Streaming try 19901 * the SET CD SPEED (0xBB) command 19902 * (sr_atapi_change_speed) 19903 * 3) If mmc and support for Real Time Streaming 19904 * try the GET PERFORMANCE and SET STREAMING 19905 * commands (not yet implemented, 4380808) 19906 */ 19907 /* 19908 * As per recent MMC spec, CD-ROM speed is variable 19909 * and changes with LBA. Since there is no such 19910 * thing as drive speed now, fail this ioctl. 19911 * 19912 * Note: EINVAL is returned for consistency with the 19913 * original implementation, which included support for 19914 * getting the drive speed of mmc devices but not 19915 * setting the drive speed. Thus EINVAL would be 19916 * returned if a set request was made for an mmc device. 19917 * We no longer support get or set speed for 19918 * mmc but need to remain consistent with regard 19919 * to the error code returned. 19920 */ 19921 err = EINVAL; 19922 } else if (un->un_f_cfg_is_atapi == TRUE) { 19923 err = sr_atapi_change_speed(dev, cmd, arg, flag); 19924 } else { 19925 err = sr_change_speed(dev, cmd, arg, flag); 19926 } 19927 break; 19928 19929 case CDROMCDDA: 19930 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 19931 if (!ISCD(un)) { 19932 err = ENOTTY; 19933 } else { 19934 err = sr_read_cdda(dev, (void *)arg, flag); 19935 } 19936 break; 19937 19938 case CDROMCDXA: 19939 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 19940 if (!ISCD(un)) { 19941 err = ENOTTY; 19942 } else { 19943 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 19944 } 19945 break; 19946 19947 case CDROMSUBCODE: 19948 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 19949 if (!ISCD(un)) { 19950 err = ENOTTY; 19951 } else { 19952 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 19953 } 19954 break; 19955 19956 19957 #ifdef SDDEBUG 19958 /* RESET/ABORTS testing ioctls */ 19959 case DKIOCRESET: { 19960 int reset_level; 19961 19962 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 19963 err = EFAULT; 19964 } else { 19965 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 19966 "reset_level = 0x%x\n", reset_level); 19967 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 19968 err = 0; 19969 } else { 19970 err = EIO; 19971 } 19972 } 19973 break; 19974 } 19975 19976 case DKIOCABORT: 19977 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 19978 if (scsi_abort(SD_ADDRESS(un), NULL)) { 19979 err = 0; 19980 } else { 19981 err = EIO; 19982 } 19983 break; 19984 #endif 19985 19986 #ifdef SD_FAULT_INJECTION 19987 /* SDIOC FaultInjection testing ioctls */ 19988 case SDIOCSTART: 19989 case SDIOCSTOP: 19990 case SDIOCINSERTPKT: 19991 case SDIOCINSERTXB: 19992 case SDIOCINSERTUN: 19993 case SDIOCINSERTARQ: 19994 case SDIOCPUSH: 19995 case SDIOCRETRIEVE: 19996 case SDIOCRUN: 19997 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 19998 "SDIOC detected cmd:0x%X:\n", cmd); 19999 /* call error generator */ 20000 sd_faultinjection_ioctl(cmd, arg, un); 20001 err = 0; 20002 break; 20003 20004 #endif /* SD_FAULT_INJECTION */ 20005 20006 case DKIOCFLUSHWRITECACHE: 20007 { 20008 struct dk_callback *dkc = (struct
dk_callback *)arg; 20009 20010 mutex_enter(SD_MUTEX(un)); 20011 if (!un->un_f_sync_cache_supported || 20012 !un->un_f_write_cache_enabled) { 20013 err = un->un_f_sync_cache_supported ? 20014 0 : ENOTSUP; 20015 mutex_exit(SD_MUTEX(un)); 20016 if ((flag & FKIOCTL) && dkc != NULL && 20017 dkc->dkc_callback != NULL) { 20018 (*dkc->dkc_callback)(dkc->dkc_cookie, 20019 err); 20020 /* 20021 * Did callback and reported error. 20022 * Since we did a callback, ioctl 20023 * should return 0. 20024 */ 20025 err = 0; 20026 } 20027 break; 20028 } 20029 mutex_exit(SD_MUTEX(un)); 20030 20031 if ((flag & FKIOCTL) && dkc != NULL && 20032 dkc->dkc_callback != NULL) { 20033 /* async SYNC CACHE request */ 20034 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20035 } else { 20036 /* synchronous SYNC CACHE request */ 20037 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20038 } 20039 } 20040 break; 20041 20042 case DKIOCGETWCE: { 20043 20044 int wce; 20045 20046 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20047 break; 20048 } 20049 20050 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20051 err = EFAULT; 20052 } 20053 break; 20054 } 20055 20056 case DKIOCSETWCE: { 20057 20058 int wce, sync_supported; 20059 20060 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20061 err = EFAULT; 20062 break; 20063 } 20064 20065 /* 20066 * Synchronize multiple threads trying to enable 20067 * or disable the cache via the un_f_wcc_cv 20068 * condition variable. 20069 */ 20070 mutex_enter(SD_MUTEX(un)); 20071 20072 /* 20073 * Don't allow the cache to be enabled if the 20074 * config file has it disabled. 20075 */ 20076 if (un->un_f_opt_disable_cache && wce) { 20077 mutex_exit(SD_MUTEX(un)); 20078 err = EINVAL; 20079 break; 20080 } 20081 20082 /* 20083 * Wait for write cache change in progress 20084 * bit to be clear before proceeding. 20085 */ 20086 while (un->un_f_wcc_inprog) 20087 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20088 20089 un->un_f_wcc_inprog = 1; 20090 20091 if (un->un_f_write_cache_enabled && wce == 0) { 20092 /* 20093 * Disable the write cache. Don't clear 20094 * un_f_write_cache_enabled until after 20095 * the mode select and flush are complete. 20096 */ 20097 sync_supported = un->un_f_sync_cache_supported; 20098 mutex_exit(SD_MUTEX(un)); 20099 if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20100 SD_CACHE_DISABLE)) == 0 && sync_supported) { 20101 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20102 } 20103 20104 mutex_enter(SD_MUTEX(un)); 20105 if (err == 0) { 20106 un->un_f_write_cache_enabled = 0; 20107 } 20108 20109 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20110 /* 20111 * Set un_f_write_cache_enabled first, so there is 20112 * no window where the cache is enabled, but the 20113 * bit says it isn't. 
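 * (If the MODE SELECT below fails, the flag is cleared again, so the flag can at worst briefly over-report an enabled cache, never under-report one.)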
20114 */ 20115 un->un_f_write_cache_enabled = 1; 20116 mutex_exit(SD_MUTEX(un)); 20117 20118 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20119 SD_CACHE_ENABLE); 20120 20121 mutex_enter(SD_MUTEX(un)); 20122 20123 if (err) { 20124 un->un_f_write_cache_enabled = 0; 20125 } 20126 } 20127 20128 un->un_f_wcc_inprog = 0; 20129 cv_broadcast(&un->un_wcc_cv); 20130 mutex_exit(SD_MUTEX(un)); 20131 break; 20132 } 20133 20134 default: 20135 err = ENOTTY; 20136 break; 20137 } 20138 mutex_enter(SD_MUTEX(un)); 20139 un->un_ncmds_in_driver--; 20140 ASSERT(un->un_ncmds_in_driver >= 0); 20141 mutex_exit(SD_MUTEX(un)); 20142 20143 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20144 return (err); 20145 } 20146 20147 20148 /* 20149 * Function: sd_dkio_ctrl_info 20150 * 20151 * Description: This routine is the driver entry point for handling controller 20152 * information ioctl requests (DKIOCINFO). 20153 * 20154 * Arguments: dev - the device number 20155 * arg - pointer to user provided dk_cinfo structure 20156 * specifying the controller type and attributes. 20157 * flag - this argument is a pass through to ddi_copyxxx() 20158 * directly from the mode argument of ioctl(). 20159 * 20160 * Return Code: 0 20161 * EFAULT 20162 * ENXIO 20163 */ 20164 20165 static int 20166 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20167 { 20168 struct sd_lun *un = NULL; 20169 struct dk_cinfo *info; 20170 dev_info_t *pdip; 20171 int lun, tgt; 20172 20173 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20174 return (ENXIO); 20175 } 20176 20177 info = (struct dk_cinfo *) 20178 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20179 20180 switch (un->un_ctype) { 20181 case CTYPE_CDROM: 20182 info->dki_ctype = DKC_CDROM; 20183 break; 20184 default: 20185 info->dki_ctype = DKC_SCSI_CCS; 20186 break; 20187 } 20188 pdip = ddi_get_parent(SD_DEVINFO(un)); 20189 info->dki_cnum = ddi_get_instance(pdip); 20190 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20191 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20192 } else { 20193 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20194 DK_DEVLEN - 1); 20195 } 20196 20197 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20198 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20199 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20200 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20201 20202 /* Unit Information */ 20203 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20204 info->dki_slave = ((tgt << 3) | lun); 20205 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20206 DK_DEVLEN - 1); 20207 info->dki_flags = DKI_FMTVOL; 20208 info->dki_partition = SDPART(dev); 20209 20210 /* Max Transfer size of this device in blocks */ 20211 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20212 info->dki_addr = 0; 20213 info->dki_space = 0; 20214 info->dki_prio = 0; 20215 info->dki_vec = 0; 20216 20217 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20218 kmem_free(info, sizeof (struct dk_cinfo)); 20219 return (EFAULT); 20220 } else { 20221 kmem_free(info, sizeof (struct dk_cinfo)); 20222 return (0); 20223 } 20224 } 20225 20226 20227 /* 20228 * Function: sd_get_media_info 20229 * 20230 * Description: This routine is the driver entry point for handling ioctl 20231 * requests for the media type or command set profile used by the 20232 * drive to operate on the media (DKIOCGMEDIAINFO). 
20233 * 20234 * Arguments: dev - the device number 20235 * arg - pointer to user provided dk_minfo structure 20236 * specifying the media type, logical block size and 20237 * drive capacity. 20238 * flag - this argument is a pass through to ddi_copyxxx() 20239 * directly from the mode argument of ioctl(). 20240 * 20241 * Return Code: 0 20242 * EACCESS 20243 * EFAULT 20244 * ENXIO 20245 * EIO 20246 */ 20247 20248 static int 20249 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20250 { 20251 struct sd_lun *un = NULL; 20252 struct uscsi_cmd com; 20253 struct scsi_inquiry *sinq; 20254 struct dk_minfo media_info; 20255 u_longlong_t media_capacity; 20256 uint64_t capacity; 20257 uint_t lbasize; 20258 uchar_t *out_data; 20259 uchar_t *rqbuf; 20260 int rval = 0; 20261 int rtn; 20262 20263 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20264 (un->un_state == SD_STATE_OFFLINE)) { 20265 return (ENXIO); 20266 } 20267 20268 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20269 20270 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20271 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20272 20273 /* Issue a TUR to determine if the drive is ready with media present */ 20274 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20275 if (rval == ENXIO) { 20276 goto done; 20277 } 20278 20279 /* Now get configuration data */ 20280 if (ISCD(un)) { 20281 media_info.dki_media_type = DK_CDROM; 20282 20283 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20284 if (un->un_f_mmc_cap == TRUE) { 20285 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20286 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20287 SD_PATH_STANDARD); 20288 20289 if (rtn) { 20290 /* 20291 * Failed for other than an illegal request 20292 * or command not supported 20293 */ 20294 if ((com.uscsi_status == STATUS_CHECK) && 20295 (com.uscsi_rqstatus == STATUS_GOOD)) { 20296 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20297 (rqbuf[12] != 0x20)) { 20298 rval = EIO; 20299 goto done; 20300 } 20301 } 20302 } else { 20303 /* 20304 * The GET CONFIGURATION command succeeded 20305 * so set the media type according to the 20306 * returned data 20307 */ 20308 media_info.dki_media_type = out_data[6]; 20309 media_info.dki_media_type <<= 8; 20310 media_info.dki_media_type |= out_data[7]; 20311 } 20312 } 20313 } else { 20314 /* 20315 * The profile list is not available, so we attempt to identify 20316 * the media type based on the inquiry data 20317 */ 20318 sinq = un->un_sd->sd_inq; 20319 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20320 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20321 /* This is a direct access device or optical disk */ 20322 media_info.dki_media_type = DK_FIXED_DISK; 20323 20324 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20325 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20326 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20327 media_info.dki_media_type = DK_ZIP; 20328 } else if ( 20329 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20330 media_info.dki_media_type = DK_JAZ; 20331 } 20332 } 20333 } else { 20334 /* 20335 * Not a CD, direct access or optical disk so return 20336 * unknown media 20337 */ 20338 media_info.dki_media_type = DK_UNKNOWN; 20339 } 20340 } 20341 20342 /* Now read the capacity so we can provide the lbasize and capacity */ 20343 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20344 SD_PATH_DIRECT)) { 20345 case 0: 20346 break; 20347 case EACCES: 20348 rval = EACCES; 20349 goto done; 20350 default: 20351 rval = EIO; 20352 goto done; 20353 } 20354 20355 
media_info.dki_lbsize = lbasize; 20356 media_capacity = capacity; 20357 20358 /* 20359 * sd_send_scsi_READ_CAPACITY() reports capacity in 20360 * un->un_sys_blocksize chunks. So we need to convert it into 20361 * lbasize chunks. 20362 */ 20363 media_capacity *= un->un_sys_blocksize; 20364 media_capacity /= lbasize; 20365 media_info.dki_capacity = media_capacity; 20366 20367 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20368 rval = EFAULT; 20369 /* Keep the goto: code may be added below this point in the future. */ 20370 goto done; 20371 } 20372 done: 20373 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20374 kmem_free(rqbuf, SENSE_LENGTH); 20375 return (rval); 20376 } 20377 20378 20379 /* 20380 * Function: sd_check_media 20381 * 20382 * Description: This utility routine implements the functionality for the 20383 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20384 * driver state changes from that specified by the user 20385 * (inserted or ejected). For example, if the user specifies 20386 * DKIO_EJECTED and the current media state is inserted this 20387 * routine will immediately return DKIO_INSERTED. However, if the 20388 * current media state is not inserted the user thread will be 20389 * blocked until the drive state changes. If DKIO_NONE is specified 20390 * the user thread will block until a drive state change occurs. 20391 * 20392 * Arguments: dev - the device number 20393 * state - user pointer to a dkio_state, updated with the current 20394 * drive state at return. 20395 * 20396 * Return Code: ENXIO 20397 * EIO 20398 * EAGAIN 20399 * EINTR 20400 */ 20401 20402 static int 20403 sd_check_media(dev_t dev, enum dkio_state state) 20404 { 20405 struct sd_lun *un = NULL; 20406 enum dkio_state prev_state; 20407 opaque_t token = NULL; 20408 int rval = 0; 20409 20410 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20411 return (ENXIO); 20412 } 20413 20414 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20415 20416 mutex_enter(SD_MUTEX(un)); 20417 20418 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20419 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20420 20421 prev_state = un->un_mediastate; 20422 20423 /* is there anything to do? */ 20424 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20425 /* 20426 * submit the request to the scsi_watch service; 20427 * scsi_media_watch_cb() does the real work 20428 */ 20429 mutex_exit(SD_MUTEX(un)); 20430 20431 /* 20432 * This change handles the case where a scsi watch request is 20433 * added to a device that is powered down. To accomplish this 20434 * we power up the device before adding the scsi watch request, 20435 * since the scsi watch sends a TUR directly to the device 20436 * which the device cannot handle if it is powered down. 20437 */ 20438 if (sd_pm_entry(un) != DDI_SUCCESS) { 20439 mutex_enter(SD_MUTEX(un)); 20440 goto done; 20441 } 20442 20443 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20444 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20445 (caddr_t)dev); 20446 20447 sd_pm_exit(un); 20448 20449 mutex_enter(SD_MUTEX(un)); 20450 if (token == NULL) { 20451 rval = EAGAIN; 20452 goto done; 20453 } 20454 20455 /* 20456 * This is a special case IOCTL that doesn't return 20457 * until the media state changes. Routine sdpower 20458 * knows about and handles this so don't count it 20459 * as an active cmd in the driver, which would 20460 * keep the device busy to the pm framework.
* If the count isn't decremented the device can't 20462 * be powered down. 20463 */ 20464 un->un_ncmds_in_driver--; 20465 ASSERT(un->un_ncmds_in_driver >= 0); 20466 20467 /* 20468 * if a prior request had been made, this will be the same 20469 * token, as scsi_watch was designed that way. 20470 */ 20471 un->un_swr_token = token; 20472 un->un_specified_mediastate = state; 20473 20474 /* 20475 * now wait for media change 20476 * we will not be signalled unless mediastate == state but it is 20477 * still better to test for this condition, since there is a 20478 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20479 */ 20480 SD_TRACE(SD_LOG_COMMON, un, 20481 "sd_check_media: waiting for media state change\n"); 20482 while (un->un_mediastate == state) { 20483 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20484 SD_TRACE(SD_LOG_COMMON, un, 20485 "sd_check_media: waiting for media state " 20486 "was interrupted\n"); 20487 un->un_ncmds_in_driver++; 20488 rval = EINTR; 20489 goto done; 20490 } 20491 SD_TRACE(SD_LOG_COMMON, un, 20492 "sd_check_media: received signal, state=%x\n", 20493 un->un_mediastate); 20494 } 20495 /* 20496 * Inc the counter to indicate the device once again 20497 * has an active outstanding cmd. 20498 */ 20499 un->un_ncmds_in_driver++; 20500 } 20501 20502 /* invalidate geometry */ 20503 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20504 sr_ejected(un); 20505 } 20506 20507 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20508 uint64_t capacity; 20509 uint_t lbasize; 20510 20511 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20512 mutex_exit(SD_MUTEX(un)); 20513 /* 20514 * Since the following routines use SD_PATH_DIRECT, we must 20515 * call PM directly before the upcoming disk accesses. This 20516 * may cause the disk to be powered up and spun up. 20517 */ 20518 20519 if (sd_pm_entry(un) == DDI_SUCCESS) { 20520 rval = sd_send_scsi_READ_CAPACITY(un, 20521 &capacity, 20522 &lbasize, SD_PATH_DIRECT); 20523 if (rval != 0) { 20524 sd_pm_exit(un); 20525 mutex_enter(SD_MUTEX(un)); 20526 goto done; 20527 } 20528 } else { 20529 rval = EIO; 20530 mutex_enter(SD_MUTEX(un)); 20531 goto done; 20532 } 20533 mutex_enter(SD_MUTEX(un)); 20534 20535 sd_update_block_info(un, lbasize, capacity); 20536 20537 /* 20538 * Check if the media in the device is writable or not 20539 */ 20540 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20541 20542 mutex_exit(SD_MUTEX(un)); 20543 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20544 if ((cmlb_validate(un->un_cmlbhandle, 0, 20545 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20546 sd_set_pstats(un); 20547 SD_TRACE(SD_LOG_IO_PARTITION, un, 20548 "sd_check_media: un:0x%p pstats created and " 20549 "set\n", un); 20550 } 20551 20552 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20553 SD_PATH_DIRECT); 20554 sd_pm_exit(un); 20555 20556 mutex_enter(SD_MUTEX(un)); 20557 } 20558 done: 20559 un->un_f_watcht_stopped = FALSE; 20560 if (un->un_swr_token) { 20561 /* 20562 * Use of this local token and the mutex ensures that we avoid 20563 * some race conditions associated with terminating the 20564 * scsi watch.
20565 */ 20566 token = un->un_swr_token; 20567 un->un_swr_token = (opaque_t)NULL; 20568 mutex_exit(SD_MUTEX(un)); 20569 (void) scsi_watch_request_terminate(token, 20570 SCSI_WATCH_TERMINATE_WAIT); 20571 mutex_enter(SD_MUTEX(un)); 20572 } 20573 20574 /* 20575 * Update the capacity kstat value, if no media previously 20576 * (capacity kstat is 0) and a media has been inserted 20577 * (un_f_blockcount_is_valid == TRUE) 20578 */ 20579 if (un->un_errstats) { 20580 struct sd_errstats *stp = NULL; 20581 20582 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20583 if ((stp->sd_capacity.value.ui64 == 0) && 20584 (un->un_f_blockcount_is_valid == TRUE)) { 20585 stp->sd_capacity.value.ui64 = 20586 (uint64_t)((uint64_t)un->un_blockcount * 20587 un->un_sys_blocksize); 20588 } 20589 } 20590 mutex_exit(SD_MUTEX(un)); 20591 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20592 return (rval); 20593 } 20594 20595 20596 /* 20597 * Function: sd_delayed_cv_broadcast 20598 * 20599 * Description: Delayed cv_broadcast to allow for target to recover from media 20600 * insertion. 20601 * 20602 * Arguments: arg - driver soft state (unit) structure 20603 */ 20604 20605 static void 20606 sd_delayed_cv_broadcast(void *arg) 20607 { 20608 struct sd_lun *un = arg; 20609 20610 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20611 20612 mutex_enter(SD_MUTEX(un)); 20613 un->un_dcvb_timeid = NULL; 20614 cv_broadcast(&un->un_state_cv); 20615 mutex_exit(SD_MUTEX(un)); 20616 } 20617 20618 20619 /* 20620 * Function: sd_media_watch_cb 20621 * 20622 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20623 * routine processes the TUR sense data and updates the driver 20624 * state if a transition has occurred. The user thread 20625 * (sd_check_media) is then signalled. 
20626 * 20627 * Arguments: arg - the device 'dev_t' is used for context to discriminate 20628 * among multiple watches that share this callback function 20629 * resultp - scsi watch facility result packet containing scsi 20630 * packet, status byte and sense data 20631 * 20632 * Return Code: 0 for success, -1 for failure 20633 */ 20634 20635 static int 20636 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 20637 { 20638 struct sd_lun *un; 20639 struct scsi_status *statusp = resultp->statusp; 20640 uint8_t *sensep = (uint8_t *)resultp->sensep; 20641 enum dkio_state state = DKIO_NONE; 20642 dev_t dev = (dev_t)arg; 20643 uchar_t actual_sense_length; 20644 uint8_t skey, asc, ascq; 20645 20646 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20647 return (-1); 20648 } 20649 actual_sense_length = resultp->actual_sense_length; 20650 20651 mutex_enter(SD_MUTEX(un)); 20652 SD_TRACE(SD_LOG_COMMON, un, 20653 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 20654 *((char *)statusp), (void *)sensep, actual_sense_length); 20655 20656 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 20657 un->un_mediastate = DKIO_DEV_GONE; 20658 cv_broadcast(&un->un_state_cv); 20659 mutex_exit(SD_MUTEX(un)); 20660 20661 return (0); 20662 } 20663 20664 /* 20665 * If there was a check condition then sensep points to valid sense data 20666 * If status was not a check condition but a reservation or busy status 20667 * then the new state is DKIO_NONE 20668 */ 20669 if (sensep != NULL) { 20670 skey = scsi_sense_key(sensep); 20671 asc = scsi_sense_asc(sensep); 20672 ascq = scsi_sense_ascq(sensep); 20673 20674 SD_INFO(SD_LOG_COMMON, un, 20675 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 20676 skey, asc, ascq); 20677 /* This routine only uses up to 13 bytes of sense data. */ 20678 if (actual_sense_length >= 13) { 20679 if (skey == KEY_UNIT_ATTENTION) { 20680 if (asc == 0x28) { 20681 state = DKIO_INSERTED; 20682 } 20683 } else { 20684 /* 20685 * if 02/04/02 means that the host 20686 * should send start command. Explicitly 20687 * leave the media state as is 20688 * (inserted) as the media is inserted 20689 * and host has stopped device for PM 20690 * reasons. Upon next true read/write 20691 * to this media will bring the 20692 * device to the right state good for 20693 * media access. 20694 */ 20695 if ((skey == KEY_NOT_READY) && 20696 (asc == 0x3a)) { 20697 state = DKIO_EJECTED; 20698 } 20699 20700 /* 20701 * If the drivge is busy with an operation 20702 * or long write, keep the media in an 20703 * inserted state. 
20704 */ 20705 20706 if ((skey == KEY_NOT_READY) && 20707 (asc == 0x04) && 20708 ((ascq == 0x02) || 20709 (ascq == 0x07) || 20710 (ascq == 0x08))) { 20711 state = DKIO_INSERTED; 20712 } 20713 } 20714 } 20715 } else if ((*((char *)statusp) == STATUS_GOOD) && 20716 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 20717 state = DKIO_INSERTED; 20718 } 20719 20720 SD_TRACE(SD_LOG_COMMON, un, 20721 "sd_media_watch_cb: state=%x, specified=%x\n", 20722 state, un->un_specified_mediastate); 20723 20724 /* 20725 * now signal the waiting thread if this is *not* the specified state; 20726 * delay the signal if the state is DKIO_INSERTED to allow the target 20727 * to recover 20728 */ 20729 if (state != un->un_specified_mediastate) { 20730 un->un_mediastate = state; 20731 if (state == DKIO_INSERTED) { 20732 /* 20733 * delay the signal to give the drive a chance 20734 * to do what it apparently needs to do 20735 */ 20736 SD_TRACE(SD_LOG_COMMON, un, 20737 "sd_media_watch_cb: delayed cv_broadcast\n"); 20738 if (un->un_dcvb_timeid == NULL) { 20739 un->un_dcvb_timeid = 20740 timeout(sd_delayed_cv_broadcast, un, 20741 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 20742 } 20743 } else { 20744 SD_TRACE(SD_LOG_COMMON, un, 20745 "sd_media_watch_cb: immediate cv_broadcast\n"); 20746 cv_broadcast(&un->un_state_cv); 20747 } 20748 } 20749 mutex_exit(SD_MUTEX(un)); 20750 return (0); 20751 } 20752 20753 20754 /* 20755 * Function: sd_dkio_get_temp 20756 * 20757 * Description: This routine is the driver entry point for handling ioctl 20758 * requests to get the disk temperature. 20759 * 20760 * Arguments: dev - the device number 20761 * arg - pointer to user provided dk_temperature structure. 20762 * flag - this argument is a pass through to ddi_copyxxx() 20763 * directly from the mode argument of ioctl(). 20764 * 20765 * Return Code: 0 20766 * EFAULT 20767 * ENXIO 20768 * EAGAIN 20769 */ 20770 20771 static int 20772 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 20773 { 20774 struct sd_lun *un = NULL; 20775 struct dk_temperature *dktemp = NULL; 20776 uchar_t *temperature_page; 20777 int rval = 0; 20778 int path_flag = SD_PATH_STANDARD; 20779 20780 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20781 return (ENXIO); 20782 } 20783 20784 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 20785 20786 /* copyin the disk temp argument to get the user flags */ 20787 if (ddi_copyin((void *)arg, dktemp, 20788 sizeof (struct dk_temperature), flag) != 0) { 20789 rval = EFAULT; 20790 goto done; 20791 } 20792 20793 /* Initialize the temperature to invalid. */ 20794 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20795 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20796 20797 /* 20798 * Note: Investigate removing the "bypass pm" semantic. 20799 * Can we just bypass PM always? 20800 */ 20801 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 20802 path_flag = SD_PATH_DIRECT; 20803 ASSERT(!mutex_owned(&un->un_pm_mutex)); 20804 mutex_enter(&un->un_pm_mutex); 20805 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 20806 /* 20807 * If DKT_BYPASS_PM is set, and the drive happens to be 20808 * in low power mode, we can not wake it up, Need to 20809 * return EAGAIN. 20810 */ 20811 mutex_exit(&un->un_pm_mutex); 20812 rval = EAGAIN; 20813 goto done; 20814 } else { 20815 /* 20816 * Indicate to PM the device is busy. This is required 20817 * to avoid a race - i.e. the ioctl is issuing a 20818 * command and the pm framework brings down the device 20819 * to low power mode (possible power cut-off on some 20820 * platforms). 
20821 */ 20822 mutex_exit(&un->un_pm_mutex); 20823 if (sd_pm_entry(un) != DDI_SUCCESS) { 20824 rval = EAGAIN; 20825 goto done; 20826 } 20827 } 20828 } 20829 20830 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 20831 20832 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 20833 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 20834 goto done2; 20835 } 20836 20837 /* 20838 * For the current temperature verify that the parameter length is 0x02 20839 * and the parameter code is 0x00 20840 */ 20841 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 20842 (temperature_page[5] == 0x00)) { 20843 if (temperature_page[9] == 0xFF) { 20844 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20845 } else { 20846 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 20847 } 20848 } 20849 20850 /* 20851 * For the reference temperature verify that the parameter 20852 * length is 0x02 and the parameter code is 0x01 20853 */ 20854 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 20855 (temperature_page[11] == 0x01)) { 20856 if (temperature_page[15] == 0xFF) { 20857 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20858 } else { 20859 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 20860 } 20861 } 20862 20863 /* Do the copyout regardless of the temperature commands status. */ 20864 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 20865 flag) != 0) { 20866 rval = EFAULT; 20867 } 20868 20869 done2: 20870 if (path_flag == SD_PATH_DIRECT) { 20871 sd_pm_exit(un); 20872 } 20873 20874 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 20875 done: 20876 if (dktemp != NULL) { 20877 kmem_free(dktemp, sizeof (struct dk_temperature)); 20878 } 20879 20880 return (rval); 20881 } 20882 20883 20884 /* 20885 * Function: sd_log_page_supported 20886 * 20887 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 20888 * supported log pages. 20889 * 20890 * Arguments: un - 20891 * log_page - 20892 * 20893 * Return Code: -1 - on error (log sense is optional and may not be supported). 20894 * 0 - log page not found. 20895 * 1 - log page found. 20896 */ 20897 20898 static int 20899 sd_log_page_supported(struct sd_lun *un, int log_page) 20900 { 20901 uchar_t *log_page_data; 20902 int i; 20903 int match = 0; 20904 int log_size; 20905 20906 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 20907 20908 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 20909 SD_PATH_DIRECT) != 0) { 20910 SD_ERROR(SD_LOG_COMMON, un, 20911 "sd_log_page_supported: failed log page retrieval\n"); 20912 kmem_free(log_page_data, 0xFF); 20913 return (-1); 20914 } 20915 log_size = log_page_data[3]; 20916 20917 /* 20918 * The list of supported log pages start from the fourth byte. Check 20919 * until we run out of log pages or a match is found. 20920 */ 20921 for (i = 4; (i < (log_size + 4)) && !match; i++) { 20922 if (log_page_data[i] == log_page) { 20923 match++; 20924 } 20925 } 20926 kmem_free(log_page_data, 0xFF); 20927 return (match); 20928 } 20929 20930 20931 /* 20932 * Function: sd_mhdioc_failfast 20933 * 20934 * Description: This routine is the driver entry point for handling ioctl 20935 * requests to enable/disable the multihost failfast option. 20936 * (MHIOCENFAILFAST) 20937 * 20938 * Arguments: dev - the device number 20939 * arg - user specified probing interval. 20940 * flag - this argument is a pass through to ddi_copyxxx() 20941 * directly from the mode argument of ioctl(). 
20942 * 20943 * Return Code: 0 20944 * EFAULT 20945 * ENXIO 20946 */ 20947 20948 static int 20949 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 20950 { 20951 struct sd_lun *un = NULL; 20952 int mh_time; 20953 int rval = 0; 20954 20955 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20956 return (ENXIO); 20957 } 20958 20959 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 20960 return (EFAULT); 20961 20962 if (mh_time) { 20963 mutex_enter(SD_MUTEX(un)); 20964 un->un_resvd_status |= SD_FAILFAST; 20965 mutex_exit(SD_MUTEX(un)); 20966 /* 20967 * If mh_time is INT_MAX, then this ioctl is being used for 20968 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 20969 */ 20970 if (mh_time != INT_MAX) { 20971 rval = sd_check_mhd(dev, mh_time); 20972 } 20973 } else { 20974 (void) sd_check_mhd(dev, 0); 20975 mutex_enter(SD_MUTEX(un)); 20976 un->un_resvd_status &= ~SD_FAILFAST; 20977 mutex_exit(SD_MUTEX(un)); 20978 } 20979 return (rval); 20980 } 20981 20982 20983 /* 20984 * Function: sd_mhdioc_takeown 20985 * 20986 * Description: This routine is the driver entry point for handling ioctl 20987 * requests to forcefully acquire exclusive access rights to the 20988 * multihost disk (MHIOCTKOWN). 20989 * 20990 * Arguments: dev - the device number 20991 * arg - user provided structure specifying the delay 20992 * parameters in milliseconds 20993 * flag - this argument is a pass through to ddi_copyxxx() 20994 * directly from the mode argument of ioctl(). 20995 * 20996 * Return Code: 0 20997 * EFAULT 20998 * ENXIO 20999 */ 21000 21001 static int 21002 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21003 { 21004 struct sd_lun *un = NULL; 21005 struct mhioctkown *tkown = NULL; 21006 int rval = 0; 21007 21008 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21009 return (ENXIO); 21010 } 21011 21012 if (arg != NULL) { 21013 tkown = (struct mhioctkown *) 21014 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21015 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21016 if (rval != 0) { 21017 rval = EFAULT; 21018 goto error; 21019 } 21020 } 21021 21022 rval = sd_take_ownership(dev, tkown); 21023 mutex_enter(SD_MUTEX(un)); 21024 if (rval == 0) { 21025 un->un_resvd_status |= SD_RESERVE; 21026 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21027 sd_reinstate_resv_delay = 21028 tkown->reinstate_resv_delay * 1000; 21029 } else { 21030 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21031 } 21032 /* 21033 * Give the scsi_watch routine interval set by 21034 * the MHIOCENFAILFAST ioctl precedence here. 21035 */ 21036 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21037 mutex_exit(SD_MUTEX(un)); 21038 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21039 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21040 "sd_mhdioc_takeown : %d\n", 21041 sd_reinstate_resv_delay); 21042 } else { 21043 mutex_exit(SD_MUTEX(un)); 21044 } 21045 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21046 sd_mhd_reset_notify_cb, (caddr_t)un); 21047 } else { 21048 un->un_resvd_status &= ~SD_RESERVE; 21049 mutex_exit(SD_MUTEX(un)); 21050 } 21051 21052 error: 21053 if (tkown != NULL) { 21054 kmem_free(tkown, sizeof (struct mhioctkown)); 21055 } 21056 return (rval); 21057 } 21058 21059 21060 /* 21061 * Function: sd_mhdioc_release 21062 * 21063 * Description: This routine is the driver entry point for handling ioctl 21064 * requests to release exclusive access rights to the multihost 21065 * disk (MHIOCRELEASE). 
21066 * 21067 * Arguments: dev - the device number 21068 * 21069 * Return Code: 0 21070 * ENXIO 21071 */ 21072 21073 static int 21074 sd_mhdioc_release(dev_t dev) 21075 { 21076 struct sd_lun *un = NULL; 21077 timeout_id_t resvd_timeid_save; 21078 int resvd_status_save; 21079 int rval = 0; 21080 21081 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21082 return (ENXIO); 21083 } 21084 21085 mutex_enter(SD_MUTEX(un)); 21086 resvd_status_save = un->un_resvd_status; 21087 un->un_resvd_status &= 21088 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21089 if (un->un_resvd_timeid) { 21090 resvd_timeid_save = un->un_resvd_timeid; 21091 un->un_resvd_timeid = NULL; 21092 mutex_exit(SD_MUTEX(un)); 21093 (void) untimeout(resvd_timeid_save); 21094 } else { 21095 mutex_exit(SD_MUTEX(un)); 21096 } 21097 21098 /* 21099 * destroy any pending timeout thread that may be attempting to 21100 * reinstate reservation on this device. 21101 */ 21102 sd_rmv_resv_reclaim_req(dev); 21103 21104 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21105 mutex_enter(SD_MUTEX(un)); 21106 if ((un->un_mhd_token) && 21107 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21108 mutex_exit(SD_MUTEX(un)); 21109 (void) sd_check_mhd(dev, 0); 21110 } else { 21111 mutex_exit(SD_MUTEX(un)); 21112 } 21113 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21114 sd_mhd_reset_notify_cb, (caddr_t)un); 21115 } else { 21116 /* 21117 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21118 */ 21119 mutex_enter(SD_MUTEX(un)); 21120 un->un_resvd_status = resvd_status_save; 21121 mutex_exit(SD_MUTEX(un)); 21122 } 21123 return (rval); 21124 } 21125 21126 21127 /* 21128 * Function: sd_mhdioc_register_devid 21129 * 21130 * Description: This routine is the driver entry point for handling ioctl 21131 * requests to register the device id (MHIOCREREGISTERDEVID). 21132 * 21133 * Note: The implementation for this ioctl has been updated to 21134 * be consistent with the original PSARC case (1999/357) 21135 * (4375899, 4241671, 4220005) 21136 * 21137 * Arguments: dev - the device number 21138 * 21139 * Return Code: 0 21140 * ENXIO 21141 */ 21142 21143 static int 21144 sd_mhdioc_register_devid(dev_t dev) 21145 { 21146 struct sd_lun *un = NULL; 21147 int rval = 0; 21148 21149 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21150 return (ENXIO); 21151 } 21152 21153 ASSERT(!mutex_owned(SD_MUTEX(un))); 21154 21155 mutex_enter(SD_MUTEX(un)); 21156 21157 /* If a devid already exists, de-register it */ 21158 if (un->un_devid != NULL) { 21159 ddi_devid_unregister(SD_DEVINFO(un)); 21160 /* 21161 * After unregister devid, needs to free devid memory 21162 */ 21163 ddi_devid_free(un->un_devid); 21164 un->un_devid = NULL; 21165 } 21166 21167 /* Check for reservation conflict */ 21168 mutex_exit(SD_MUTEX(un)); 21169 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21170 mutex_enter(SD_MUTEX(un)); 21171 21172 switch (rval) { 21173 case 0: 21174 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21175 break; 21176 case EACCES: 21177 break; 21178 default: 21179 rval = EIO; 21180 } 21181 21182 mutex_exit(SD_MUTEX(un)); 21183 return (rval); 21184 } 21185 21186 21187 /* 21188 * Function: sd_mhdioc_inkeys 21189 * 21190 * Description: This routine is the driver entry point for handling ioctl 21191 * requests to issue the SCSI-3 Persistent In Read Keys command 21192 * to the device (MHIOCGRP_INKEYS). 
21193 * 21194 * Arguments: dev - the device number 21195 * arg - user provided in_keys structure 21196 * flag - this argument is a pass through to ddi_copyxxx() 21197 * directly from the mode argument of ioctl(). 21198 * 21199 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21200 * ENXIO 21201 * EFAULT 21202 */ 21203 21204 static int 21205 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21206 { 21207 struct sd_lun *un; 21208 mhioc_inkeys_t inkeys; 21209 int rval = 0; 21210 21211 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21212 return (ENXIO); 21213 } 21214 21215 #ifdef _MULTI_DATAMODEL 21216 switch (ddi_model_convert_from(flag & FMODELS)) { 21217 case DDI_MODEL_ILP32: { 21218 struct mhioc_inkeys32 inkeys32; 21219 21220 if (ddi_copyin(arg, &inkeys32, 21221 sizeof (struct mhioc_inkeys32), flag) != 0) { 21222 return (EFAULT); 21223 } 21224 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21225 if ((rval = sd_persistent_reservation_in_read_keys(un, 21226 &inkeys, flag)) != 0) { 21227 return (rval); 21228 } 21229 inkeys32.generation = inkeys.generation; 21230 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21231 flag) != 0) { 21232 return (EFAULT); 21233 } 21234 break; 21235 } 21236 case DDI_MODEL_NONE: 21237 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21238 flag) != 0) { 21239 return (EFAULT); 21240 } 21241 if ((rval = sd_persistent_reservation_in_read_keys(un, 21242 &inkeys, flag)) != 0) { 21243 return (rval); 21244 } 21245 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21246 flag) != 0) { 21247 return (EFAULT); 21248 } 21249 break; 21250 } 21251 21252 #else /* ! _MULTI_DATAMODEL */ 21253 21254 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21255 return (EFAULT); 21256 } 21257 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21258 if (rval != 0) { 21259 return (rval); 21260 } 21261 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21262 return (EFAULT); 21263 } 21264 21265 #endif /* _MULTI_DATAMODEL */ 21266 21267 return (rval); 21268 } 21269 21270 21271 /* 21272 * Function: sd_mhdioc_inresv 21273 * 21274 * Description: This routine is the driver entry point for handling ioctl 21275 * requests to issue the SCSI-3 Persistent In Read Reservations 21276 * command to the device (MHIOCGRP_INKEYS). 21277 * 21278 * Arguments: dev - the device number 21279 * arg - user provided in_resv structure 21280 * flag - this argument is a pass through to ddi_copyxxx() 21281 * directly from the mode argument of ioctl(). 
21282 * 21283 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21284 * ENXIO 21285 * EFAULT 21286 */ 21287 21288 static int 21289 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21290 { 21291 struct sd_lun *un; 21292 mhioc_inresvs_t inresvs; 21293 int rval = 0; 21294 21295 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21296 return (ENXIO); 21297 } 21298 21299 #ifdef _MULTI_DATAMODEL 21300 21301 switch (ddi_model_convert_from(flag & FMODELS)) { 21302 case DDI_MODEL_ILP32: { 21303 struct mhioc_inresvs32 inresvs32; 21304 21305 if (ddi_copyin(arg, &inresvs32, 21306 sizeof (struct mhioc_inresvs32), flag) != 0) { 21307 return (EFAULT); 21308 } 21309 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21310 if ((rval = sd_persistent_reservation_in_read_resv(un, 21311 &inresvs, flag)) != 0) { 21312 return (rval); 21313 } 21314 inresvs32.generation = inresvs.generation; 21315 if (ddi_copyout(&inresvs32, arg, 21316 sizeof (struct mhioc_inresvs32), flag) != 0) { 21317 return (EFAULT); 21318 } 21319 break; 21320 } 21321 case DDI_MODEL_NONE: 21322 if (ddi_copyin(arg, &inresvs, 21323 sizeof (mhioc_inresvs_t), flag) != 0) { 21324 return (EFAULT); 21325 } 21326 if ((rval = sd_persistent_reservation_in_read_resv(un, 21327 &inresvs, flag)) != 0) { 21328 return (rval); 21329 } 21330 if (ddi_copyout(&inresvs, arg, 21331 sizeof (mhioc_inresvs_t), flag) != 0) { 21332 return (EFAULT); 21333 } 21334 break; 21335 } 21336 21337 #else /* ! _MULTI_DATAMODEL */ 21338 21339 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21340 return (EFAULT); 21341 } 21342 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21343 if (rval != 0) { 21344 return (rval); 21345 } 21346 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21347 return (EFAULT); 21348 } 21349 21350 #endif /* ! _MULTI_DATAMODEL */ 21351 21352 return (rval); 21353 } 21354 21355 21356 /* 21357 * The following routines support the clustering functionality described below 21358 * and implement lost reservation reclaim functionality. 21359 * 21360 * Clustering 21361 * ---------- 21362 * The clustering code uses two different, independent forms of SCSI 21363 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21364 * Persistent Group Reservations. For any particular disk, it will use either 21365 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21366 * 21367 * SCSI-2 21368 * The cluster software takes ownership of a multi-hosted disk by issuing the 21369 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21370 * MHIOCRELEASE ioctl.Closely related is the MHIOCENFAILFAST ioctl -- a cluster, 21371 * just after taking ownership of the disk with the MHIOCTKOWN ioctl then issues 21372 * the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The 21373 * meaning of failfast is that if the driver (on this host) ever encounters the 21374 * scsi error return code RESERVATION_CONFLICT from the device, it should 21375 * immediately panic the host. The motivation for this ioctl is that if this 21376 * host does encounter reservation conflict, the underlying cause is that some 21377 * other host of the cluster has decided that this host is no longer in the 21378 * cluster and has seized control of the disks for itself. Since this host is no 21379 * longer in the cluster, it ought to panic itself. 
The MHIOCENFAILFAST ioctl 21380 * does two things: 21381 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21382 * error to panic the host 21383 * (b) it sets up a periodic timer to test whether this host still has 21384 * "access" (in that no other host has reserved the device): if the 21385 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21386 * purpose of that periodic timer is to handle scenarios where the host is 21387 * otherwise temporarily quiescent, temporarily doing no real i/o. 21388 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21389 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21390 * the device itself. 21391 * 21392 * SCSI-3 PGR 21393 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21394 * facility is supported through the shared multihost disk ioctls 21395 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21396 * MHIOCGRP_PREEMPTANDABORT) 21397 * 21398 * Reservation Reclaim: 21399 * -------------------- 21400 * To support the lost reservation reclaim operations this driver creates a 21401 * single thread to handle reinstating reservations on all devices that have 21402 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21403 * have lost reservations when the scsi watch facility calls back 21404 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 21405 * requests to regain the lost reservations. 21406 */ 21407 21408 /* 21409 * Function: sd_check_mhd() 21410 * 21411 * Description: This function sets up and submits a scsi watch request or 21412 * terminates an existing watch request. This routine is used in 21413 * support of reservation reclaim. 21414 * 21415 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21416 * among multiple watches that share the callback function 21417 * interval - the number of milliseconds specifying the watch 21418 * interval for issuing TEST UNIT READY commands. If 21419 * set to 0 the watch should be terminated. If the 21420 * interval is set to 0 and if the device is required 21421 * to hold reservation while disabling failfast, the 21422 * watch is restarted with an interval of 21423 * reinstate_resv_delay. 21424 * 21425 * Return Code: 0 - Successful submit/terminate of scsi watch request 21426 * ENXIO - Indicates an invalid device was specified 21427 * EAGAIN - Unable to submit the scsi watch request 21428 */ 21429 21430 static int 21431 sd_check_mhd(dev_t dev, int interval) 21432 { 21433 struct sd_lun *un; 21434 opaque_t token; 21435 21436 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21437 return (ENXIO); 21438 } 21439 21440 /* is this a watch termination request? */ 21441 if (interval == 0) { 21442 mutex_enter(SD_MUTEX(un)); 21443 /* if there is an existing watch task then terminate it */ 21444 if (un->un_mhd_token) { 21445 token = un->un_mhd_token; 21446 un->un_mhd_token = NULL; 21447 mutex_exit(SD_MUTEX(un)); 21448 (void) scsi_watch_request_terminate(token, 21449 SCSI_WATCH_TERMINATE_WAIT); 21450 mutex_enter(SD_MUTEX(un)); 21451 } else { 21452 mutex_exit(SD_MUTEX(un)); 21453 /* 21454 * Note: If we return here we don't check for the 21455 * failfast case. This is the original legacy 21456 * implementation but perhaps we should be checking 21457 * the failfast case.
21458 */ 21459 return (0); 21460 } 21461 /* 21462 * If the device is required to hold reservation while 21463 * disabling failfast, we need to restart the scsi_watch 21464 * routine with an interval of reinstate_resv_delay. 21465 */ 21466 if (un->un_resvd_status & SD_RESERVE) { 21467 interval = sd_reinstate_resv_delay/1000; 21468 } else { 21469 /* no failfast so bail */ 21470 mutex_exit(SD_MUTEX(un)); 21471 return (0); 21472 } 21473 mutex_exit(SD_MUTEX(un)); 21474 } 21475 21476 /* 21477 * adjust minimum time interval to 1 second, 21478 * and convert from msecs to usecs 21479 */ 21480 if (interval > 0 && interval < 1000) { 21481 interval = 1000; 21482 } 21483 interval *= 1000; 21484 21485 /* 21486 * submit the request to the scsi_watch service 21487 */ 21488 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21489 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21490 if (token == NULL) { 21491 return (EAGAIN); 21492 } 21493 21494 /* 21495 * save token for termination later on 21496 */ 21497 mutex_enter(SD_MUTEX(un)); 21498 un->un_mhd_token = token; 21499 mutex_exit(SD_MUTEX(un)); 21500 return (0); 21501 } 21502 21503 21504 /* 21505 * Function: sd_mhd_watch_cb() 21506 * 21507 * Description: This function is the call back function used by the scsi watch 21508 * facility. The scsi watch facility sends the "Test Unit Ready" 21509 * and processes the status. If applicable (i.e. a "Unit Attention" 21510 * status and automatic "Request Sense" not used) the scsi watch 21511 * facility will send a "Request Sense" and retrieve the sense data 21512 * to be passed to this callback function. In either case the 21513 * automatic "Request Sense" or the facility submitting one, this 21514 * callback is passed the status and sense data. 21515 * 21516 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21517 * among multiple watches that share this callback function 21518 * resultp - scsi watch facility result packet containing scsi 21519 * packet, status byte and sense data 21520 * 21521 * Return Code: 0 - continue the watch task 21522 * non-zero - terminate the watch task 21523 */ 21524 21525 static int 21526 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21527 { 21528 struct sd_lun *un; 21529 struct scsi_status *statusp; 21530 uint8_t *sensep; 21531 struct scsi_pkt *pkt; 21532 uchar_t actual_sense_length; 21533 dev_t dev = (dev_t)arg; 21534 21535 ASSERT(resultp != NULL); 21536 statusp = resultp->statusp; 21537 sensep = (uint8_t *)resultp->sensep; 21538 pkt = resultp->pkt; 21539 actual_sense_length = resultp->actual_sense_length; 21540 21541 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21542 return (ENXIO); 21543 } 21544 21545 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21546 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21547 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21548 21549 /* Begin processing of the status and/or sense data */ 21550 if (pkt->pkt_reason != CMD_CMPLT) { 21551 /* Handle the incomplete packet */ 21552 sd_mhd_watch_incomplete(un, pkt); 21553 return (0); 21554 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21555 if (*((unsigned char *)statusp) 21556 == STATUS_RESERVATION_CONFLICT) { 21557 /* 21558 * Handle a reservation conflict by panicking if 21559 * configured for failfast or by logging the conflict 21560 * and updating the reservation status 21561 */ 21562 mutex_enter(SD_MUTEX(un)); 21563 if ((un->un_resvd_status & SD_FAILFAST) && 21564 (sd_failfast_enable)) { 21565 
sd_panic_for_res_conflict(un); 21566 /*NOTREACHED*/ 21567 } 21568 SD_INFO(SD_LOG_IOCTL_MHD, un, 21569 "sd_mhd_watch_cb: Reservation Conflict\n"); 21570 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21571 mutex_exit(SD_MUTEX(un)); 21572 } 21573 } 21574 21575 if (sensep != NULL) { 21576 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21577 mutex_enter(SD_MUTEX(un)); 21578 if ((scsi_sense_asc(sensep) == 21579 SD_SCSI_RESET_SENSE_CODE) && 21580 (un->un_resvd_status & SD_RESERVE)) { 21581 /* 21582 * The additional sense code indicates a power 21583 * on or bus device reset has occurred; update 21584 * the reservation status. 21585 */ 21586 un->un_resvd_status |= 21587 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21588 SD_INFO(SD_LOG_IOCTL_MHD, un, 21589 "sd_mhd_watch_cb: Lost Reservation\n"); 21590 } 21591 } else { 21592 return (0); 21593 } 21594 } else { 21595 mutex_enter(SD_MUTEX(un)); 21596 } 21597 21598 if ((un->un_resvd_status & SD_RESERVE) && 21599 (un->un_resvd_status & SD_LOST_RESERVE)) { 21600 if (un->un_resvd_status & SD_WANT_RESERVE) { 21601 /* 21602 * A reset occurred in between the last probe and this 21603 * one so if a timeout is pending cancel it. 21604 */ 21605 if (un->un_resvd_timeid) { 21606 timeout_id_t temp_id = un->un_resvd_timeid; 21607 un->un_resvd_timeid = NULL; 21608 mutex_exit(SD_MUTEX(un)); 21609 (void) untimeout(temp_id); 21610 mutex_enter(SD_MUTEX(un)); 21611 } 21612 un->un_resvd_status &= ~SD_WANT_RESERVE; 21613 } 21614 if (un->un_resvd_timeid == 0) { 21615 /* Schedule a timeout to handle the lost reservation */ 21616 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21617 (void *)dev, 21618 drv_usectohz(sd_reinstate_resv_delay)); 21619 } 21620 } 21621 mutex_exit(SD_MUTEX(un)); 21622 return (0); 21623 } 21624 21625 21626 /* 21627 * Function: sd_mhd_watch_incomplete() 21628 * 21629 * Description: This function is used to find out why a scsi pkt sent by the 21630 * scsi watch facility was not completed. Under some scenarios this 21631 * routine will return. Otherwise it will send a bus reset to see 21632 * if the drive is still online. 21633 * 21634 * Arguments: un - driver soft state (unit) structure 21635 * pkt - incomplete scsi pkt 21636 */ 21637 21638 static void 21639 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 21640 { 21641 int be_chatty; 21642 int perr; 21643 21644 ASSERT(pkt != NULL); 21645 ASSERT(un != NULL); 21646 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 21647 perr = (pkt->pkt_statistics & STAT_PERR); 21648 21649 mutex_enter(SD_MUTEX(un)); 21650 if (un->un_state == SD_STATE_DUMPING) { 21651 mutex_exit(SD_MUTEX(un)); 21652 return; 21653 } 21654 21655 switch (pkt->pkt_reason) { 21656 case CMD_UNX_BUS_FREE: 21657 /* 21658 * If we had a parity error that caused the target to drop BSY*, 21659 * don't be chatty about it. 21660 */ 21661 if (perr && be_chatty) { 21662 be_chatty = 0; 21663 } 21664 break; 21665 case CMD_TAG_REJECT: 21666 /* 21667 * The SCSI-2 spec states that a tag reject will be sent by the 21668 * target if tagged queuing is not supported. A tag reject may 21669 * also be sent during certain initialization periods or to 21670 * control internal resources. For the latter case the target 21671 * may also return Queue Full. 21672 * 21673 * If this driver receives a tag reject from a target that is 21674 * going through an init period or controlling internal 21675 * resources tagged queuing will be disabled. 
This is a less 21676 * than optimal behavior but the driver is unable to determine 21677 * the target state and assumes tagged queueing is not supported. 21678 */ 21679 pkt->pkt_flags = 0; 21680 un->un_tagflags = 0; 21681 21682 if (un->un_f_opt_queueing == TRUE) { 21683 un->un_throttle = min(un->un_throttle, 3); 21684 } else { 21685 un->un_throttle = 1; 21686 } 21687 mutex_exit(SD_MUTEX(un)); 21688 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 21689 mutex_enter(SD_MUTEX(un)); 21690 break; 21691 case CMD_INCOMPLETE: 21692 /* 21693 * The transport stopped with an abnormal state, fallthrough and 21694 * reset the target and/or bus unless selection did not complete 21695 * (indicated by STATE_GOT_BUS) in which case we don't want to 21696 * go through a target/bus reset 21697 */ 21698 if (pkt->pkt_state == STATE_GOT_BUS) { 21699 break; 21700 } 21701 /*FALLTHROUGH*/ 21702 21703 case CMD_TIMEOUT: 21704 default: 21705 /* 21706 * The lun may still be running the command, so a lun reset 21707 * should be attempted. If the lun reset fails or cannot be 21708 * issued, then try a target reset. Lastly try a bus reset. 21709 */ 21710 if ((pkt->pkt_statistics & 21711 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 21712 int reset_retval = 0; 21713 mutex_exit(SD_MUTEX(un)); 21714 if (un->un_f_allow_bus_device_reset == TRUE) { 21715 if (un->un_f_lun_reset_enabled == TRUE) { 21716 reset_retval = 21717 scsi_reset(SD_ADDRESS(un), 21718 RESET_LUN); 21719 } 21720 if (reset_retval == 0) { 21721 reset_retval = 21722 scsi_reset(SD_ADDRESS(un), 21723 RESET_TARGET); 21724 } 21725 } 21726 if (reset_retval == 0) { 21727 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 21728 } 21729 mutex_enter(SD_MUTEX(un)); 21730 } 21731 break; 21732 } 21733 21734 /* A device/bus reset has occurred; update the reservation status. */ 21735 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 21736 (STAT_BUS_RESET | STAT_DEV_RESET))) { 21737 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21738 un->un_resvd_status |= 21739 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21740 SD_INFO(SD_LOG_IOCTL_MHD, un, 21741 "sd_mhd_watch_incomplete: Lost Reservation\n"); 21742 } 21743 } 21744 21745 /* 21746 * The disk has been turned off; update the device state. 21747 * 21748 * Note: Should we be offlining the disk here? 21749 */ 21750 if (pkt->pkt_state == STATE_GOT_BUS) { 21751 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 21752 "Disk not responding to selection\n"); 21753 if (un->un_state != SD_STATE_OFFLINE) { 21754 New_state(un, SD_STATE_OFFLINE); 21755 } 21756 } else if (be_chatty) { 21757 /* 21758 * suppress messages if they are all the same pkt reason; 21759 * with TQ, many (up to 256) are returned with the same 21760 * pkt_reason 21761 */ 21762 if (pkt->pkt_reason != un->un_last_pkt_reason) { 21763 SD_ERROR(SD_LOG_IOCTL_MHD, un, 21764 "sd_mhd_watch_incomplete: " 21765 "SCSI transport failed: reason '%s'\n", 21766 scsi_rname(pkt->pkt_reason)); 21767 } 21768 } 21769 un->un_last_pkt_reason = pkt->pkt_reason; 21770 mutex_exit(SD_MUTEX(un)); 21771 } 21772 21773 21774 /* 21775 * Function: sd_sname() 21776 * 21777 * Description: This is a simple little routine to return a string containing 21778 * a printable description of the command status byte for use in 21779 * logging. 21780 * 21781 * Arguments: status - the status byte 21782 * 21783 * Return Code: char * - string containing status description.
21784 */ 21785 21786 static char * 21787 sd_sname(uchar_t status) 21788 { 21789 switch (status & STATUS_MASK) { 21790 case STATUS_GOOD: 21791 return ("good status"); 21792 case STATUS_CHECK: 21793 return ("check condition"); 21794 case STATUS_MET: 21795 return ("condition met"); 21796 case STATUS_BUSY: 21797 return ("busy"); 21798 case STATUS_INTERMEDIATE: 21799 return ("intermediate"); 21800 case STATUS_INTERMEDIATE_MET: 21801 return ("intermediate - condition met"); 21802 case STATUS_RESERVATION_CONFLICT: 21803 return ("reservation_conflict"); 21804 case STATUS_TERMINATED: 21805 return ("command terminated"); 21806 case STATUS_QFULL: 21807 return ("queue full"); 21808 default: 21809 return ("<unknown status>"); 21810 } 21811 } 21812 21813 21814 /* 21815 * Function: sd_mhd_resvd_recover() 21816 * 21817 * Description: This function adds a reservation entry to the 21818 * sd_resv_reclaim_request list and signals the reservation 21819 * reclaim thread that there is work pending. If the reservation 21820 * reclaim thread has not been previously created this function 21821 * will kick it off. 21822 * 21823 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21824 * among multiple watches that share this callback function 21825 * 21826 * Context: This routine is called by timeout() and is run in interrupt 21827 * context. It must not sleep or call other functions which may 21828 * sleep. 21829 */ 21830 21831 static void 21832 sd_mhd_resvd_recover(void *arg) 21833 { 21834 dev_t dev = (dev_t)arg; 21835 struct sd_lun *un; 21836 struct sd_thr_request *sd_treq = NULL; 21837 struct sd_thr_request *sd_cur = NULL; 21838 struct sd_thr_request *sd_prev = NULL; 21839 int already_there = 0; 21840 21841 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21842 return; 21843 } 21844 21845 mutex_enter(SD_MUTEX(un)); 21846 un->un_resvd_timeid = NULL; 21847 if (un->un_resvd_status & SD_WANT_RESERVE) { 21848 /* 21849 * There was a reset so don't issue the reserve, allow the 21850 * sd_mhd_watch_cb callback function to notice this and 21851 * reschedule the timeout for reservation. 21852 */ 21853 mutex_exit(SD_MUTEX(un)); 21854 return; 21855 } 21856 mutex_exit(SD_MUTEX(un)); 21857 21858 /* 21859 * Add this device to the sd_resv_reclaim_request list and the 21860 * sd_resv_reclaim_thread should take care of the rest. 21861 * 21862 * Note: We can't sleep in this context so if the memory allocation 21863 * fails allow the sd_mhd_watch_cb callback function to notice this and 21864 * reschedule the timeout for reservation. 
(4378460) 21865 */ 21866 sd_treq = (struct sd_thr_request *) 21867 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 21868 if (sd_treq == NULL) { 21869 return; 21870 } 21871 21872 sd_treq->sd_thr_req_next = NULL; 21873 sd_treq->dev = dev; 21874 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21875 if (sd_tr.srq_thr_req_head == NULL) { 21876 sd_tr.srq_thr_req_head = sd_treq; 21877 } else { 21878 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 21879 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 21880 if (sd_cur->dev == dev) { 21881 /* 21882 * already in the queue, so don't log 21883 * another request for the device 21884 */ 21885 already_there = 1; 21886 break; 21887 } 21888 sd_prev = sd_cur; 21889 } 21890 if (!already_there) { 21891 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 21892 "logging request for %lx\n", dev); 21893 sd_prev->sd_thr_req_next = sd_treq; 21894 } else { 21895 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 21896 } 21897 } 21898 21899 /* 21900 * Create a kernel thread to do the reservation reclaim if one does not 21901 * already exist, and return from this timeout handler; we cannot block 21902 * in this context while the reservation reclaim is carried out. 21903 */ 21904 if (sd_tr.srq_resv_reclaim_thread == NULL) 21905 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 21906 sd_resv_reclaim_thread, NULL, 21907 0, &p0, TS_RUN, v.v_maxsyspri - 2); 21908 21909 /* Tell the reservation reclaim thread that it has work to do */ 21910 cv_signal(&sd_tr.srq_resv_reclaim_cv); 21911 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21912 } 21913 21914 /* 21915 * Function: sd_resv_reclaim_thread() 21916 * 21917 * Description: This function implements the reservation reclaim operations. 21918 * 21919 * Arguments: none 21920 * 21921 */ 21922 21923 static void 21924 sd_resv_reclaim_thread() 21925 { 21926 struct sd_lun *un; 21927 struct sd_thr_request *sd_mhreq; 21928 21929 /* Wait for work */ 21930 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21931 if (sd_tr.srq_thr_req_head == NULL) { 21932 cv_wait(&sd_tr.srq_resv_reclaim_cv, 21933 &sd_tr.srq_resv_reclaim_mutex); 21934 } 21935 21936 /* Loop while we have work */ 21937 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 21938 un = ddi_get_soft_state(sd_state, 21939 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 21940 if (un == NULL) { 21941 /* 21942 * softstate structure is NULL so just 21943 * dequeue the request and continue 21944 */ 21945 sd_tr.srq_thr_req_head = 21946 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21947 kmem_free(sd_tr.srq_thr_cur_req, 21948 sizeof (struct sd_thr_request)); 21949 continue; 21950 } 21951 21952 /* dequeue the request */ 21953 sd_mhreq = sd_tr.srq_thr_cur_req; 21954 sd_tr.srq_thr_req_head = 21955 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21956 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21957 21958 /* 21959 * Reclaim reservation only if SD_RESERVE is still set. There 21960 * may have been a call to MHIOCRELEASE before we got here. 21961 */ 21962 mutex_enter(SD_MUTEX(un)); 21963 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21964 /* 21965 * Note: The SD_LOST_RESERVE flag is cleared before 21966 * reclaiming the reservation.
If this is done after the 21967 * call to sd_reserve_release, a reservation loss in the 21968 * window between pkt completion of the reserve cmd and the 21969 * mutex_enter below may not be recognized. 21970 */ 21971 un->un_resvd_status &= ~SD_LOST_RESERVE; 21972 mutex_exit(SD_MUTEX(un)); 21973 21974 if (sd_reserve_release(sd_mhreq->dev, 21975 SD_RESERVE) == 0) { 21976 mutex_enter(SD_MUTEX(un)); 21977 un->un_resvd_status |= SD_RESERVE; 21978 mutex_exit(SD_MUTEX(un)); 21979 SD_INFO(SD_LOG_IOCTL_MHD, un, 21980 "sd_resv_reclaim_thread: " 21981 "Reservation Recovered\n"); 21982 } else { 21983 mutex_enter(SD_MUTEX(un)); 21984 un->un_resvd_status |= SD_LOST_RESERVE; 21985 mutex_exit(SD_MUTEX(un)); 21986 SD_INFO(SD_LOG_IOCTL_MHD, un, 21987 "sd_resv_reclaim_thread: Failed " 21988 "Reservation Recovery\n"); 21989 } 21990 } else { 21991 mutex_exit(SD_MUTEX(un)); 21992 } 21993 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21994 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 21995 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 21996 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 21997 /* 21998 * Wake up the destroy thread if anyone is waiting on 21999 * us to complete. 22000 */ 22001 cv_signal(&sd_tr.srq_inprocess_cv); 22002 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22003 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22004 } 22005 22006 /* 22007 * Clean up the sd_tr structure now that this thread no longer exists. 22008 */ 22009 ASSERT(sd_tr.srq_thr_req_head == NULL); 22010 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22011 sd_tr.srq_resv_reclaim_thread = NULL; 22012 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22013 thread_exit(); 22014 } 22015 22016 22017 /* 22018 * Function: sd_rmv_resv_reclaim_req() 22019 * 22020 * Description: This function removes any pending reservation reclaim requests 22021 * for the specified device. 22022 * 22023 * Arguments: dev - the device 'dev_t' 22024 */ 22025 22026 static void 22027 sd_rmv_resv_reclaim_req(dev_t dev) 22028 { 22029 struct sd_thr_request *sd_mhreq; 22030 struct sd_thr_request *sd_prev; 22031 22032 /* Remove a reservation reclaim request from the list */ 22033 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22034 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22035 /* 22036 * We are attempting to reinstate the reservation for 22037 * this device. We wait for sd_reserve_release() 22038 * to return before we return. 22039 */ 22040 cv_wait(&sd_tr.srq_inprocess_cv, 22041 &sd_tr.srq_resv_reclaim_mutex); 22042 } else { 22043 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22044 if (sd_mhreq && sd_mhreq->dev == dev) { 22045 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22046 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22047 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22048 return; 22049 } 22050 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22051 if (sd_mhreq && sd_mhreq->dev == dev) { 22052 break; 22053 } 22054 sd_prev = sd_mhreq; 22055 } 22056 if (sd_mhreq != NULL) { 22057 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22058 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22059 } 22060 } 22061 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22062 } 22063 22064 22065 /* 22066 * Function: sd_mhd_reset_notify_cb() 22067 * 22068 * Description: This is a callback function for scsi_reset_notify. This 22069 * function updates the soft state reserved status and logs the 22070 * reset. The driver scsi watch facility callback function 22071 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22072 * will reclaim the reservation. 
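 *
 * Note: pieced together from the routines in this file, the overall
 * reclaim protocol runs roughly as follows (outline for exposition
 * only):
 *
 *	1. scsi_reset_notify invokes sd_mhd_reset_notify_cb(), which
 *	   sets SD_LOST_RESERVE | SD_WANT_RESERVE in un_resvd_status.
 *	2. sd_mhd_watch_cb() notices SD_WANT_RESERVE and schedules a
 *	   timeout of sd_mhd_resvd_recover().
 *	3. sd_mhd_resvd_recover() queues a request and signals
 *	   sd_resv_reclaim_thread(), which re-issues the RESERVE via
 *	   sd_reserve_release().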
22073 * 22074 * Arguments: arg - driver soft state (unit) structure 22075 */ 22076 22077 static void 22078 sd_mhd_reset_notify_cb(caddr_t arg) 22079 { 22080 struct sd_lun *un = (struct sd_lun *)arg; 22081 22082 mutex_enter(SD_MUTEX(un)); 22083 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22084 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22085 SD_INFO(SD_LOG_IOCTL_MHD, un, 22086 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22087 } 22088 mutex_exit(SD_MUTEX(un)); 22089 } 22090 22091 22092 /* 22093 * Function: sd_take_ownership() 22094 * 22095 * Description: This routine implements an algorithm to achieve a stable 22096 * reservation on disks which don't implement priority reserve, 22097 * and makes sure that other hosts' re-reservation attempts fail. 22098 * This algorithm consists of a loop that keeps issuing the RESERVE 22099 * for some period of time (min_ownership_delay, default 6 seconds). 22100 * During that loop, it looks to see if there has been a bus device 22101 * reset or bus reset (both of which cause an existing reservation 22102 * to be lost). If the reservation is lost, issue RESERVE until a 22103 * period of min_ownership_delay with no resets has gone by, or 22104 * until max_ownership_delay has expired. This loop ensures that 22105 * the host really did manage to reserve the device, in spite of 22106 * resets. The looping for min_ownership_delay (default six 22107 * seconds) is important to early generation clustering products, 22108 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22109 * MHIOCENFAILFAST periodic timer of two seconds. By having 22110 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22111 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22112 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22113 * have already noticed, via the MHIOCENFAILFAST polling, that it 22114 * no longer "owns" the disk and will have panicked itself. Thus, 22115 * the host issuing the MHIOCTKOWN is assured (with timing 22116 * dependencies) that by the time it actually starts to use the 22117 * disk for real work, the old owner is no longer accessing it. 22118 * 22119 * min_ownership_delay is the minimum amount of time for which the 22120 * disk must be reserved continuously devoid of resets before the 22121 * MHIOCTKOWN ioctl will return success. 22122 * 22123 * max_ownership_delay indicates the amount of time within which 22124 * taking ownership should succeed, or time out with an error. 22125 * 22126 * Arguments: dev - the device 'dev_t' 22127 * *p - struct containing timing info. 22128 * 22129 * Return Code: 0 for success or error code 22130 */ 22131 22132 static int 22133 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22134 { 22135 struct sd_lun *un; 22136 int rval; 22137 int err; 22138 int reservation_count = 0; 22139 int min_ownership_delay = 6000000; /* in usec */ 22140 int max_ownership_delay = 30000000; /* in usec */ 22141 clock_t start_time; /* starting time of this algorithm */ 22142 clock_t end_time; /* time limit for giving up */ 22143 clock_t ownership_time; /* time limit for stable ownership */ 22144 clock_t current_time; 22145 clock_t previous_current_time; 22146 22147 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22148 return (ENXIO); 22149 } 22150 22151 /* 22152 * Attempt a device reservation. A priority reservation is requested. 
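 * If the device is currently reserved by another host, sd_reserve_release()
 * (below) may "break" that reservation: on a reservation conflict it
 * escalates through a LUN reset, a target reset and finally a bus reset,
 * and then re-issues a plain RESERVE. In outline (for exposition only,
 * not additional code):
 *
 *	rval = RESERVE;
 *	if (rval != 0 && uscsi_status == RESERVATION CONFLICT) {
 *		reset LUN, else target, else bus;
 *		rval = RESERVE;		(re-issued, without request sense)
 *	}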
22153 */ 22154 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22155 != SD_SUCCESS) { 22156 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22157 "sd_take_ownership: return(1)=%d\n", rval); 22158 return (rval); 22159 } 22160 22161 /* Update the softstate reserved status to indicate the reservation */ 22162 mutex_enter(SD_MUTEX(un)); 22163 un->un_resvd_status |= SD_RESERVE; 22164 un->un_resvd_status &= 22165 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22166 mutex_exit(SD_MUTEX(un)); 22167 22168 if (p != NULL) { 22169 if (p->min_ownership_delay != 0) { 22170 min_ownership_delay = p->min_ownership_delay * 1000; 22171 } 22172 if (p->max_ownership_delay != 0) { 22173 max_ownership_delay = p->max_ownership_delay * 1000; 22174 } 22175 } 22176 SD_INFO(SD_LOG_IOCTL_MHD, un, 22177 "sd_take_ownership: min, max delays: %d, %d\n", 22178 min_ownership_delay, max_ownership_delay); 22179 22180 start_time = ddi_get_lbolt(); 22181 current_time = start_time; 22182 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22183 end_time = start_time + drv_usectohz(max_ownership_delay); 22184 22185 while (current_time - end_time < 0) { 22186 delay(drv_usectohz(500000)); 22187 22188 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22189 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22190 mutex_enter(SD_MUTEX(un)); 22191 rval = (un->un_resvd_status & 22192 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22193 mutex_exit(SD_MUTEX(un)); 22194 break; 22195 } 22196 } 22197 previous_current_time = current_time; 22198 current_time = ddi_get_lbolt(); 22199 mutex_enter(SD_MUTEX(un)); 22200 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22201 ownership_time = ddi_get_lbolt() + 22202 drv_usectohz(min_ownership_delay); 22203 reservation_count = 0; 22204 } else { 22205 reservation_count++; 22206 } 22207 un->un_resvd_status |= SD_RESERVE; 22208 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22209 mutex_exit(SD_MUTEX(un)); 22210 22211 SD_INFO(SD_LOG_IOCTL_MHD, un, 22212 "sd_take_ownership: ticks for loop iteration=%ld, " 22213 "reservation=%s\n", (current_time - previous_current_time), 22214 reservation_count ? 
"ok" : "reclaimed"); 22215 22216 if (current_time - ownership_time >= 0 && 22217 reservation_count >= 4) { 22218 rval = 0; /* Achieved a stable ownership */ 22219 break; 22220 } 22221 if (current_time - end_time >= 0) { 22222 rval = EACCES; /* No ownership in max possible time */ 22223 break; 22224 } 22225 } 22226 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22227 "sd_take_ownership: return(2)=%d\n", rval); 22228 return (rval); 22229 } 22230 22231 22232 /* 22233 * Function: sd_reserve_release() 22234 * 22235 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22236 * PRIORITY RESERVE commands based on a user specified command type 22237 * 22238 * Arguments: dev - the device 'dev_t' 22239 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22240 * SD_RESERVE, SD_RELEASE 22241 * 22242 * Return Code: 0 or Error Code 22243 */ 22244 22245 static int 22246 sd_reserve_release(dev_t dev, int cmd) 22247 { 22248 struct uscsi_cmd *com = NULL; 22249 struct sd_lun *un = NULL; 22250 char cdb[CDB_GROUP0]; 22251 int rval; 22252 22253 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22254 (cmd == SD_PRIORITY_RESERVE)); 22255 22256 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22257 return (ENXIO); 22258 } 22259 22260 /* instantiate and initialize the command and cdb */ 22261 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22262 bzero(cdb, CDB_GROUP0); 22263 com->uscsi_flags = USCSI_SILENT; 22264 com->uscsi_timeout = un->un_reserve_release_time; 22265 com->uscsi_cdblen = CDB_GROUP0; 22266 com->uscsi_cdb = cdb; 22267 if (cmd == SD_RELEASE) { 22268 cdb[0] = SCMD_RELEASE; 22269 } else { 22270 cdb[0] = SCMD_RESERVE; 22271 } 22272 22273 /* Send the command. */ 22274 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22275 SD_PATH_STANDARD); 22276 22277 /* 22278 * "break" a reservation that is held by another host, by issuing a 22279 * reset if priority reserve is desired, and we could not get the 22280 * device. 22281 */ 22282 if ((cmd == SD_PRIORITY_RESERVE) && 22283 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22284 /* 22285 * First try to reset the LUN. If we cannot, then try a target 22286 * reset, followed by a bus reset if the target reset fails. 22287 */ 22288 int reset_retval = 0; 22289 if (un->un_f_lun_reset_enabled == TRUE) { 22290 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22291 } 22292 if (reset_retval == 0) { 22293 /* The LUN reset either failed or was not issued */ 22294 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22295 } 22296 if ((reset_retval == 0) && 22297 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22298 rval = EIO; 22299 kmem_free(com, sizeof (*com)); 22300 return (rval); 22301 } 22302 22303 bzero(com, sizeof (struct uscsi_cmd)); 22304 com->uscsi_flags = USCSI_SILENT; 22305 com->uscsi_cdb = cdb; 22306 com->uscsi_cdblen = CDB_GROUP0; 22307 com->uscsi_timeout = 5; 22308 22309 /* 22310 * Reissue the last reserve command, this time without request 22311 * sense. Assume that it is just a regular reserve command. 22312 */ 22313 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22314 SD_PATH_STANDARD); 22315 } 22316 22317 /* Return an error if still getting a reservation conflict. 
*/ 22318 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22319 rval = EACCES; 22320 } 22321 22322 kmem_free(com, sizeof (*com)); 22323 return (rval); 22324 } 22325 22326 22327 #define SD_NDUMP_RETRIES 12 22328 /* 22329 * System Crash Dump routine 22330 */ 22331 22332 static int 22333 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22334 { 22335 int instance; 22336 int partition; 22337 int i; 22338 int err; 22339 struct sd_lun *un; 22340 struct scsi_pkt *wr_pktp; 22341 struct buf *wr_bp; 22342 struct buf wr_buf; 22343 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22344 daddr_t tgt_blkno; /* rmw - blkno for target */ 22345 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22346 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22347 size_t io_start_offset; 22348 int doing_rmw = FALSE; 22349 int rval; 22350 #if defined(__i386) || defined(__amd64) 22351 ssize_t dma_resid; 22352 daddr_t oblkno; 22353 #endif 22354 diskaddr_t nblks = 0; 22355 diskaddr_t start_block; 22356 22357 instance = SDUNIT(dev); 22358 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22359 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22360 return (ENXIO); 22361 } 22362 22363 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22364 22365 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22366 22367 partition = SDPART(dev); 22368 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22369 22370 /* Validate the blocks to dump against the partition size. */ 22371 22372 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22373 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22374 22375 if ((blkno + nblk) > nblks) { 22376 SD_TRACE(SD_LOG_DUMP, un, 22377 "sddump: dump range larger than partition: " 22378 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22379 blkno, nblk, nblks); 22380 return (EINVAL); 22381 } 22382 22383 mutex_enter(&un->un_pm_mutex); 22384 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22385 struct scsi_pkt *start_pktp; 22386 22387 mutex_exit(&un->un_pm_mutex); 22388 22389 /* 22390 * Use the pm framework to power on the HBA first. 22391 */ 22392 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22393 22394 /* 22395 * Dump no longer uses sdpower to power on a device; it's 22396 * in-line here so it can be done in polled mode. 22397 */ 22398 22399 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22400 22401 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22402 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22403 22404 if (start_pktp == NULL) { 22405 /* We were not given a SCSI packet, fail. */ 22406 return (EIO); 22407 } 22408 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22409 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22410 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22411 start_pktp->pkt_flags = FLAG_NOINTR; 22412 22413 mutex_enter(SD_MUTEX(un)); 22414 SD_FILL_SCSI1_LUN(un, start_pktp); 22415 mutex_exit(SD_MUTEX(un)); 22416 /* 22417 * Scsi_poll returns 0 (success) if the command completes and 22418 * the status block is STATUS_GOOD. 22419 */ 22420 if (sd_scsi_poll(un, start_pktp) != 0) { 22421 scsi_destroy_pkt(start_pktp); 22422 return (EIO); 22423 } 22424 scsi_destroy_pkt(start_pktp); 22425 (void) sd_ddi_pm_resume(un); 22426 } else { 22427 mutex_exit(&un->un_pm_mutex); 22428 } 22429 22430 mutex_enter(SD_MUTEX(un)); 22431 un->un_throttle = 0; 22432 22433 /* 22434 * The first time through, reset the specific target device. 22435 * However, when cpr calls sddump we know that sd is in 22436 * a good state, so no bus reset is required. 
* Clear sense data via Request Sense cmd. 22438 * In sddump we don't care about allow_bus_device_reset anymore 22439 */ 22440 22441 if ((un->un_state != SD_STATE_SUSPENDED) && 22442 (un->un_state != SD_STATE_DUMPING)) { 22443 22444 New_state(un, SD_STATE_DUMPING); 22445 22446 if (un->un_f_is_fibre == FALSE) { 22447 mutex_exit(SD_MUTEX(un)); 22448 /* 22449 * Attempt a bus reset for parallel scsi. 22450 * 22451 * Note: A bus reset is required because on some host 22452 * systems (e.g. E420R) a bus device reset is 22453 * insufficient to reset the state of the target. 22454 * 22455 * Note: Don't issue the reset for fibre-channel, 22456 * because this tends to hang the bus (loop) for 22457 * too long while everyone is logging out and in 22458 * and the deadman timer for dumping will fire 22459 * before the dump is complete. 22460 */ 22461 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22462 mutex_enter(SD_MUTEX(un)); 22463 Restore_state(un); 22464 mutex_exit(SD_MUTEX(un)); 22465 return (EIO); 22466 } 22467 22468 /* Delay to give the device some recovery time. */ 22469 drv_usecwait(10000); 22470 22471 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22472 SD_INFO(SD_LOG_DUMP, un, 22473 "sddump: sd_send_polled_RQS failed\n"); 22474 } 22475 mutex_enter(SD_MUTEX(un)); 22476 } 22477 } 22478 22479 /* 22480 * Convert the partition-relative block number to a 22481 * disk physical block number. 22482 */ 22483 blkno += start_block; 22484 22485 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22486 22487 22488 /* 22489 * Check if the device has a non-512 block size. 22490 */ 22491 wr_bp = NULL; 22492 if (NOT_DEVBSIZE(un)) { 22493 tgt_byte_offset = blkno * un->un_sys_blocksize; 22494 tgt_byte_count = nblk * un->un_sys_blocksize; 22495 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22496 (tgt_byte_count % un->un_tgt_blocksize)) { 22497 doing_rmw = TRUE; 22498 /* 22499 * Calculate the block number and number of blocks 22500 * in terms of the media block size. 22501 */ 22502 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22503 tgt_nblk = 22504 ((tgt_byte_offset + tgt_byte_count + 22505 (un->un_tgt_blocksize - 1)) / 22506 un->un_tgt_blocksize) - tgt_blkno; 22507 22508 /* 22509 * Invoke the routine that does the read part 22510 * of the read-modify-write. 22511 * Note that this routine returns a pointer to 22512 * a valid bp in wr_bp. 22513 */ 22514 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22515 &wr_bp); 22516 if (err) { 22517 mutex_exit(SD_MUTEX(un)); 22518 return (err); 22519 } 22520 /* 22521 * Offset is calculated as: 22522 * (original block # * system block size) - 22523 * (new block # * target block size) 22524 */ 22525 io_start_offset = 22526 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22527 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22528 22529 ASSERT((io_start_offset >= 0) && 22530 (io_start_offset < un->un_tgt_blocksize)); 22531 /* 22532 * Do the modify portion of the read-modify-write. 
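 * For example (illustrative numbers only): with 512-byte system
 * blocks and a 2048-byte target block size, blkno = 3 and nblk = 2
 * give tgt_byte_offset = 1536 and tgt_byte_count = 1024. Then
 * tgt_blkno = 1536 / 2048 = 0,
 * tgt_nblk = ((1536 + 1024 + 2047) / 2048) - 0 = 2, and
 * io_start_offset = (3 * 512) - (0 * 2048) = 1536, so the two
 * system blocks are copied into the read-back target data starting
 * 1536 bytes in.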
22533 */ 22534 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22535 (size_t)nblk * un->un_sys_blocksize); 22536 } else { 22537 doing_rmw = FALSE; 22538 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22539 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22540 } 22541 22542 /* Convert blkno and nblk to target blocks */ 22543 blkno = tgt_blkno; 22544 nblk = tgt_nblk; 22545 } else { 22546 wr_bp = &wr_buf; 22547 bzero(wr_bp, sizeof (struct buf)); 22548 wr_bp->b_flags = B_BUSY; 22549 wr_bp->b_un.b_addr = addr; 22550 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22551 wr_bp->b_resid = 0; 22552 } 22553 22554 mutex_exit(SD_MUTEX(un)); 22555 22556 /* 22557 * Obtain a SCSI packet for the write command. 22558 * It should be safe to call the allocator here without 22559 * worrying about being locked for DVMA mapping because 22560 * the address we're passed is already a DVMA mapping 22561 * 22562 * We are also not going to worry about semaphore ownership 22563 * in the dump buffer. Dumping is single threaded at present. 22564 */ 22565 22566 wr_pktp = NULL; 22567 22568 #if defined(__i386) || defined(__amd64) 22569 dma_resid = wr_bp->b_bcount; 22570 oblkno = blkno; 22571 while (dma_resid != 0) { 22572 #endif 22573 22574 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22575 wr_bp->b_flags &= ~B_ERROR; 22576 22577 #if defined(__i386) || defined(__amd64) 22578 blkno = oblkno + 22579 ((wr_bp->b_bcount - dma_resid) / 22580 un->un_tgt_blocksize); 22581 nblk = dma_resid / un->un_tgt_blocksize; 22582 22583 if (wr_pktp) { 22584 /* Partial DMA transfers after initial transfer */ 22585 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22586 blkno, nblk); 22587 } else { 22588 /* Initial transfer */ 22589 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22590 un->un_pkt_flags, NULL_FUNC, NULL, 22591 blkno, nblk); 22592 } 22593 #else 22594 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22595 0, NULL_FUNC, NULL, blkno, nblk); 22596 #endif 22597 22598 if (rval == 0) { 22599 /* We were given a SCSI packet, continue. 
*/ 22600 break; 22601 } 22602 22603 if (i == 0) { 22604 if (wr_bp->b_flags & B_ERROR) { 22605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22606 "no resources for dumping; " 22607 "error code: 0x%x, retrying", 22608 geterror(wr_bp)); 22609 } else { 22610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22611 "no resources for dumping; retrying"); 22612 } 22613 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22614 if (wr_bp->b_flags & B_ERROR) { 22615 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22616 "no resources for dumping; error code: " 22617 "0x%x, retrying\n", geterror(wr_bp)); 22618 } 22619 } else { 22620 if (wr_bp->b_flags & B_ERROR) { 22621 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22622 "no resources for dumping; " 22623 "error code: 0x%x, retries failed, " 22624 "giving up.\n", geterror(wr_bp)); 22625 } else { 22626 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22627 "no resources for dumping; " 22628 "retries failed, giving up.\n"); 22629 } 22630 mutex_enter(SD_MUTEX(un)); 22631 Restore_state(un); 22632 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22633 mutex_exit(SD_MUTEX(un)); 22634 scsi_free_consistent_buf(wr_bp); 22635 } else { 22636 mutex_exit(SD_MUTEX(un)); 22637 } 22638 return (EIO); 22639 } 22640 drv_usecwait(10000); 22641 } 22642 22643 #if defined(__i386) || defined(__amd64) 22644 /* 22645 * save the resid from PARTIAL_DMA 22646 */ 22647 dma_resid = wr_pktp->pkt_resid; 22648 if (dma_resid != 0) 22649 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22650 wr_pktp->pkt_resid = 0; 22651 #endif 22652 22653 /* SunBug 1222170 */ 22654 wr_pktp->pkt_flags = FLAG_NOINTR; 22655 22656 err = EIO; 22657 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22658 22659 /* 22660 * Scsi_poll returns 0 (success) if the command completes and 22661 * the status block is STATUS_GOOD. We should only check 22662 * errors if this condition is not true. Even then we should 22663 * send our own request sense packet only if we have a check 22664 * condition and auto request sense has not been performed by 22665 * the hba. 22666 */ 22667 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22668 22669 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22670 (wr_pktp->pkt_resid == 0)) { 22671 err = SD_SUCCESS; 22672 break; 22673 } 22674 22675 /* 22676 * Check CMD_DEV_GONE 1st, give up if device is gone. 22677 */ 22678 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22679 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22680 "Device is gone\n"); 22681 break; 22682 } 22683 22684 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22685 SD_INFO(SD_LOG_DUMP, un, 22686 "sddump: write failed with CHECK, try # %d\n", i); 22687 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22688 (void) sd_send_polled_RQS(un); 22689 } 22690 22691 continue; 22692 } 22693 22694 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22695 int reset_retval = 0; 22696 22697 SD_INFO(SD_LOG_DUMP, un, 22698 "sddump: write failed with BUSY, try # %d\n", i); 22699 22700 if (un->un_f_lun_reset_enabled == TRUE) { 22701 reset_retval = scsi_reset(SD_ADDRESS(un), 22702 RESET_LUN); 22703 } 22704 if (reset_retval == 0) { 22705 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22706 } 22707 (void) sd_send_polled_RQS(un); 22708 22709 } else { 22710 SD_INFO(SD_LOG_DUMP, un, 22711 "sddump: write failed with 0x%x, try # %d\n", 22712 SD_GET_PKT_STATUS(wr_pktp), i); 22713 mutex_enter(SD_MUTEX(un)); 22714 sd_reset_target(un, wr_pktp); 22715 mutex_exit(SD_MUTEX(un)); 22716 } 22717 22718 /* 22719 * If we are not getting anywhere with lun/target resets, 22720 * let's reset the bus. 
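 * (The escalation here mirrors sd_reserve_release() above: a LUN
 * reset first when un_f_lun_reset_enabled is set, then a target
 * reset, and, halfway through the retries, a full bus reset.)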
22721 */ 22722 if (i == SD_NDUMP_RETRIES/2) { 22723 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22724 (void) sd_send_polled_RQS(un); 22725 } 22726 22727 } 22728 #if defined(__i386) || defined(__amd64) 22729 } /* dma_resid */ 22730 #endif 22731 22732 scsi_destroy_pkt(wr_pktp); 22733 mutex_enter(SD_MUTEX(un)); 22734 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22735 mutex_exit(SD_MUTEX(un)); 22736 scsi_free_consistent_buf(wr_bp); 22737 } else { 22738 mutex_exit(SD_MUTEX(un)); 22739 } 22740 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22741 return (err); 22742 } 22743 22744 /* 22745 * Function: sd_scsi_poll() 22746 * 22747 * Description: This is a wrapper for the scsi_poll call. 22748 * 22749 * Arguments: sd_lun - The unit structure 22750 * scsi_pkt - The scsi packet being sent to the device. 22751 * 22752 * Return Code: 0 - Command completed successfully with good status 22753 * -1 - Command failed. This could indicate a check condition 22754 * or other status value requiring recovery action. 22755 * 22756 */ 22757 22758 static int 22759 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22760 { 22761 int status; 22762 22763 ASSERT(un != NULL); 22764 ASSERT(!mutex_owned(SD_MUTEX(un))); 22765 ASSERT(pktp != NULL); 22766 22767 status = SD_SUCCESS; 22768 22769 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22770 pktp->pkt_flags |= un->un_tagflags; 22771 pktp->pkt_flags &= ~FLAG_NODISCON; 22772 } 22773 22774 status = sd_ddi_scsi_poll(pktp); 22775 /* 22776 * Scsi_poll returns 0 (success) if the command completes and the 22777 * status block is STATUS_GOOD. We should only check errors if this 22778 * condition is not true. Even then we should send our own request 22779 * sense packet only if we have a check condition and auto 22780 * request sense has not been performed by the hba. 22781 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22782 */ 22783 if ((status != SD_SUCCESS) && 22784 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22785 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22786 (pktp->pkt_reason != CMD_DEV_GONE)) 22787 (void) sd_send_polled_RQS(un); 22788 22789 return (status); 22790 } 22791 22792 /* 22793 * Function: sd_send_polled_RQS() 22794 * 22795 * Description: This sends the request sense command to a device. 22796 * 22797 * Arguments: sd_lun - The unit structure 22798 * 22799 * Return Code: 0 - Command completed successfully with good status 22800 * -1 - Command failed. 
22801 * 22802 */ 22803 22804 static int 22805 sd_send_polled_RQS(struct sd_lun *un) 22806 { 22807 int ret_val; 22808 struct scsi_pkt *rqs_pktp; 22809 struct buf *rqs_bp; 22810 22811 ASSERT(un != NULL); 22812 ASSERT(!mutex_owned(SD_MUTEX(un))); 22813 22814 ret_val = SD_SUCCESS; 22815 22816 rqs_pktp = un->un_rqs_pktp; 22817 rqs_bp = un->un_rqs_bp; 22818 22819 mutex_enter(SD_MUTEX(un)); 22820 22821 if (un->un_sense_isbusy) { 22822 ret_val = SD_FAILURE; 22823 mutex_exit(SD_MUTEX(un)); 22824 return (ret_val); 22825 } 22826 22827 /* 22828 * If the request sense buffer (and packet) is not in use, 22829 * let's set the un_sense_isbusy and send our packet 22830 */ 22831 un->un_sense_isbusy = 1; 22832 rqs_pktp->pkt_resid = 0; 22833 rqs_pktp->pkt_reason = 0; 22834 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22835 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22836 22837 mutex_exit(SD_MUTEX(un)); 22838 22839 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22840 " 0x%p\n", rqs_bp->b_un.b_addr); 22841 22842 /* 22843 * Can't send this to sd_scsi_poll, we wrap ourselves around the 22844 * axle - it has a call into us! 22845 */ 22846 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22847 SD_INFO(SD_LOG_COMMON, un, 22848 "sd_send_polled_RQS: RQS failed\n"); 22849 } 22850 22851 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22852 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22853 22854 mutex_enter(SD_MUTEX(un)); 22855 un->un_sense_isbusy = 0; 22856 mutex_exit(SD_MUTEX(un)); 22857 22858 return (ret_val); 22859 } 22860 22861 /* 22862 * Defines needed for localized version of the scsi_poll routine. 22863 */ 22864 #define SD_CSEC 10000 /* usecs */ 22865 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22866 22867 22868 /* 22869 * Function: sd_ddi_scsi_poll() 22870 * 22871 * Description: Localized version of the scsi_poll routine. The purpose is to 22872 * send a scsi_pkt to a device as a polled command. This version 22873 * is to ensure more robust handling of transport errors. 22874 * Specifically this routine cures not ready, coming ready 22875 * transition for power up and reset of sonoma's. This can take 22876 * up to 45 seconds for power-on and 20 seconds for reset of a 22877 * sonoma lun. 22878 * 22879 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22880 * 22881 * Return Code: 0 - Command completed successfully with good status 22882 * -1 - Command failed. 22883 * 22884 */ 22885 22886 static int 22887 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22888 { 22889 int busy_count; 22890 int timeout; 22891 int rval = SD_FAILURE; 22892 int savef; 22893 uint8_t *sensep; 22894 long savet; 22895 void (*savec)(); 22896 /* 22897 * The following is defined in machdep.c and is used in determining if 22898 * the scsi transport system will do polled I/O instead of interrupt 22899 * I/O when called from xx_dump(). 22900 */ 22901 extern int do_polled_io; 22902 22903 /* 22904 * save old flags in pkt, to restore at end 22905 */ 22906 savef = pkt->pkt_flags; 22907 savec = pkt->pkt_comp; 22908 savet = pkt->pkt_time; 22909 22910 pkt->pkt_flags |= FLAG_NOINTR; 22911 22912 /* 22913 * XXX there is nothing in the SCSA spec that states that we should not 22914 * do a callback for polled cmds; however, removing this will break sd 22915 * and probably other target drivers 22916 */ 22917 pkt->pkt_comp = NULL; 22918 22919 /* 22920 * we don't like a polled command without timeout. 22921 * 60 seconds seems long enough. 
22922 */ 22923 if (pkt->pkt_time == 0) { 22924 pkt->pkt_time = SCSI_POLL_TIMEOUT; 22925 } 22926 22927 /* 22928 * Send polled cmd. 22929 * 22930 * We do some error recovery for various errors. Tran_busy, 22931 * queue full, and non-dispatched commands are retried every 10 msec. 22932 * as they are typically transient failures. Busy status and Not 22933 * Ready are retried every second as this status takes a while to 22934 * change. Unit attention is retried for pkt_time (60) times 22935 * with no delay. 22936 */ 22937 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 22938 22939 for (busy_count = 0; busy_count < timeout; busy_count++) { 22940 int rc; 22941 int poll_delay; 22942 22943 /* 22944 * Initialize pkt status variables. 22945 */ 22946 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 22947 22948 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 22949 if (rc != TRAN_BUSY) { 22950 /* Transport failed - give up. */ 22951 break; 22952 } else { 22953 /* Transport busy - try again. */ 22954 poll_delay = 1 * SD_CSEC; /* 10 msec */ 22955 } 22956 } else { 22957 /* 22958 * Transport accepted - check pkt status. 22959 */ 22960 rc = (*pkt->pkt_scbp) & STATUS_MASK; 22961 if (pkt->pkt_reason == CMD_CMPLT && 22962 rc == STATUS_CHECK && 22963 pkt->pkt_state & STATE_ARQ_DONE) { 22964 struct scsi_arq_status *arqstat = 22965 (struct scsi_arq_status *)(pkt->pkt_scbp); 22966 22967 sensep = (uint8_t *)&arqstat->sts_sensedata; 22968 } else { 22969 sensep = NULL; 22970 } 22971 22972 if ((pkt->pkt_reason == CMD_CMPLT) && 22973 (rc == STATUS_GOOD)) { 22974 /* No error - we're done */ 22975 rval = SD_SUCCESS; 22976 break; 22977 22978 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 22979 /* Lost connection - give up */ 22980 break; 22981 22982 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 22983 (pkt->pkt_state == 0)) { 22984 /* Pkt not dispatched - try again. */ 22985 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22986 22987 } else if ((pkt->pkt_reason == CMD_CMPLT) && 22988 (rc == STATUS_QFULL)) { 22989 /* Queue full - try again. */ 22990 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22991 22992 } else if ((pkt->pkt_reason == CMD_CMPLT) && 22993 (rc == STATUS_BUSY)) { 22994 /* Busy - try again. */ 22995 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 22996 busy_count += (SD_SEC_TO_CSEC - 1); 22997 22998 } else if ((sensep != NULL) && 22999 (scsi_sense_key(sensep) == 23000 KEY_UNIT_ATTENTION)) { 23001 /* Unit Attention - try again */ 23002 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23003 continue; 23004 23005 } else if ((sensep != NULL) && 23006 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23007 (scsi_sense_asc(sensep) == 0x04) && 23008 (scsi_sense_ascq(sensep) == 0x01)) { 23009 /* Not ready -> ready - try again. */ 23010 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23011 busy_count += (SD_SEC_TO_CSEC - 1); 23012 23013 } else { 23014 /* BAD status - give up. 
*/ 23015 break; 23016 } 23017 } 23018 23019 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23020 !do_polled_io) { 23021 delay(drv_usectohz(poll_delay)); 23022 } else { 23023 /* we busy wait during cpr_dump or interrupt threads */ 23024 drv_usecwait(poll_delay); 23025 } 23026 } 23027 23028 pkt->pkt_flags = savef; 23029 pkt->pkt_comp = savec; 23030 pkt->pkt_time = savet; 23031 return (rval); 23032 } 23033 23034 23035 /* 23036 * Function: sd_persistent_reservation_in_read_keys 23037 * 23038 * Description: This routine is the driver entry point for handling CD-ROM 23039 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23040 * by sending the SCSI-3 PRIN commands to the device. 23041 * Processes the read keys command response by copying the 23042 * reservation key information into the user provided buffer. 23043 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23044 * 23045 * Arguments: un - Pointer to soft state struct for the target. 23046 * usrp - user provided pointer to multihost Persistent In Read 23047 * Keys structure (mhioc_inkeys_t) 23048 * flag - this argument is a pass through to ddi_copyxxx() 23049 * directly from the mode argument of ioctl(). 23050 * 23051 * Return Code: 0 - Success 23052 * EACCES 23053 * ENOTSUP 23054 * errno return code from sd_send_scsi_cmd() 23055 * 23056 * Context: Can sleep. Does not return until command is completed. 23057 */ 23058 23059 static int 23060 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23061 mhioc_inkeys_t *usrp, int flag) 23062 { 23063 #ifdef _MULTI_DATAMODEL 23064 struct mhioc_key_list32 li32; 23065 #endif 23066 sd_prin_readkeys_t *in; 23067 mhioc_inkeys_t *ptr; 23068 mhioc_key_list_t li; 23069 uchar_t *data_bufp; 23070 int data_len; 23071 int rval; 23072 size_t copysz; 23073 23074 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23075 return (EINVAL); 23076 } 23077 bzero(&li, sizeof (mhioc_key_list_t)); 23078 23079 /* 23080 * Get the listsize from user 23081 */ 23082 #ifdef _MULTI_DATAMODEL 23083 23084 switch (ddi_model_convert_from(flag & FMODELS)) { 23085 case DDI_MODEL_ILP32: 23086 copysz = sizeof (struct mhioc_key_list32); 23087 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23088 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23089 "sd_persistent_reservation_in_read_keys: " 23090 "failed ddi_copyin: mhioc_key_list32_t\n"); 23091 rval = EFAULT; 23092 goto done; 23093 } 23094 li.listsize = li32.listsize; 23095 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23096 break; 23097 23098 case DDI_MODEL_NONE: 23099 copysz = sizeof (mhioc_key_list_t); 23100 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23101 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23102 "sd_persistent_reservation_in_read_keys: " 23103 "failed ddi_copyin: mhioc_key_list_t\n"); 23104 rval = EFAULT; 23105 goto done; 23106 } 23107 break; 23108 } 23109 23110 #else /* ! 
_MULTI_DATAMODEL */ 23111 copysz = sizeof (mhioc_key_list_t); 23112 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23113 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23114 "sd_persistent_reservation_in_read_keys: " 23115 "failed ddi_copyin: mhioc_key_list_t\n"); 23116 rval = EFAULT; 23117 goto done; 23118 } 23119 #endif 23120 23121 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23122 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23123 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23124 23125 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23126 data_len, data_bufp)) != 0) { 23127 goto done; 23128 } 23129 in = (sd_prin_readkeys_t *)data_bufp; 23130 ptr->generation = BE_32(in->generation); 23131 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23132 23133 /* 23134 * Return the min(listsize, listlen) keys 23135 */ 23136 #ifdef _MULTI_DATAMODEL 23137 23138 switch (ddi_model_convert_from(flag & FMODELS)) { 23139 case DDI_MODEL_ILP32: 23140 li32.listlen = li.listlen; 23141 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23142 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23143 "sd_persistent_reservation_in_read_keys: " 23144 "failed ddi_copyout: mhioc_key_list32_t\n"); 23145 rval = EFAULT; 23146 goto done; 23147 } 23148 break; 23149 23150 case DDI_MODEL_NONE: 23151 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23152 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23153 "sd_persistent_reservation_in_read_keys: " 23154 "failed ddi_copyout: mhioc_key_list_t\n"); 23155 rval = EFAULT; 23156 goto done; 23157 } 23158 break; 23159 } 23160 23161 #else /* ! _MULTI_DATAMODEL */ 23162 23163 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23164 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23165 "sd_persistent_reservation_in_read_keys: " 23166 "failed ddi_copyout: mhioc_key_list_t\n"); 23167 rval = EFAULT; 23168 goto done; 23169 } 23170 23171 #endif /* _MULTI_DATAMODEL */ 23172 23173 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23174 li.listsize * MHIOC_RESV_KEY_SIZE); 23175 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23176 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23177 "sd_persistent_reservation_in_read_keys: " 23178 "failed ddi_copyout: keylist\n"); 23179 rval = EFAULT; 23180 } 23181 done: 23182 kmem_free(data_bufp, data_len); 23183 return (rval); 23184 } 23185 23186 23187 /* 23188 * Function: sd_persistent_reservation_in_read_resv 23189 * 23190 * Description: This routine is the driver entry point for handling CD-ROM 23191 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23192 * by sending the SCSI-3 PRIN commands to the device. 23193 * Process the read persistent reservations command response by 23194 * copying the reservation information into the user provided 23195 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23196 * 23197 * Arguments: un - Pointer to soft state struct for the target. 23198 * usrp - user provided pointer to multihost Persistent In Read 23199 * Reservations structure (mhioc_inresvs_t) 23200 * flag - this argument is a pass through to ddi_copyxxx() 23201 * directly from the mode argument of ioctl(). 23202 * 23203 * Return Code: 0 - Success 23204 * EACCES 23205 * ENOTSUP 23206 * errno return code from sd_send_scsi_cmd() 23207 * 23208 * Context: Can sleep. Does not return until command is completed. 
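 *
 * Note: for illustration only, a user-level caller would be expected
 * to exercise this entry point roughly as follows (hypothetical
 * sketch; error handling omitted):
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t dl;
 *	mhioc_inresvs_t arg;
 *
 *	dl.listsize = 4;
 *	dl.list = descs;
 *	arg.li = &dl;
 *	if (ioctl(fd, MHIOCGRP_INRESV, &arg) == 0) {
 *		dl.listlen now holds the reservation count reported
 *		by the device, and arg.generation the PRIN generation.
 *	}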
23209 */ 23210 23211 static int 23212 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23213 mhioc_inresvs_t *usrp, int flag) 23214 { 23215 #ifdef _MULTI_DATAMODEL 23216 struct mhioc_resv_desc_list32 resvlist32; 23217 #endif 23218 sd_prin_readresv_t *in; 23219 mhioc_inresvs_t *ptr; 23220 sd_readresv_desc_t *readresv_ptr; 23221 mhioc_resv_desc_list_t resvlist; 23222 mhioc_resv_desc_t resvdesc; 23223 uchar_t *data_bufp; 23224 int data_len; 23225 int rval; 23226 int i; 23227 size_t copysz; 23228 mhioc_resv_desc_t *bufp; 23229 23230 if ((ptr = usrp) == NULL) { 23231 return (EINVAL); 23232 } 23233 23234 /* 23235 * Get the listsize from user 23236 */ 23237 #ifdef _MULTI_DATAMODEL 23238 switch (ddi_model_convert_from(flag & FMODELS)) { 23239 case DDI_MODEL_ILP32: 23240 copysz = sizeof (struct mhioc_resv_desc_list32); 23241 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23242 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23243 "sd_persistent_reservation_in_read_resv: " 23244 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23245 rval = EFAULT; 23246 goto done; 23247 } 23248 resvlist.listsize = resvlist32.listsize; 23249 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23250 break; 23251 23252 case DDI_MODEL_NONE: 23253 copysz = sizeof (mhioc_resv_desc_list_t); 23254 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23255 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23256 "sd_persistent_reservation_in_read_resv: " 23257 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23258 rval = EFAULT; 23259 goto done; 23260 } 23261 break; 23262 } 23263 #else /* ! _MULTI_DATAMODEL */ 23264 copysz = sizeof (mhioc_resv_desc_list_t); 23265 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23266 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23267 "sd_persistent_reservation_in_read_resv: " 23268 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23269 rval = EFAULT; 23270 goto done; 23271 } 23272 #endif /* ! _MULTI_DATAMODEL */ 23273 23274 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23275 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23276 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23277 23278 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23279 data_len, data_bufp)) != 0) { 23280 goto done; 23281 } 23282 in = (sd_prin_readresv_t *)data_bufp; 23283 ptr->generation = BE_32(in->generation); 23284 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23285 23286 /* 23287 * Return the min(listsize, listlen) keys 23288 */ 23289 #ifdef _MULTI_DATAMODEL 23290 23291 switch (ddi_model_convert_from(flag & FMODELS)) { 23292 case DDI_MODEL_ILP32: 23293 resvlist32.listlen = resvlist.listlen; 23294 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23295 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23296 "sd_persistent_reservation_in_read_resv: " 23297 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23298 rval = EFAULT; 23299 goto done; 23300 } 23301 break; 23302 23303 case DDI_MODEL_NONE: 23304 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23305 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23306 "sd_persistent_reservation_in_read_resv: " 23307 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23308 rval = EFAULT; 23309 goto done; 23310 } 23311 break; 23312 } 23313 23314 #else /* ! _MULTI_DATAMODEL */ 23315 23316 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23317 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23318 "sd_persistent_reservation_in_read_resv: " 23319 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23320 rval = EFAULT; 23321 goto done; 23322 } 23323 23324 #endif /* ! 
_MULTI_DATAMODEL */ 23325 23326 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23327 bufp = resvlist.list; 23328 copysz = sizeof (mhioc_resv_desc_t); 23329 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23330 i++, readresv_ptr++, bufp++) { 23331 23332 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23333 MHIOC_RESV_KEY_SIZE); 23334 resvdesc.type = readresv_ptr->type; 23335 resvdesc.scope = readresv_ptr->scope; 23336 resvdesc.scope_specific_addr = 23337 BE_32(readresv_ptr->scope_specific_addr); 23338 23339 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23340 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23341 "sd_persistent_reservation_in_read_resv: " 23342 "failed ddi_copyout: resvlist\n"); 23343 rval = EFAULT; 23344 goto done; 23345 } 23346 } 23347 done: 23348 kmem_free(data_bufp, data_len); 23349 return (rval); 23350 } 23351 23352 23353 /* 23354 * Function: sr_change_blkmode() 23355 * 23356 * Description: This routine is the driver entry point for handling CD-ROM 23357 * block mode ioctl requests. Support for returning and changing 23358 * the current block size in use by the device is implemented. The 23359 * LBA size is changed via a MODE SELECT Block Descriptor. 23360 * 23361 * This routine issues a mode sense with an allocation length of 23362 * 12 bytes for the mode page header and a single block descriptor. 23363 * 23364 * Arguments: dev - the device 'dev_t' 23365 * cmd - the request type; one of CDROMGBLKMODE (get) or 23366 * CDROMSBLKMODE (set) 23367 * data - current block size or requested block size 23368 * flag - this argument is a pass through to ddi_copyxxx() directly 23369 * from the mode argument of ioctl(). 23370 * 23371 * Return Code: the code returned by sd_send_scsi_cmd() 23372 * EINVAL if invalid arguments are provided 23373 * EFAULT if ddi_copyxxx() fails 23374 * ENXIO if ddi_get_soft_state fails 23375 * EIO if invalid mode sense block descriptor length 23376 * 23377 */ 23378 23379 static int 23380 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23381 { 23382 struct sd_lun *un = NULL; 23383 struct mode_header *sense_mhp, *select_mhp; 23384 struct block_descriptor *sense_desc, *select_desc; 23385 int current_bsize; 23386 int rval = EINVAL; 23387 uchar_t *sense = NULL; 23388 uchar_t *select = NULL; 23389 23390 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23391 23392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23393 return (ENXIO); 23394 } 23395 23396 /* 23397 * The block length is changed via the Mode Select block descriptor; the 23398 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23399 * required as part of this routine. Therefore the mode sense allocation 23400 * length is specified to be the length of a mode page header and a 23401 * block descriptor. 
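 *
 * For example (illustrative values): a requested block size of 2048
 * (0x000800) is packed into the select block descriptor below as
 * blksize_hi = 0x00, blksize_mid = 0x08 and blksize_lo = 0x00,
 * matching the shift/mask arithmetic in the CDROMSBLKMODE case.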
23402 */ 23403 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23404 23405 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23406 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23407 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23408 "sr_change_blkmode: Mode Sense Failed\n"); 23409 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23410 return (rval); 23411 } 23412 23413 /* Check the block descriptor len to handle only 1 block descriptor */ 23414 sense_mhp = (struct mode_header *)sense; 23415 if ((sense_mhp->bdesc_length == 0) || 23416 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23417 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23418 "sr_change_blkmode: Mode Sense returned invalid block" 23419 " descriptor length\n"); 23420 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23421 return (EIO); 23422 } 23423 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23424 current_bsize = ((sense_desc->blksize_hi << 16) | 23425 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23426 23427 /* Process command */ 23428 switch (cmd) { 23429 case CDROMGBLKMODE: 23430 /* Return the block size obtained during the mode sense */ 23431 if (ddi_copyout(&current_bsize, (void *)data, 23432 sizeof (int), flag) != 0) 23433 rval = EFAULT; 23434 break; 23435 case CDROMSBLKMODE: 23436 /* Validate the requested block size */ 23437 switch (data) { 23438 case CDROM_BLK_512: 23439 case CDROM_BLK_1024: 23440 case CDROM_BLK_2048: 23441 case CDROM_BLK_2056: 23442 case CDROM_BLK_2336: 23443 case CDROM_BLK_2340: 23444 case CDROM_BLK_2352: 23445 case CDROM_BLK_2368: 23446 case CDROM_BLK_2448: 23447 case CDROM_BLK_2646: 23448 case CDROM_BLK_2647: 23449 break; 23450 default: 23451 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23452 "sr_change_blkmode: " 23453 "Block Size '%ld' Not Supported\n", data); 23454 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23455 return (EINVAL); 23456 } 23457 23458 /* 23459 * The current block size matches the requested block size so 23460 * there is no need to send the mode select to change the size 23461 */ 23462 if (current_bsize == data) { 23463 break; 23464 } 23465 23466 /* Build the select data for the requested block size */ 23467 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23468 select_mhp = (struct mode_header *)select; 23469 select_desc = 23470 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23471 /* 23472 * The LBA size is changed via the block descriptor, so the 23473 * descriptor is built according to the user data 23474 */ 23475 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23476 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23477 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23478 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23479 23480 /* Send the mode select for the requested block size */ 23481 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23482 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23483 SD_PATH_STANDARD)) != 0) { 23484 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23485 "sr_change_blkmode: Mode Select Failed\n"); 23486 /* 23487 * The mode select failed for the requested block size, 23488 * so reset the data for the original block size and 23489 * send it to the target. The error is indicated by the 23490 * return value for the failed mode select. 
23491 */ 23492 select_desc->blksize_hi = sense_desc->blksize_hi; 23493 select_desc->blksize_mid = sense_desc->blksize_mid; 23494 select_desc->blksize_lo = sense_desc->blksize_lo; 23495 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23496 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23497 SD_PATH_STANDARD); 23498 } else { 23499 ASSERT(!mutex_owned(SD_MUTEX(un))); 23500 mutex_enter(SD_MUTEX(un)); 23501 sd_update_block_info(un, (uint32_t)data, 0); 23502 mutex_exit(SD_MUTEX(un)); 23503 } 23504 break; 23505 default: 23506 /* should not reach here, but check anyway */ 23507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23508 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23509 rval = EINVAL; 23510 break; 23511 } 23512 23513 if (select) { 23514 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23515 } 23516 if (sense) { 23517 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23518 } 23519 return (rval); 23520 } 23521 23522 23523 /* 23524 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23525 * implement driver support for getting and setting the CD speed. The command 23526 * set used will be based on the device type. If the device has not been 23527 * identified as MMC, the Toshiba vendor specific mode page will be used. If 23528 * the device is MMC but does not support the Real Time Streaming feature, 23529 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23530 * be used to read the speed. 23531 */ 23532 23533 /* 23534 * Function: sr_change_speed() 23535 * 23536 * Description: This routine is the driver entry point for handling CD-ROM 23537 * drive speed ioctl requests for devices supporting the Toshiba 23538 * vendor specific drive speed mode page. Support for returning 23539 * and changing the current drive speed in use by the device is 23540 * implemented. 23541 * 23542 * Arguments: dev - the device 'dev_t' 23543 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23544 * CDROMSDRVSPEED (set) 23545 * data - current drive speed or requested drive speed 23546 * flag - this argument is a pass through to ddi_copyxxx() directly 23547 * from the mode argument of ioctl(). 23548 * 23549 * Return Code: the code returned by sd_send_scsi_cmd() 23550 * EINVAL if invalid arguments are provided 23551 * EFAULT if ddi_copyxxx() fails 23552 * ENXIO if ddi_get_soft_state fails 23553 * EIO if invalid mode sense block descriptor length 23554 */ 23555 23556 static int 23557 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23558 { 23559 struct sd_lun *un = NULL; 23560 struct mode_header *sense_mhp, *select_mhp; 23561 struct mode_speed *sense_page, *select_page; 23562 int current_speed; 23563 int rval = EINVAL; 23564 int bd_len; 23565 uchar_t *sense = NULL; 23566 uchar_t *select = NULL; 23567 23568 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23569 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23570 return (ENXIO); 23571 } 23572 23573 /* 23574 * Note: The drive speed is being modified here according to a Toshiba 23575 * vendor specific mode page (0x31). 
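 * (Note that the page's speed field carries a small code rather than
 * a KB/sec value: the handling below treats 0x2 as CDROM_TWELVE_SPEED
 * and passes the CDROM_NORMAL/DOUBLE/QUAD/MAXIMUM_SPEED codes through
 * unchanged.)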
23576 */ 23577 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23578 23579 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23580 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23581 SD_PATH_STANDARD)) != 0) { 23582 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23583 "sr_change_speed: Mode Sense Failed\n"); 23584 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23585 return (rval); 23586 } 23587 sense_mhp = (struct mode_header *)sense; 23588 23589 /* Check the block descriptor len to handle only 1 block descriptor */ 23590 bd_len = sense_mhp->bdesc_length; 23591 if (bd_len > MODE_BLK_DESC_LENGTH) { 23592 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23593 "sr_change_speed: Mode Sense returned invalid block " 23594 "descriptor length\n"); 23595 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23596 return (EIO); 23597 } 23598 23599 sense_page = (struct mode_speed *) 23600 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23601 current_speed = sense_page->speed; 23602 23603 /* Process command */ 23604 switch (cmd) { 23605 case CDROMGDRVSPEED: 23606 /* Return the drive speed obtained during the mode sense */ 23607 if (current_speed == 0x2) { 23608 current_speed = CDROM_TWELVE_SPEED; 23609 } 23610 if (ddi_copyout(&current_speed, (void *)data, 23611 sizeof (int), flag) != 0) { 23612 rval = EFAULT; 23613 } 23614 break; 23615 case CDROMSDRVSPEED: 23616 /* Validate the requested drive speed */ 23617 switch ((uchar_t)data) { 23618 case CDROM_TWELVE_SPEED: 23619 data = 0x2; 23620 /*FALLTHROUGH*/ 23621 case CDROM_NORMAL_SPEED: 23622 case CDROM_DOUBLE_SPEED: 23623 case CDROM_QUAD_SPEED: 23624 case CDROM_MAXIMUM_SPEED: 23625 break; 23626 default: 23627 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23628 "sr_change_speed: " 23629 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23630 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23631 return (EINVAL); 23632 } 23633 23634 /* 23635 * The current drive speed matches the requested drive speed, so 23636 * there is no need to send the mode select to change the speed 23637 */ 23638 if (current_speed == data) { 23639 break; 23640 } 23641 23642 /* Build the select data for the requested drive speed */ 23643 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23644 select_mhp = (struct mode_header *)select; 23645 select_mhp->bdesc_length = 0; 23646 select_page = 23647 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23650 select_page->mode_page.code = CDROM_MODE_SPEED; 23651 select_page->mode_page.length = 2; 23652 select_page->speed = (uchar_t)data; 23653 23654 /* Send the mode select for the requested drive speed */ 23655 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23656 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23657 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23658 /* 23659 * The mode select failed for the requested drive speed, 23660 * so reset the data for the original drive speed and 23661 * send it to the target. The error is indicated by the 23662 * return value for the failed mode select. 
23663 */ 23664 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23665 "sr_change_speed: Mode Select Failed\n"); 23666 select_page->speed = sense_page->speed; 23667 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23668 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23669 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23670 } 23671 break; 23672 default: 23673 /* should not reach here, but check anyway */ 23674 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23675 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23676 rval = EINVAL; 23677 break; 23678 } 23679 23680 if (select) { 23681 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23682 } 23683 if (sense) { 23684 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23685 } 23686 23687 return (rval); 23688 } 23689 23690 23691 /* 23692 * Function: sr_atapi_change_speed() 23693 * 23694 * Description: This routine is the driver entry point for handling CD-ROM 23695 * drive speed ioctl requests for MMC devices that do not support 23696 * the Real Time Streaming feature (0x107). 23697 * 23698 * Note: This routine will use the SET SPEED command, which may not 23699 * be supported by all devices. 23700 * 23701 * Arguments: dev - the device 'dev_t' 23702 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23703 * CDROMSDRVSPEED (set) 23704 * data - current drive speed or requested drive speed 23705 * flag - this argument is a pass through to ddi_copyxxx() directly 23706 * from the mode argument of ioctl(). 23707 * 23708 * Return Code: the code returned by sd_send_scsi_cmd() 23709 * EINVAL if invalid arguments are provided 23710 * EFAULT if ddi_copyxxx() fails 23711 * ENXIO if ddi_get_soft_state fails 23712 * EIO if invalid mode sense block descriptor length 23713 */ 23714 23715 static int 23716 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23717 { 23718 struct sd_lun *un; 23719 struct uscsi_cmd *com = NULL; 23720 struct mode_header_grp2 *sense_mhp; 23721 uchar_t *sense_page; 23722 uchar_t *sense = NULL; 23723 char cdb[CDB_GROUP5]; 23724 int bd_len; 23725 int current_speed = 0; 23726 int max_speed = 0; 23727 int rval; 23728 23729 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23730 23731 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23732 return (ENXIO); 23733 } 23734 23735 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23736 23737 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23738 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23739 SD_PATH_STANDARD)) != 0) { 23740 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23741 "sr_atapi_change_speed: Mode Sense Failed\n"); 23742 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23743 return (rval); 23744 } 23745 23746 /* Check the block descriptor len to handle only 1 block descriptor */ 23747 sense_mhp = (struct mode_header_grp2 *)sense; 23748 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23749 if (bd_len > MODE_BLK_DESC_LENGTH) { 23750 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23751 "sr_atapi_change_speed: Mode Sense returned invalid " 23752 "block descriptor length\n"); 23753 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23754 return (EIO); 23755 } 23756 23757 /* Calculate the current and maximum drive speeds */ 23758 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23759 current_speed = (sense_page[14] << 8) | sense_page[15]; 23760 max_speed = (sense_page[8] << 8) | sense_page[9]; 23761 23762 /* Process the command */ 23763 switch (cmd) { 23764 case CDROMGDRVSPEED: 23765 current_speed /= SD_SPEED_1X; 23766 if 
(ddi_copyout(¤t_speed, (void *)data, 23767 sizeof (int), flag) != 0) 23768 rval = EFAULT; 23769 break; 23770 case CDROMSDRVSPEED: 23771 /* Convert the speed code to KB/sec */ 23772 switch ((uchar_t)data) { 23773 case CDROM_NORMAL_SPEED: 23774 current_speed = SD_SPEED_1X; 23775 break; 23776 case CDROM_DOUBLE_SPEED: 23777 current_speed = 2 * SD_SPEED_1X; 23778 break; 23779 case CDROM_QUAD_SPEED: 23780 current_speed = 4 * SD_SPEED_1X; 23781 break; 23782 case CDROM_TWELVE_SPEED: 23783 current_speed = 12 * SD_SPEED_1X; 23784 break; 23785 case CDROM_MAXIMUM_SPEED: 23786 current_speed = 0xffff; 23787 break; 23788 default: 23789 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23790 "sr_atapi_change_speed: invalid drive speed %d\n", 23791 (uchar_t)data); 23792 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23793 return (EINVAL); 23794 } 23795 23796 /* Check the request against the drive's max speed. */ 23797 if (current_speed != 0xffff) { 23798 if (current_speed > max_speed) { 23799 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23800 return (EINVAL); 23801 } 23802 } 23803 23804 /* 23805 * Build and send the SET SPEED command 23806 * 23807 * Note: The SET SPEED (0xBB) command used in this routine is 23808 * obsolete per the SCSI MMC spec but still supported in the 23809 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 23810 * therefore the command is still implemented in this routine. 23811 */ 23812 bzero(cdb, sizeof (cdb)); 23813 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 23814 cdb[2] = (uchar_t)(current_speed >> 8); 23815 cdb[3] = (uchar_t)current_speed; 23816 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23817 com->uscsi_cdb = (caddr_t)cdb; 23818 com->uscsi_cdblen = CDB_GROUP5; 23819 com->uscsi_bufaddr = NULL; 23820 com->uscsi_buflen = 0; 23821 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23822 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 23823 break; 23824 default: 23825 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23826 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 23827 rval = EINVAL; 23828 } 23829 23830 if (sense) { 23831 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23832 } 23833 if (com) { 23834 kmem_free(com, sizeof (*com)); 23835 } 23836 return (rval); 23837 } 23838 23839 23840 /* 23841 * Function: sr_pause_resume() 23842 * 23843 * Description: This routine is the driver entry point for handling CD-ROM 23844 * pause/resume ioctl requests. This only affects the audio play 23845 * operation. 23846 * 23847 * Arguments: dev - the device 'dev_t' 23848 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 23849 * for setting the resume bit of the cdb. 
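 *
 *	      For illustration only (not part of the driver): assuming fd is
 *	      an open descriptor for the raw CD device, a user-level caller
 *	      issues these requests as plain ioctls, e.g.:
 *
 *		      if (ioctl(fd, CDROMPAUSE, 0) != 0)
 *			      perror("CDROMPAUSE");
 *		      if (ioctl(fd, CDROMRESUME, 0) != 0)
 *			      perror("CDROMRESUME");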
23850 * 23851 * Return Code: the code returned by sd_send_scsi_cmd() 23852 * EINVAL if invalid mode specified 23853 * 23854 */ 23855 23856 static int 23857 sr_pause_resume(dev_t dev, int cmd) 23858 { 23859 struct sd_lun *un; 23860 struct uscsi_cmd *com; 23861 char cdb[CDB_GROUP1]; 23862 int rval; 23863 23864 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23865 return (ENXIO); 23866 } 23867 23868 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23869 bzero(cdb, CDB_GROUP1); 23870 cdb[0] = SCMD_PAUSE_RESUME; 23871 switch (cmd) { 23872 case CDROMRESUME: 23873 cdb[8] = 1; 23874 break; 23875 case CDROMPAUSE: 23876 cdb[8] = 0; 23877 break; 23878 default: 23879 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 23880 " Command '%x' Not Supported\n", cmd); 23881 rval = EINVAL; 23882 goto done; 23883 } 23884 23885 com->uscsi_cdb = cdb; 23886 com->uscsi_cdblen = CDB_GROUP1; 23887 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23888 23889 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23890 SD_PATH_STANDARD); 23891 23892 done: 23893 kmem_free(com, sizeof (*com)); 23894 return (rval); 23895 } 23896 23897 23898 /* 23899 * Function: sr_play_msf() 23900 * 23901 * Description: This routine is the driver entry point for handling CD-ROM 23902 * ioctl requests to output the audio signals at the specified 23903 * starting address and continue the audio play until the specified 23904 * ending address (CDROMPLAYMSF) The address is in Minute Second 23905 * Frame (MSF) format. 23906 * 23907 * Arguments: dev - the device 'dev_t' 23908 * data - pointer to user provided audio msf structure, 23909 * specifying start/end addresses. 23910 * flag - this argument is a pass through to ddi_copyxxx() 23911 * directly from the mode argument of ioctl(). 
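 *
 *		For illustration only (not part of the driver; assumes fd is
 *		an open descriptor for the raw CD device), a CDROMPLAYMSF
 *		request is built as follows:
 *
 *			struct cdrom_msf msf;
 *			msf.cdmsf_min0 = 0;	start at 00:02:00
 *			msf.cdmsf_sec0 = 2;
 *			msf.cdmsf_frame0 = 0;
 *			msf.cdmsf_min1 = 5;	play through 05:00:00
 *			msf.cdmsf_sec1 = 0;
 *			msf.cdmsf_frame1 = 0;
 *			if (ioctl(fd, CDROMPLAYMSF, &msf) != 0)
 *				perror("CDROMPLAYMSF");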
23912 * 23913 * Return Code: the code returned by sd_send_scsi_cmd() 23914 * EFAULT if ddi_copyxxx() fails 23915 * ENXIO if fail ddi_get_soft_state 23916 * EINVAL if data pointer is NULL 23917 */ 23918 23919 static int 23920 sr_play_msf(dev_t dev, caddr_t data, int flag) 23921 { 23922 struct sd_lun *un; 23923 struct uscsi_cmd *com; 23924 struct cdrom_msf msf_struct; 23925 struct cdrom_msf *msf = &msf_struct; 23926 char cdb[CDB_GROUP1]; 23927 int rval; 23928 23929 if (data == NULL) { 23930 return (EINVAL); 23931 } 23932 23933 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23934 return (ENXIO); 23935 } 23936 23937 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 23938 return (EFAULT); 23939 } 23940 23941 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23942 bzero(cdb, CDB_GROUP1); 23943 cdb[0] = SCMD_PLAYAUDIO_MSF; 23944 if (un->un_f_cfg_playmsf_bcd == TRUE) { 23945 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 23946 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 23947 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 23948 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 23949 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 23950 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 23951 } else { 23952 cdb[3] = msf->cdmsf_min0; 23953 cdb[4] = msf->cdmsf_sec0; 23954 cdb[5] = msf->cdmsf_frame0; 23955 cdb[6] = msf->cdmsf_min1; 23956 cdb[7] = msf->cdmsf_sec1; 23957 cdb[8] = msf->cdmsf_frame1; 23958 } 23959 com->uscsi_cdb = cdb; 23960 com->uscsi_cdblen = CDB_GROUP1; 23961 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23962 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23963 SD_PATH_STANDARD); 23964 kmem_free(com, sizeof (*com)); 23965 return (rval); 23966 } 23967 23968 23969 /* 23970 * Function: sr_play_trkind() 23971 * 23972 * Description: This routine is the driver entry point for handling CD-ROM 23973 * ioctl requests to output the audio signals at the specified 23974 * starting address and continue the audio play until the specified 23975 * ending address (CDROMPLAYTRKIND). The address is in Track Index 23976 * format. 23977 * 23978 * Arguments: dev - the device 'dev_t' 23979 * data - pointer to user provided audio track/index structure, 23980 * specifying start/end addresses. 23981 * flag - this argument is a pass through to ddi_copyxxx() 23982 * directly from the mode argument of ioctl(). 
23983 * 23984 * Return Code: the code returned by sd_send_scsi_cmd() 23985 * EFAULT if ddi_copyxxx() fails 23986 * ENXIO if fail ddi_get_soft_state 23987 * EINVAL if data pointer is NULL 23988 */ 23989 23990 static int 23991 sr_play_trkind(dev_t dev, caddr_t data, int flag) 23992 { 23993 struct cdrom_ti ti_struct; 23994 struct cdrom_ti *ti = &ti_struct; 23995 struct uscsi_cmd *com = NULL; 23996 char cdb[CDB_GROUP1]; 23997 int rval; 23998 23999 if (data == NULL) { 24000 return (EINVAL); 24001 } 24002 24003 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24004 return (EFAULT); 24005 } 24006 24007 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24008 bzero(cdb, CDB_GROUP1); 24009 cdb[0] = SCMD_PLAYAUDIO_TI; 24010 cdb[4] = ti->cdti_trk0; 24011 cdb[5] = ti->cdti_ind0; 24012 cdb[7] = ti->cdti_trk1; 24013 cdb[8] = ti->cdti_ind1; 24014 com->uscsi_cdb = cdb; 24015 com->uscsi_cdblen = CDB_GROUP1; 24016 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24017 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24018 SD_PATH_STANDARD); 24019 kmem_free(com, sizeof (*com)); 24020 return (rval); 24021 } 24022 24023 24024 /* 24025 * Function: sr_read_all_subcodes() 24026 * 24027 * Description: This routine is the driver entry point for handling CD-ROM 24028 * ioctl requests to return raw subcode data while the target is 24029 * playing audio (CDROMSUBCODE). 24030 * 24031 * Arguments: dev - the device 'dev_t' 24032 * data - pointer to user provided cdrom subcode structure, 24033 * specifying the transfer length and address. 24034 * flag - this argument is a pass through to ddi_copyxxx() 24035 * directly from the mode argument of ioctl(). 24036 * 24037 * Return Code: the code returned by sd_send_scsi_cmd() 24038 * EFAULT if ddi_copyxxx() fails 24039 * ENXIO if fail ddi_get_soft_state 24040 * EINVAL if data pointer is NULL 24041 */ 24042 24043 static int 24044 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24045 { 24046 struct sd_lun *un = NULL; 24047 struct uscsi_cmd *com = NULL; 24048 struct cdrom_subcode *subcode = NULL; 24049 int rval; 24050 size_t buflen; 24051 char cdb[CDB_GROUP5]; 24052 24053 #ifdef _MULTI_DATAMODEL 24054 /* To support ILP32 applications in an LP64 world */ 24055 struct cdrom_subcode32 cdrom_subcode32; 24056 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24057 #endif 24058 if (data == NULL) { 24059 return (EINVAL); 24060 } 24061 24062 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24063 return (ENXIO); 24064 } 24065 24066 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24067 24068 #ifdef _MULTI_DATAMODEL 24069 switch (ddi_model_convert_from(flag & FMODELS)) { 24070 case DDI_MODEL_ILP32: 24071 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24072 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24073 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24074 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24075 return (EFAULT); 24076 } 24077 /* Convert the ILP32 uscsi data from the application to LP64 */ 24078 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24079 break; 24080 case DDI_MODEL_NONE: 24081 if (ddi_copyin(data, subcode, 24082 sizeof (struct cdrom_subcode), flag)) { 24083 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24084 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24085 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24086 return (EFAULT); 24087 } 24088 break; 24089 } 24090 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or
 *		MSF format per the user), track relative CD-ROM address (LBA
 *		or MSF format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom sub-channel structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
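 *
 *		For illustration only (not part of the driver; assumes fd is
 *		an open descriptor for the raw CD device), the Q sub-channel
 *		data is fetched as follows:
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;	request MSF addresses
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *				(void) printf("track %d index %d\n",
 *				    sc.cdsc_trk, sc.cdsc_ind);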
24164 * 24165 * Return Code: the code returned by sd_send_scsi_cmd() 24166 * EFAULT if ddi_copyxxx() fails 24167 * ENXIO if fail ddi_get_soft_state 24168 * EINVAL if data pointer is NULL 24169 */ 24170 24171 static int 24172 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24173 { 24174 struct sd_lun *un; 24175 struct uscsi_cmd *com; 24176 struct cdrom_subchnl subchanel; 24177 struct cdrom_subchnl *subchnl = &subchanel; 24178 char cdb[CDB_GROUP1]; 24179 caddr_t buffer; 24180 int rval; 24181 24182 if (data == NULL) { 24183 return (EINVAL); 24184 } 24185 24186 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24187 (un->un_state == SD_STATE_OFFLINE)) { 24188 return (ENXIO); 24189 } 24190 24191 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24192 return (EFAULT); 24193 } 24194 24195 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24196 bzero(cdb, CDB_GROUP1); 24197 cdb[0] = SCMD_READ_SUBCHANNEL; 24198 /* Set the MSF bit based on the user requested address format */ 24199 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24200 /* 24201 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24202 * returned 24203 */ 24204 cdb[2] = 0x40; 24205 /* 24206 * Set byte 3 to specify the return data format. A value of 0x01 24207 * indicates that the CD-ROM current position should be returned. 24208 */ 24209 cdb[3] = 0x01; 24210 cdb[8] = 0x10; 24211 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24212 com->uscsi_cdb = cdb; 24213 com->uscsi_cdblen = CDB_GROUP1; 24214 com->uscsi_bufaddr = buffer; 24215 com->uscsi_buflen = 16; 24216 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24217 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24218 SD_PATH_STANDARD); 24219 if (rval != 0) { 24220 kmem_free(buffer, 16); 24221 kmem_free(com, sizeof (*com)); 24222 return (rval); 24223 } 24224 24225 /* Process the returned Q sub-channel data */ 24226 subchnl->cdsc_audiostatus = buffer[1]; 24227 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24228 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24229 subchnl->cdsc_trk = buffer[6]; 24230 subchnl->cdsc_ind = buffer[7]; 24231 if (subchnl->cdsc_format & CDROM_LBA) { 24232 subchnl->cdsc_absaddr.lba = 24233 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24234 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24235 subchnl->cdsc_reladdr.lba = 24236 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24237 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24238 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24239 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24240 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24241 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24242 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24243 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24244 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24245 } else { 24246 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24247 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24248 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24249 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24250 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24251 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24252 } 24253 kmem_free(buffer, 16); 24254 kmem_free(com, sizeof (*com)); 24255 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24256 != 0) { 24257 return (EFAULT); 24258 } 24259 return (rval); 24260 } 24261 24262 24263 /* 24264 * Function: sr_read_tocentry() 24265 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc entry structure,
 *		     specifying the track # and the address format
 *		     (LBA or MSF).
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format  */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since it is
			 * obsoleted in one spec, it's better to return
			 * -1 for an invalid track so that we can still
			 * receive the rest of the TOC data.
 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc header structure,
 *		     specifying the starting and ending track numbers.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
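 *
 * As a user-level illustration (not part of the driver; it assumes fd is an
 * open descriptor for the raw CD device and buf points to at least 2048
 * bytes), a mode 1 read request is built around struct cdrom_read:
 *
 *	struct cdrom_read cr;
 *	cr.cdread_lba = 16;		block to read
 *	cr.cdread_bufaddr = buf;	destination buffer
 *	cr.cdread_buflen = 2048;	mode 1 user data is 2048 bytes/block
 *	if (ioctl(fd, CDROMREADMODE1, &cr) != 0)
 *		perror("CDROMREADMODE1");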
24531 * 24532 * In addition to support for the various data formats these routines also 24533 * include support for devices that implement only the direct access READ 24534 * commands (0x08, 0x28), devices that implement the READ_CD commands 24535 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24536 * READ CDXA commands (0xD8, 0xDB) 24537 */ 24538 24539 /* 24540 * Function: sr_read_mode1() 24541 * 24542 * Description: This routine is the driver entry point for handling CD-ROM 24543 * ioctl read mode1 requests (CDROMREADMODE1). 24544 * 24545 * Arguments: dev - the device 'dev_t' 24546 * data - pointer to user provided cd read structure specifying 24547 * the lba buffer address and length. 24548 * flag - this argument is a pass through to ddi_copyxxx() 24549 * directly from the mode argument of ioctl(). 24550 * 24551 * Return Code: the code returned by sd_send_scsi_cmd() 24552 * EFAULT if ddi_copyxxx() fails 24553 * ENXIO if fail ddi_get_soft_state 24554 * EINVAL if data pointer is NULL 24555 */ 24556 24557 static int 24558 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24559 { 24560 struct sd_lun *un; 24561 struct cdrom_read mode1_struct; 24562 struct cdrom_read *mode1 = &mode1_struct; 24563 int rval; 24564 #ifdef _MULTI_DATAMODEL 24565 /* To support ILP32 applications in an LP64 world */ 24566 struct cdrom_read32 cdrom_read32; 24567 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24568 #endif /* _MULTI_DATAMODEL */ 24569 24570 if (data == NULL) { 24571 return (EINVAL); 24572 } 24573 24574 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24575 (un->un_state == SD_STATE_OFFLINE)) { 24576 return (ENXIO); 24577 } 24578 24579 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24580 "sd_read_mode1: entry: un:0x%p\n", un); 24581 24582 #ifdef _MULTI_DATAMODEL 24583 switch (ddi_model_convert_from(flag & FMODELS)) { 24584 case DDI_MODEL_ILP32: 24585 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24586 return (EFAULT); 24587 } 24588 /* Convert the ILP32 uscsi data from the application to LP64 */ 24589 cdrom_read32tocdrom_read(cdrd32, mode1); 24590 break; 24591 case DDI_MODEL_NONE: 24592 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24593 return (EFAULT); 24594 } 24595 } 24596 #else /* ! _MULTI_DATAMODEL */ 24597 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24598 return (EFAULT); 24599 } 24600 #endif /* _MULTI_DATAMODEL */ 24601 24602 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24603 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24604 24605 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24606 "sd_read_mode1: exit: un:0x%p\n", un); 24607 24608 return (rval); 24609 } 24610 24611 24612 /* 24613 * Function: sr_read_cd_mode2() 24614 * 24615 * Description: This routine is the driver entry point for handling CD-ROM 24616 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24617 * support the READ CD (0xBE) command or the 1st generation 24618 * READ CD (0xD4) command. 24619 * 24620 * Arguments: dev - the device 'dev_t' 24621 * data - pointer to user provided cd read structure specifying 24622 * the lba buffer address and length. 24623 * flag - this argument is a pass through to ddi_copyxxx() 24624 * directly from the mode argument of ioctl(). 
24625 * 24626 * Return Code: the code returned by sd_send_scsi_cmd() 24627 * EFAULT if ddi_copyxxx() fails 24628 * ENXIO if fail ddi_get_soft_state 24629 * EINVAL if data pointer is NULL 24630 */ 24631 24632 static int 24633 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 24634 { 24635 struct sd_lun *un; 24636 struct uscsi_cmd *com; 24637 struct cdrom_read mode2_struct; 24638 struct cdrom_read *mode2 = &mode2_struct; 24639 uchar_t cdb[CDB_GROUP5]; 24640 int nblocks; 24641 int rval; 24642 #ifdef _MULTI_DATAMODEL 24643 /* To support ILP32 applications in an LP64 world */ 24644 struct cdrom_read32 cdrom_read32; 24645 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24646 #endif /* _MULTI_DATAMODEL */ 24647 24648 if (data == NULL) { 24649 return (EINVAL); 24650 } 24651 24652 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24653 (un->un_state == SD_STATE_OFFLINE)) { 24654 return (ENXIO); 24655 } 24656 24657 #ifdef _MULTI_DATAMODEL 24658 switch (ddi_model_convert_from(flag & FMODELS)) { 24659 case DDI_MODEL_ILP32: 24660 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24661 return (EFAULT); 24662 } 24663 /* Convert the ILP32 uscsi data from the application to LP64 */ 24664 cdrom_read32tocdrom_read(cdrd32, mode2); 24665 break; 24666 case DDI_MODEL_NONE: 24667 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24668 return (EFAULT); 24669 } 24670 break; 24671 } 24672 24673 #else /* ! _MULTI_DATAMODEL */ 24674 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24675 return (EFAULT); 24676 } 24677 #endif /* _MULTI_DATAMODEL */ 24678 24679 bzero(cdb, sizeof (cdb)); 24680 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 24681 /* Read command supported by 1st generation atapi drives */ 24682 cdb[0] = SCMD_READ_CDD4; 24683 } else { 24684 /* Universal CD Access Command */ 24685 cdb[0] = SCMD_READ_CD; 24686 } 24687 24688 /* 24689 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 24690 */ 24691 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 24692 24693 /* set the start address */ 24694 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 24695 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 24696 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24697 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 24698 24699 /* set the transfer length */ 24700 nblocks = mode2->cdread_buflen / 2336; 24701 cdb[6] = (uchar_t)(nblocks >> 16); 24702 cdb[7] = (uchar_t)(nblocks >> 8); 24703 cdb[8] = (uchar_t)nblocks; 24704 24705 /* set the filter bits */ 24706 cdb[9] = CDROM_READ_CD_USERDATA; 24707 24708 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24709 com->uscsi_cdb = (caddr_t)cdb; 24710 com->uscsi_cdblen = sizeof (cdb); 24711 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24712 com->uscsi_buflen = mode2->cdread_buflen; 24713 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24714 24715 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24716 SD_PATH_STANDARD); 24717 kmem_free(com, sizeof (*com)); 24718 return (rval); 24719 } 24720 24721 24722 /* 24723 * Function: sr_read_mode2() 24724 * 24725 * Description: This routine is the driver entry point for handling CD-ROM 24726 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24727 * do not support the READ CD (0xBE) command. 24728 * 24729 * Arguments: dev - the device 'dev_t' 24730 * data - pointer to user provided cd read structure specifying 24731 * the lba buffer address and length. 24732 * flag - this argument is a pass through to ddi_copyxxx() 24733 * directly from the mode argument of ioctl(). 
24734 * 24735 * Return Code: the code returned by sd_send_scsi_cmd() 24736 * EFAULT if ddi_copyxxx() fails 24737 * ENXIO if fail ddi_get_soft_state 24738 * EINVAL if data pointer is NULL 24739 * EIO if fail to reset block size 24740 * EAGAIN if commands are in progress in the driver 24741 */ 24742 24743 static int 24744 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24745 { 24746 struct sd_lun *un; 24747 struct cdrom_read mode2_struct; 24748 struct cdrom_read *mode2 = &mode2_struct; 24749 int rval; 24750 uint32_t restore_blksize; 24751 struct uscsi_cmd *com; 24752 uchar_t cdb[CDB_GROUP0]; 24753 int nblocks; 24754 24755 #ifdef _MULTI_DATAMODEL 24756 /* To support ILP32 applications in an LP64 world */ 24757 struct cdrom_read32 cdrom_read32; 24758 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24759 #endif /* _MULTI_DATAMODEL */ 24760 24761 if (data == NULL) { 24762 return (EINVAL); 24763 } 24764 24765 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24766 (un->un_state == SD_STATE_OFFLINE)) { 24767 return (ENXIO); 24768 } 24769 24770 /* 24771 * Because this routine will update the device and driver block size 24772 * being used we want to make sure there are no commands in progress. 24773 * If commands are in progress the user will have to try again. 24774 * 24775 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24776 * in sdioctl to protect commands from sdioctl through to the top of 24777 * sd_uscsi_strategy. See sdioctl for details. 24778 */ 24779 mutex_enter(SD_MUTEX(un)); 24780 if (un->un_ncmds_in_driver != 1) { 24781 mutex_exit(SD_MUTEX(un)); 24782 return (EAGAIN); 24783 } 24784 mutex_exit(SD_MUTEX(un)); 24785 24786 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24787 "sd_read_mode2: entry: un:0x%p\n", un); 24788 24789 #ifdef _MULTI_DATAMODEL 24790 switch (ddi_model_convert_from(flag & FMODELS)) { 24791 case DDI_MODEL_ILP32: 24792 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24793 return (EFAULT); 24794 } 24795 /* Convert the ILP32 uscsi data from the application to LP64 */ 24796 cdrom_read32tocdrom_read(cdrd32, mode2); 24797 break; 24798 case DDI_MODEL_NONE: 24799 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24800 return (EFAULT); 24801 } 24802 break; 24803 } 24804 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't switch back to mode 1\n");
		/*
		 * If the READ command succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is
 *		a legacy implementation based upon a vendor specific mode
 *		page.
 *
 * Arguments: dev - the device 'dev_t'
 *	      blksize - the block size to be set; either 2336 (for mode 2
 *			reads) or 512.
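 *
 *		Note (inferred from the code below, not from a published
 *		spec): the 20 byte select data carries the new block size in
 *		bytes 10-11 (big-endian) and uses bit 0 of byte 14 to select
 *		the 2336 byte (mode 2) sector format.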
24890 * 24891 * Return Code: the code returned by sd_send_scsi_cmd() 24892 * EFAULT if ddi_copyxxx() fails 24893 * ENXIO if fail ddi_get_soft_state 24894 * EINVAL if data pointer is NULL 24895 */ 24896 24897 static int 24898 sr_sector_mode(dev_t dev, uint32_t blksize) 24899 { 24900 struct sd_lun *un; 24901 uchar_t *sense; 24902 uchar_t *select; 24903 int rval; 24904 24905 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24906 (un->un_state == SD_STATE_OFFLINE)) { 24907 return (ENXIO); 24908 } 24909 24910 sense = kmem_zalloc(20, KM_SLEEP); 24911 24912 /* Note: This is a vendor specific mode page (0x81) */ 24913 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 24914 SD_PATH_STANDARD)) != 0) { 24915 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24916 "sr_sector_mode: Mode Sense failed\n"); 24917 kmem_free(sense, 20); 24918 return (rval); 24919 } 24920 select = kmem_zalloc(20, KM_SLEEP); 24921 select[3] = 0x08; 24922 select[10] = ((blksize >> 8) & 0xff); 24923 select[11] = (blksize & 0xff); 24924 select[12] = 0x01; 24925 select[13] = 0x06; 24926 select[14] = sense[14]; 24927 select[15] = sense[15]; 24928 if (blksize == SD_MODE2_BLKSIZE) { 24929 select[14] |= 0x01; 24930 } 24931 24932 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 24933 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24934 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24935 "sr_sector_mode: Mode Select failed\n"); 24936 } else { 24937 /* 24938 * Only update the softstate block size if we successfully 24939 * changed the device block mode. 24940 */ 24941 mutex_enter(SD_MUTEX(un)); 24942 sd_update_block_info(un, blksize, 0); 24943 mutex_exit(SD_MUTEX(un)); 24944 } 24945 kmem_free(sense, 20); 24946 kmem_free(select, 20); 24947 return (rval); 24948 } 24949 24950 24951 /* 24952 * Function: sr_read_cdda() 24953 * 24954 * Description: This routine is the driver entry point for handling CD-ROM 24955 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 24956 * the target supports CDDA these requests are handled via a vendor 24957 * specific command (0xD8) If the target does not support CDDA 24958 * these requests are handled via the READ CD command (0xBE). 24959 * 24960 * Arguments: dev - the device 'dev_t' 24961 * data - pointer to user provided CD-DA structure specifying 24962 * the track starting address, transfer length, and 24963 * subcode options. 24964 * flag - this argument is a pass through to ddi_copyxxx() 24965 * directly from the mode argument of ioctl(). 
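 *
 *		For illustration only (not part of the driver; assumes fd is
 *		an open descriptor for the raw CD device and buf holds
 *		10 * 2352 bytes), a CDROMCDDA request without subcode data:
 *
 *			struct cdrom_cdda cdda;
 *			cdda.cdda_addr = 0;	starting block address
 *			cdda.cdda_length = 10;	blocks to read
 *			cdda.cdda_data = buf;
 *			cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *			if (ioctl(fd, CDROMCDDA, &cdda) != 0)
 *				perror("CDROMCDDA");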
24966 * 24967 * Return Code: the code returned by sd_send_scsi_cmd() 24968 * EFAULT if ddi_copyxxx() fails 24969 * ENXIO if fail ddi_get_soft_state 24970 * EINVAL if invalid arguments are provided 24971 * ENOTTY 24972 */ 24973 24974 static int 24975 sr_read_cdda(dev_t dev, caddr_t data, int flag) 24976 { 24977 struct sd_lun *un; 24978 struct uscsi_cmd *com; 24979 struct cdrom_cdda *cdda; 24980 int rval; 24981 size_t buflen; 24982 char cdb[CDB_GROUP5]; 24983 24984 #ifdef _MULTI_DATAMODEL 24985 /* To support ILP32 applications in an LP64 world */ 24986 struct cdrom_cdda32 cdrom_cdda32; 24987 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 24988 #endif /* _MULTI_DATAMODEL */ 24989 24990 if (data == NULL) { 24991 return (EINVAL); 24992 } 24993 24994 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24995 return (ENXIO); 24996 } 24997 24998 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 24999 25000 #ifdef _MULTI_DATAMODEL 25001 switch (ddi_model_convert_from(flag & FMODELS)) { 25002 case DDI_MODEL_ILP32: 25003 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25004 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25005 "sr_read_cdda: ddi_copyin Failed\n"); 25006 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25007 return (EFAULT); 25008 } 25009 /* Convert the ILP32 uscsi data from the application to LP64 */ 25010 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25011 break; 25012 case DDI_MODEL_NONE: 25013 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25014 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25015 "sr_read_cdda: ddi_copyin Failed\n"); 25016 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25017 return (EFAULT); 25018 } 25019 break; 25020 } 25021 #else /* ! _MULTI_DATAMODEL */ 25022 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25023 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25024 "sr_read_cdda: ddi_copyin Failed\n"); 25025 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25026 return (EFAULT); 25027 } 25028 #endif /* _MULTI_DATAMODEL */ 25029 25030 /* 25031 * Since MMC-2 expects max 3 bytes for length, check if the 25032 * length input is greater than 3 bytes 25033 */ 25034 if ((cdda->cdda_length & 0xFF000000) != 0) { 25035 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25036 "cdrom transfer length too large: %d (limit %d)\n", 25037 cdda->cdda_length, 0xFFFFFF); 25038 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25039 return (EINVAL); 25040 } 25041 25042 switch (cdda->cdda_subcode) { 25043 case CDROM_DA_NO_SUBCODE: 25044 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25045 break; 25046 case CDROM_DA_SUBQ: 25047 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25048 break; 25049 case CDROM_DA_ALL_SUBCODE: 25050 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25051 break; 25052 case CDROM_DA_SUBCODE_ONLY: 25053 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25054 break; 25055 default: 25056 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25057 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25058 cdda->cdda_subcode); 25059 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25060 return (EINVAL); 25061 } 25062 25063 /* Build and send the command */ 25064 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25065 bzero(cdb, CDB_GROUP5); 25066 25067 if (un->un_f_cfg_cdda == TRUE) { 25068 cdb[0] = (char)SCMD_READ_CD; 25069 cdb[1] = 0x04; 25070 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25071 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25072 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25073 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25074 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25075 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25076 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25077 cdb[9] = 0x10; 25078 switch (cdda->cdda_subcode) { 25079 case CDROM_DA_NO_SUBCODE : 25080 cdb[10] = 0x0; 25081 break; 25082 case CDROM_DA_SUBQ : 25083 cdb[10] = 0x2; 25084 break; 25085 case CDROM_DA_ALL_SUBCODE : 25086 cdb[10] = 0x1; 25087 break; 25088 case CDROM_DA_SUBCODE_ONLY : 25089 /* FALLTHROUGH */ 25090 default : 25091 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25092 kmem_free(com, sizeof (*com)); 25093 return (ENOTTY); 25094 } 25095 } else { 25096 cdb[0] = (char)SCMD_READ_CDDA; 25097 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25098 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25099 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25100 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25101 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25102 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25103 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25104 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25105 cdb[10] = cdda->cdda_subcode; 25106 } 25107 25108 com->uscsi_cdb = cdb; 25109 com->uscsi_cdblen = CDB_GROUP5; 25110 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25111 com->uscsi_buflen = buflen; 25112 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25113 25114 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25115 SD_PATH_STANDARD); 25116 25117 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25118 kmem_free(com, sizeof (*com)); 25119 return (rval); 25120 } 25121 25122 25123 /* 25124 * Function: sr_read_cdxa() 25125 * 25126 * Description: This routine is the driver entry point for handling CD-ROM 25127 * ioctl requests to return CD-XA (Extended Architecture) data. 25128 * (CDROMCDXA). 25129 * 25130 * Arguments: dev - the device 'dev_t' 25131 * data - pointer to user provided CD-XA structure specifying 25132 * the data starting address, transfer length, and format 25133 * flag - this argument is a pass through to ddi_copyxxx() 25134 * directly from the mode argument of ioctl(). 25135 * 25136 * Return Code: the code returned by sd_send_scsi_cmd() 25137 * EFAULT if ddi_copyxxx() fails 25138 * ENXIO if fail ddi_get_soft_state 25139 * EINVAL if data pointer is NULL 25140 */ 25141 25142 static int 25143 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25144 { 25145 struct sd_lun *un; 25146 struct uscsi_cmd *com; 25147 struct cdrom_cdxa *cdxa; 25148 int rval; 25149 size_t buflen; 25150 char cdb[CDB_GROUP5]; 25151 uchar_t read_flags; 25152 25153 #ifdef _MULTI_DATAMODEL 25154 /* To support ILP32 applications in an LP64 world */ 25155 struct cdrom_cdxa32 cdrom_cdxa32; 25156 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25157 #endif /* _MULTI_DATAMODEL */ 25158 25159 if (data == NULL) { 25160 return (EINVAL); 25161 } 25162 25163 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25164 return (ENXIO); 25165 } 25166 25167 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25168 25169 #ifdef _MULTI_DATAMODEL 25170 switch (ddi_model_convert_from(flag & FMODELS)) { 25171 case DDI_MODEL_ILP32: 25172 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25173 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25174 return (EFAULT); 25175 } 25176 /* 25177 * Convert the ILP32 uscsi data from the 25178 * application to LP64 for internal use. 
 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of CD-XA data.
25245 */ 25246 cdb[0] = (char)SCMD_READ_CDXA; 25247 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25248 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25249 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25250 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25251 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25252 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25253 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25254 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25255 cdb[10] = cdxa->cdxa_format; 25256 } 25257 com->uscsi_cdb = cdb; 25258 com->uscsi_cdblen = CDB_GROUP5; 25259 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25260 com->uscsi_buflen = buflen; 25261 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25262 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25263 SD_PATH_STANDARD); 25264 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25265 kmem_free(com, sizeof (*com)); 25266 return (rval); 25267 } 25268 25269 25270 /* 25271 * Function: sr_eject() 25272 * 25273 * Description: This routine is the driver entry point for handling CD-ROM 25274 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25275 * 25276 * Arguments: dev - the device 'dev_t' 25277 * 25278 * Return Code: the code returned by sd_send_scsi_cmd() 25279 */ 25280 25281 static int 25282 sr_eject(dev_t dev) 25283 { 25284 struct sd_lun *un; 25285 int rval; 25286 25287 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25288 (un->un_state == SD_STATE_OFFLINE)) { 25289 return (ENXIO); 25290 } 25291 25292 /* 25293 * To prevent race conditions with the eject 25294 * command, keep track of an eject command as 25295 * it progresses. If we are already handling 25296 * an eject command in the driver for the given 25297 * unit and another request to eject is received 25298 * immediately return EAGAIN so we don't lose 25299 * the command if the current eject command fails. 25300 */ 25301 mutex_enter(SD_MUTEX(un)); 25302 if (un->un_f_ejecting == TRUE) { 25303 mutex_exit(SD_MUTEX(un)); 25304 return (EAGAIN); 25305 } 25306 un->un_f_ejecting = TRUE; 25307 mutex_exit(SD_MUTEX(un)); 25308 25309 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25310 SD_PATH_STANDARD)) != 0) { 25311 mutex_enter(SD_MUTEX(un)); 25312 un->un_f_ejecting = FALSE; 25313 mutex_exit(SD_MUTEX(un)); 25314 return (rval); 25315 } 25316 25317 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25318 SD_PATH_STANDARD); 25319 25320 if (rval == 0) { 25321 mutex_enter(SD_MUTEX(un)); 25322 sr_ejected(un); 25323 un->un_mediastate = DKIO_EJECTED; 25324 un->un_f_ejecting = FALSE; 25325 cv_broadcast(&un->un_state_cv); 25326 mutex_exit(SD_MUTEX(un)); 25327 } else { 25328 mutex_enter(SD_MUTEX(un)); 25329 un->un_f_ejecting = FALSE; 25330 mutex_exit(SD_MUTEX(un)); 25331 } 25332 return (rval); 25333 } 25334 25335 25336 /* 25337 * Function: sr_ejected() 25338 * 25339 * Description: This routine updates the soft state structure to invalidate the 25340 * geometry information after the media has been ejected or a 25341 * media eject has been detected. 
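 *
 *		Note: SD_MUTEX must be held on entry (this is asserted below);
 *		it is dropped briefly around the cmlb_invalidate() call so the
 *		softstate mutex is not held across the call into the cmlb
 *		layer.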
25342 * 25343 * Arguments: un - driver soft state (unit) structure 25344 */ 25345 25346 static void 25347 sr_ejected(struct sd_lun *un) 25348 { 25349 struct sd_errstats *stp; 25350 25351 ASSERT(un != NULL); 25352 ASSERT(mutex_owned(SD_MUTEX(un))); 25353 25354 un->un_f_blockcount_is_valid = FALSE; 25355 un->un_f_tgt_blocksize_is_valid = FALSE; 25356 mutex_exit(SD_MUTEX(un)); 25357 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25358 mutex_enter(SD_MUTEX(un)); 25359 25360 if (un->un_errstats != NULL) { 25361 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25362 stp->sd_capacity.value.ui64 = 0; 25363 } 25364 } 25365 25366 25367 /* 25368 * Function: sr_check_wp() 25369 * 25370 * Description: This routine checks the write protection of a removable 25371 * media disk and hotpluggable devices via the write protect bit of 25372 * the Mode Page Header device specific field. Some devices choke 25373 * on unsupported mode page. In order to workaround this issue, 25374 * this routine has been implemented to use 0x3f mode page(request 25375 * for all pages) for all device types. 25376 * 25377 * Arguments: dev - the device 'dev_t' 25378 * 25379 * Return Code: int indicating if the device is write protected (1) or not (0) 25380 * 25381 * Context: Kernel thread. 25382 * 25383 */ 25384 25385 static int 25386 sr_check_wp(dev_t dev) 25387 { 25388 struct sd_lun *un; 25389 uchar_t device_specific; 25390 uchar_t *sense; 25391 int hdrlen; 25392 int rval = FALSE; 25393 25394 /* 25395 * Note: The return codes for this routine should be reworked to 25396 * properly handle the case of a NULL softstate. 25397 */ 25398 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25399 return (FALSE); 25400 } 25401 25402 if (un->un_f_cfg_is_atapi == TRUE) { 25403 /* 25404 * The mode page contents are not required; set the allocation 25405 * length for the mode page header only 25406 */ 25407 hdrlen = MODE_HEADER_LENGTH_GRP2; 25408 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25409 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25410 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25411 goto err_exit; 25412 device_specific = 25413 ((struct mode_header_grp2 *)sense)->device_specific; 25414 } else { 25415 hdrlen = MODE_HEADER_LENGTH; 25416 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25417 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25418 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25419 goto err_exit; 25420 device_specific = 25421 ((struct mode_header *)sense)->device_specific; 25422 } 25423 25424 /* 25425 * Write protect mode sense failed; not all disks 25426 * understand this query. Return FALSE assuming that 25427 * these devices are not writable. 25428 */ 25429 if (device_specific & WRITE_PROTECT) { 25430 rval = TRUE; 25431 } 25432 25433 err_exit: 25434 kmem_free(sense, hdrlen); 25435 return (rval); 25436 } 25437 25438 /* 25439 * Function: sr_volume_ctrl() 25440 * 25441 * Description: This routine is the driver entry point for handling CD-ROM 25442 * audio output volume ioctl requests. (CDROMVOLCTRL) 25443 * 25444 * Arguments: dev - the device 'dev_t' 25445 * data - pointer to user audio volume control structure 25446 * flag - this argument is a pass through to ddi_copyxxx() 25447 * directly from the mode argument of ioctl(). 
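 *
 *		For illustration only (not part of the driver; assumes fd is
 *		an open descriptor for the raw CD device). Channels 2 and 3
 *		are currently ignored by this routine:
 *
 *			struct cdrom_volctrl vol;
 *			vol.channel0 = 255;	left channel, full volume
 *			vol.channel1 = 255;	right channel, full volume
 *			vol.channel2 = 0;
 *			vol.channel3 = 0;
 *			if (ioctl(fd, CDROMVOLCTRL, &vol) != 0)
 *				perror("CDROMVOLCTRL");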
25448 * 25449 * Return Code: the code returned by sd_send_scsi_cmd() 25450 * EFAULT if ddi_copyxxx() fails 25451 * ENXIO if fail ddi_get_soft_state 25452 * EINVAL if data pointer is NULL 25453 * 25454 */ 25455 25456 static int 25457 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25458 { 25459 struct sd_lun *un; 25460 struct cdrom_volctrl volume; 25461 struct cdrom_volctrl *vol = &volume; 25462 uchar_t *sense_page; 25463 uchar_t *select_page; 25464 uchar_t *sense; 25465 uchar_t *select; 25466 int sense_buflen; 25467 int select_buflen; 25468 int rval; 25469 25470 if (data == NULL) { 25471 return (EINVAL); 25472 } 25473 25474 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25475 (un->un_state == SD_STATE_OFFLINE)) { 25476 return (ENXIO); 25477 } 25478 25479 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25480 return (EFAULT); 25481 } 25482 25483 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25484 struct mode_header_grp2 *sense_mhp; 25485 struct mode_header_grp2 *select_mhp; 25486 int bd_len; 25487 25488 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25489 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25490 MODEPAGE_AUDIO_CTRL_LEN; 25491 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25492 select = kmem_zalloc(select_buflen, KM_SLEEP); 25493 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25494 sense_buflen, MODEPAGE_AUDIO_CTRL, 25495 SD_PATH_STANDARD)) != 0) { 25496 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25497 "sr_volume_ctrl: Mode Sense Failed\n"); 25498 kmem_free(sense, sense_buflen); 25499 kmem_free(select, select_buflen); 25500 return (rval); 25501 } 25502 sense_mhp = (struct mode_header_grp2 *)sense; 25503 select_mhp = (struct mode_header_grp2 *)select; 25504 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25505 sense_mhp->bdesc_length_lo; 25506 if (bd_len > MODE_BLK_DESC_LENGTH) { 25507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25508 "sr_volume_ctrl: Mode Sense returned invalid " 25509 "block descriptor length\n"); 25510 kmem_free(sense, sense_buflen); 25511 kmem_free(select, select_buflen); 25512 return (EIO); 25513 } 25514 sense_page = (uchar_t *) 25515 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25516 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25517 select_mhp->length_msb = 0; 25518 select_mhp->length_lsb = 0; 25519 select_mhp->bdesc_length_hi = 0; 25520 select_mhp->bdesc_length_lo = 0; 25521 } else { 25522 struct mode_header *sense_mhp, *select_mhp; 25523 25524 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25525 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25526 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25527 select = kmem_zalloc(select_buflen, KM_SLEEP); 25528 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25529 sense_buflen, MODEPAGE_AUDIO_CTRL, 25530 SD_PATH_STANDARD)) != 0) { 25531 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25532 "sr_volume_ctrl: Mode Sense Failed\n"); 25533 kmem_free(sense, sense_buflen); 25534 kmem_free(select, select_buflen); 25535 return (rval); 25536 } 25537 sense_mhp = (struct mode_header *)sense; 25538 select_mhp = (struct mode_header *)select; 25539 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25540 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25541 "sr_volume_ctrl: Mode Sense returned invalid " 25542 "block descriptor length\n"); 25543 kmem_free(sense, sense_buflen); 25544 kmem_free(select, select_buflen); 25545 return (EIO); 25546 } 25547 sense_page = (uchar_t *) 25548 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 25549 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 25550 select_mhp->length = 0; 25551 select_mhp->bdesc_length = 0; 25552 } 25553 /* 25554 * Note: An audio control data structure could be created and overlayed 25555 * on the following in place of the array indexing method implemented. 25556 */ 25557 25558 /* Build the select data for the user volume data */ 25559 select_page[0] = MODEPAGE_AUDIO_CTRL; 25560 select_page[1] = 0xE; 25561 /* Set the immediate bit */ 25562 select_page[2] = 0x04; 25563 /* Zero out reserved fields */ 25564 select_page[3] = 0x00; 25565 select_page[4] = 0x00; 25566 /* Return sense data for fields not to be modified */ 25567 select_page[5] = sense_page[5]; 25568 select_page[6] = sense_page[6]; 25569 select_page[7] = sense_page[7]; 25570 /* Set the user specified volume levels for channel 0 and 1 */ 25571 select_page[8] = 0x01; 25572 select_page[9] = vol->channel0; 25573 select_page[10] = 0x02; 25574 select_page[11] = vol->channel1; 25575 /* Channel 2 and 3 are currently unsupported so return the sense data */ 25576 select_page[12] = sense_page[12]; 25577 select_page[13] = sense_page[13]; 25578 select_page[14] = sense_page[14]; 25579 select_page[15] = sense_page[15]; 25580 25581 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25582 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 25583 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25584 } else { 25585 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 25586 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25587 } 25588 25589 kmem_free(sense, sense_buflen); 25590 kmem_free(select, select_buflen); 25591 return (rval); 25592 } 25593 25594 25595 /* 25596 * Function: sr_read_sony_session_offset() 25597 * 25598 * Description: This routine is the driver entry point for handling CD-ROM 25599 * ioctl requests for session offset information. (CDROMREADOFFSET) 25600 * The address of the first track in the last session of a 25601 * multi-session CD-ROM is returned 25602 * 25603 * Note: This routine uses a vendor specific key value in the 25604 * command control field without implementing any vendor check here 25605 * or in the ioctl routine. 25606 * 25607 * Arguments: dev - the device 'dev_t' 25608 * data - pointer to an int to hold the requested address 25609 * flag - this argument is a pass through to ddi_copyxxx() 25610 * directly from the mode argument of ioctl(). 25611 * 25612 * Return Code: the code returned by sd_send_scsi_cmd() 25613 * EFAULT if ddi_copyxxx() fails 25614 * ENXIO if fail ddi_get_soft_state 25615 * EINVAL if data pointer is NULL 25616 */ 25617 25618 static int 25619 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 25620 { 25621 struct sd_lun *un; 25622 struct uscsi_cmd *com; 25623 caddr_t buffer; 25624 char cdb[CDB_GROUP1]; 25625 int session_offset = 0; 25626 int rval; 25627 25628 if (data == NULL) { 25629 return (EINVAL); 25630 } 25631 25632 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25633 (un->un_state == SD_STATE_OFFLINE)) { 25634 return (ENXIO); 25635 } 25636 25637 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 25638 bzero(cdb, CDB_GROUP1); 25639 cdb[0] = SCMD_READ_TOC; 25640 /* 25641 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 25642 * (4 byte TOC response header + 8 byte response data) 25643 */ 25644 cdb[8] = SONY_SESSION_OFFSET_LEN; 25645 /* Byte 9 is the control byte. 
A vendor specific value is used */ 25646 cdb[9] = SONY_SESSION_OFFSET_KEY; 25647 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25648 com->uscsi_cdb = cdb; 25649 com->uscsi_cdblen = CDB_GROUP1; 25650 com->uscsi_bufaddr = buffer; 25651 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 25652 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25653 25654 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25655 SD_PATH_STANDARD); 25656 if (rval != 0) { 25657 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 25658 kmem_free(com, sizeof (*com)); 25659 return (rval); 25660 } 25661 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 25662 session_offset = 25663 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25664 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25665 /* 25666 * The returned offset is in current lbasize blocks. Convert it 25667 * to 2K blocks before returning it to the user. 25668 */ 25669 if (un->un_tgt_blocksize == CDROM_BLK_512) { 25670 session_offset >>= 2; 25671 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 25672 session_offset >>= 1; 25673 } 25674 } 25675 25676 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 25677 rval = EFAULT; 25678 } 25679 25680 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 25681 kmem_free(com, sizeof (*com)); 25682 return (rval); 25683 } 25684 25685 25686 /* 25687 * Function: sd_wm_cache_constructor() 25688 * 25689 * Description: Cache Constructor for the wmap cache for the read/modify/write 25690 * devices. 25691 * 25692 * Arguments: wm - A pointer to the sd_w_map to be initialized. 25693 * un - sd_lun structure for the device. 25694 * flag - the km flags passed to constructor 25695 * 25696 * Return Code: 0 on success. 25697 * -1 on failure. 25698 */ 25699 25700 /*ARGSUSED*/ 25701 static int 25702 sd_wm_cache_constructor(void *wm, void *un, int flags) 25703 { 25704 bzero(wm, sizeof (struct sd_w_map)); 25705 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 25706 return (0); 25707 } 25708 25709 25710 /* 25711 * Function: sd_wm_cache_destructor() 25712 * 25713 * Description: Cache destructor for the wmap cache for the read/modify/write 25714 * devices. 25715 * 25716 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 25717 * un - sd_lun structure for the device. 25718 */ 25719 /*ARGSUSED*/ 25720 static void 25721 sd_wm_cache_destructor(void *wm, void *un) 25722 { 25723 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 25724 } 25725 25726 25727 /* 25728 * Function: sd_range_lock() 25729 * 25730 * Description: Lock the specified range of blocks to ensure that a read- 25731 * modify-write is atomic and that no other I/O writes 25732 * to the same location. The range is specified in terms 25733 * of start and end blocks. Block numbers are the actual 25734 * media block numbers, not system block numbers. 25735 * 25736 * Arguments: un - sd_lun structure for the device. 25737 * startb - The starting block number 25738 * endb - The end block number 25739 * typ - type of i/o - simple/read_modify_write 25740 * 25741 * Return Code: wm - pointer to the wmap structure. 25742 * 25743 * Context: This routine can sleep.
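 *
 * A sketch of the intended usage pattern (illustrative only), as seen
 * in the read-modify-write path:
 *
 *	struct sd_w_map *wmp;
 *	wmp = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	(... issue the read, modify the data, issue the write ...)
 *	sd_range_unlock(un, wmp);
 *
 * The lock is exclusive over [startb, endb]; a competing caller whose
 * range overlaps blocks in cv_wait() until sd_range_unlock() broadcasts
 * wm_avail.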
25744 */ 25745 25746 static struct sd_w_map * 25747 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 25748 { 25749 struct sd_w_map *wmp = NULL; 25750 struct sd_w_map *sl_wmp = NULL; 25751 struct sd_w_map *tmp_wmp; 25752 wm_state state = SD_WM_CHK_LIST; 25753 25754 25755 ASSERT(un != NULL); 25756 ASSERT(!mutex_owned(SD_MUTEX(un))); 25757 25758 mutex_enter(SD_MUTEX(un)); 25759 25760 while (state != SD_WM_DONE) { 25761 25762 switch (state) { 25763 case SD_WM_CHK_LIST: 25764 /* 25765 * This is the starting state. Check the wmap list 25766 * to see if the range is currently available. 25767 */ 25768 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 25769 /* 25770 * If this is a simple write and no rmw 25771 * i/o is pending then try to lock the 25772 * range as the range should be available. 25773 */ 25774 state = SD_WM_LOCK_RANGE; 25775 } else { 25776 tmp_wmp = sd_get_range(un, startb, endb); 25777 if (tmp_wmp != NULL) { 25778 if ((wmp != NULL) && ONLIST(un, wmp)) { 25779 /* 25780 * Should not keep onlist wmps 25781 * while waiting; this macro 25782 * will also do wmp = NULL; 25783 */ 25784 FREE_ONLIST_WMAP(un, wmp); 25785 } 25786 /* 25787 * sl_wmp is the wmap on which the wait 25788 * is done; since tmp_wmp points 25789 * to the in-use wmap, set sl_wmp to 25790 * tmp_wmp and change the state to wait 25791 */ 25792 sl_wmp = tmp_wmp; 25793 state = SD_WM_WAIT_MAP; 25794 } else { 25795 state = SD_WM_LOCK_RANGE; 25796 } 25797 25798 } 25799 break; 25800 25801 case SD_WM_LOCK_RANGE: 25802 ASSERT(un->un_wm_cache); 25803 /* 25804 * The range needs to be locked; try to get a wmap. 25805 * First attempt it with KM_NOSLEEP, to avoid a sleep 25806 * if possible, as we will have to release the sd mutex 25807 * if we have to sleep. 25808 */ 25809 if (wmp == NULL) 25810 wmp = kmem_cache_alloc(un->un_wm_cache, 25811 KM_NOSLEEP); 25812 if (wmp == NULL) { 25813 mutex_exit(SD_MUTEX(un)); 25814 _NOTE(DATA_READABLE_WITHOUT_LOCK 25815 (sd_lun::un_wm_cache)) 25816 wmp = kmem_cache_alloc(un->un_wm_cache, 25817 KM_SLEEP); 25818 mutex_enter(SD_MUTEX(un)); 25819 /* 25820 * We released the mutex, so recheck and go 25821 * back to the check-list state. 25822 */ 25823 state = SD_WM_CHK_LIST; 25824 } else { 25825 /* 25826 * We exit the state machine since we 25827 * have the wmap. Do the housekeeping first: 25828 * place the wmap on the wmap list if it is not 25829 * already on it, and then set the state to done. 25830 */ 25831 wmp->wm_start = startb; 25832 wmp->wm_end = endb; 25833 wmp->wm_flags = typ | SD_WM_BUSY; 25834 if (typ & SD_WTYPE_RMW) { 25835 un->un_rmw_count++; 25836 } 25837 /* 25838 * If not already on the list then link 25839 */ 25840 if (!ONLIST(un, wmp)) { 25841 wmp->wm_next = un->un_wm; 25842 wmp->wm_prev = NULL; 25843 if (wmp->wm_next) 25844 wmp->wm_next->wm_prev = wmp; 25845 un->un_wm = wmp; 25846 } 25847 state = SD_WM_DONE; 25848 } 25849 break; 25850 25851 case SD_WM_WAIT_MAP: 25852 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 25853 /* 25854 * Wait is done on sl_wmp, which is set in the 25855 * check_list state. 25856 */ 25857 sl_wmp->wm_wanted_count++; 25858 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 25859 sl_wmp->wm_wanted_count--; 25860 /* 25861 * We can reuse the memory from the completed sl_wmp 25862 * lock range for our new lock, but only if no one is 25863 * waiting for it.
25864 */ 25865 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 25866 if (sl_wmp->wm_wanted_count == 0) { 25867 if (wmp != NULL) 25868 CHK_N_FREEWMP(un, wmp); 25869 wmp = sl_wmp; 25870 } 25871 sl_wmp = NULL; 25872 /* 25873 * After waking up, we need to recheck the availability 25874 * of the range. 25875 */ 25876 state = SD_WM_CHK_LIST; 25877 break; 25878 25879 default: 25880 panic("sd_range_lock: " 25881 "Unknown state %d in sd_range_lock", state); 25882 /*NOTREACHED*/ 25883 } /* switch(state) */ 25884 25885 } /* while(state != SD_WM_DONE) */ 25886 25887 mutex_exit(SD_MUTEX(un)); 25888 25889 ASSERT(wmp != NULL); 25890 25891 return (wmp); 25892 } 25893 25894 25895 /* 25896 * Function: sd_get_range() 25897 * 25898 * Description: Find if there is any I/O overlapping this one. 25899 * Returns the write map of the 1st such I/O, NULL otherwise. 25900 * 25901 * Arguments: un - sd_lun structure for the device. 25902 * startb - The starting block number 25903 * endb - The end block number 25904 * 25905 * Return Code: wm - pointer to the wmap structure. 25906 */ 25907 25908 static struct sd_w_map * 25909 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 25910 { 25911 struct sd_w_map *wmp; 25912 25913 ASSERT(un != NULL); 25914 25915 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 25916 if (!(wmp->wm_flags & SD_WM_BUSY)) { 25917 continue; 25918 } 25919 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 25920 break; 25921 } 25922 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 25923 break; 25924 } 25925 } 25926 25927 return (wmp); 25928 } 25929 25930 25931 /* 25932 * Function: sd_free_inlist_wmap() 25933 * 25934 * Description: Unlink and free a write map struct. 25935 * 25936 * Arguments: un - sd_lun structure for the device. 25937 * wmp - sd_w_map which needs to be unlinked. 25938 */ 25939 25940 static void 25941 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 25942 { 25943 ASSERT(un != NULL); 25944 25945 if (un->un_wm == wmp) { 25946 un->un_wm = wmp->wm_next; 25947 } else { 25948 wmp->wm_prev->wm_next = wmp->wm_next; 25949 } 25950 25951 if (wmp->wm_next) { 25952 wmp->wm_next->wm_prev = wmp->wm_prev; 25953 } 25954 25955 wmp->wm_next = wmp->wm_prev = NULL; 25956 25957 kmem_cache_free(un->un_wm_cache, wmp); 25958 } 25959 25960 25961 /* 25962 * Function: sd_range_unlock() 25963 * 25964 * Description: Unlock the range locked by wm. 25965 * Free write map if nobody else is waiting on it. 25966 * 25967 * Arguments: un - sd_lun structure for the device. 25968 * wm - sd_w_map which needs to be unlocked. 25969 */ 25970 25971 static void 25972 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 25973 { 25974 ASSERT(un != NULL); 25975 ASSERT(wm != NULL); 25976 ASSERT(!mutex_owned(SD_MUTEX(un))); 25977 25978 mutex_enter(SD_MUTEX(un)); 25979 25980 if (wm->wm_flags & SD_WTYPE_RMW) { 25981 un->un_rmw_count--; 25982 } 25983 25984 if (wm->wm_wanted_count) { 25985 wm->wm_flags = 0; 25986 /* 25987 * Broadcast that the wmap is available now. 25988 */ 25989 cv_broadcast(&wm->wm_avail); 25990 } else { 25991 /* 25992 * If no one is waiting on the map, it should be freed. 25993 */ 25994 sd_free_inlist_wmap(un, wm); 25995 } 25996 25997 mutex_exit(SD_MUTEX(un)); 25998 } 25999 26000 26001 /* 26002 * Function: sd_read_modify_write_task 26003 * 26004 * Description: Called from a taskq thread to initiate the write phase of 26005 * a read-modify-write request. This is used for targets where 26006 * un->un_sys_blocksize != un->un_tgt_blocksize.
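 *
 * For example (illustrative numbers): with un_sys_blocksize = 512 and
 * un_tgt_blocksize = 2048, a 512-byte write to system block 5 maps to
 * target block 1 (bytes 2048-4095 of the media). The read phase fetches
 * that 2048-byte target block, the modified 512 bytes are copied in at
 * offset 512 within it, and this routine then sends the full target
 * block back down the iostart chain as the write phase.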
26007 * 26008 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26009 * 26010 * Context: Called under taskq thread context. 26011 */ 26012 26013 static void 26014 sd_read_modify_write_task(void *arg) 26015 { 26016 struct sd_mapblocksize_info *bsp; 26017 struct buf *bp; 26018 struct sd_xbuf *xp; 26019 struct sd_lun *un; 26020 26021 bp = arg; /* The bp is given in arg */ 26022 ASSERT(bp != NULL); 26023 26024 /* Get the pointer to the layer-private data struct */ 26025 xp = SD_GET_XBUF(bp); 26026 ASSERT(xp != NULL); 26027 bsp = xp->xb_private; 26028 ASSERT(bsp != NULL); 26029 26030 un = SD_GET_UN(bp); 26031 ASSERT(un != NULL); 26032 ASSERT(!mutex_owned(SD_MUTEX(un))); 26033 26034 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26035 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26036 26037 /* 26038 * This is the write phase of a read-modify-write request, called 26039 * under the context of a taskq thread in response to the completion 26040 * of the read portion of the rmw request under interrupt 26041 * context. The write request must be sent from here down the iostart 26042 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26043 * we use the layer index saved in the layer-private data area. 26044 */ 26045 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26046 26047 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26048 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26049 } 26050 26051 26052 /* 26053 * Function: sddump_do_read_of_rmw() 26054 * 26055 * Description: This routine is called from sddump. If sddump is called 26056 * with an I/O that is not aligned on a device blocksize boundary 26057 * then the write has to be converted to a read-modify-write. 26058 * Do the read part here in order to keep sddump simple. 26059 * Note that the sd_mutex is held across the call to this 26060 * routine. 26061 * 26062 * Arguments: un - sd_lun 26063 * blkno - block number in terms of media block size. 26064 * nblk - number of blocks. 26065 * bpp - pointer to pointer to the buf structure. On return 26066 * from this function, *bpp points to the valid buffer 26067 * to which the write has to be done. 26068 * 26069 * Return Code: 0 for success or errno-type return code 26070 */ 26071 26072 static int 26073 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26074 struct buf **bpp) 26075 { 26076 int err; 26077 int i; 26078 int rval; 26079 struct buf *bp; 26080 struct scsi_pkt *pkt = NULL; 26081 uint32_t target_blocksize; 26082 26083 ASSERT(un != NULL); 26084 ASSERT(mutex_owned(SD_MUTEX(un))); 26085 26086 target_blocksize = un->un_tgt_blocksize; 26087 26088 mutex_exit(SD_MUTEX(un)); 26089 26090 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26091 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26092 if (bp == NULL) { 26093 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26094 "no resources for dumping; giving up"); 26095 err = ENOMEM; 26096 goto done; 26097 } 26098 26099 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26100 blkno, nblk); 26101 if (rval != 0) { 26102 scsi_free_consistent_buf(bp); 26103 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26104 "no resources for dumping; giving up"); 26105 err = ENOMEM; 26106 goto done; 26107 } 26108 26109 pkt->pkt_flags |= FLAG_NOINTR; 26110 26111 err = EIO; 26112 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26113 26114 /* 26115 * Scsi_poll returns 0 (success) if the command completes and 26116 * the status block is STATUS_GOOD.
We should only check 26117 * errors if this condition is not true. Even then we should 26118 * send our own request sense packet only if we have a check 26119 * condition and auto request sense has not been performed by 26120 * the hba. 26121 */ 26122 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26123 26124 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26125 err = 0; 26126 break; 26127 } 26128 26129 /* 26130 * Check CMD_DEV_GONE 1st, give up if device is gone, 26131 * no need to read RQS data. 26132 */ 26133 if (pkt->pkt_reason == CMD_DEV_GONE) { 26134 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26135 "Device is gone\n"); 26136 break; 26137 } 26138 26139 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26140 SD_INFO(SD_LOG_DUMP, un, 26141 "sddump: read failed with CHECK, try # %d\n", i); 26142 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26143 (void) sd_send_polled_RQS(un); 26144 } 26145 26146 continue; 26147 } 26148 26149 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26150 int reset_retval = 0; 26151 26152 SD_INFO(SD_LOG_DUMP, un, 26153 "sddump: read failed with BUSY, try # %d\n", i); 26154 26155 if (un->un_f_lun_reset_enabled == TRUE) { 26156 reset_retval = scsi_reset(SD_ADDRESS(un), 26157 RESET_LUN); 26158 } 26159 if (reset_retval == 0) { 26160 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26161 } 26162 (void) sd_send_polled_RQS(un); 26163 26164 } else { 26165 SD_INFO(SD_LOG_DUMP, un, 26166 "sddump: read failed with 0x%x, try # %d\n", 26167 SD_GET_PKT_STATUS(pkt), i); 26168 mutex_enter(SD_MUTEX(un)); 26169 sd_reset_target(un, pkt); 26170 mutex_exit(SD_MUTEX(un)); 26171 } 26172 26173 /* 26174 * If we are not getting anywhere with lun/target resets, 26175 * let's reset the bus. 26176 */ 26177 if (i > SD_NDUMP_RETRIES/2) { 26178 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26179 (void) sd_send_polled_RQS(un); 26180 } 26181 26182 } 26183 scsi_destroy_pkt(pkt); 26184 26185 if (err != 0) { 26186 scsi_free_consistent_buf(bp); 26187 *bpp = NULL; 26188 } else { 26189 *bpp = bp; 26190 } 26191 26192 done: 26193 mutex_enter(SD_MUTEX(un)); 26194 return (err); 26195 } 26196 26197 26198 /* 26199 * Function: sd_failfast_flushq 26200 * 26201 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26202 * in b_flags and move them onto the failfast queue, then kick 26203 * off a thread to return all bp's on the failfast queue to 26204 * their owners with an error set. 26205 * 26206 * Arguments: un - pointer to the soft state struct for the instance. 26207 * 26208 * Context: may execute in interrupt context. 26209 */ 26210 26211 static void 26212 sd_failfast_flushq(struct sd_lun *un) 26213 { 26214 struct buf *bp; 26215 struct buf *next_waitq_bp; 26216 struct buf *prev_waitq_bp = NULL; 26217 26218 ASSERT(un != NULL); 26219 ASSERT(mutex_owned(SD_MUTEX(un))); 26220 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26221 ASSERT(un->un_failfast_bp == NULL); 26222 26223 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26224 "sd_failfast_flushq: entry: un:0x%p\n", un); 26225 26226 /* 26227 * Check if we should flush all bufs when entering failfast state, or 26228 * just those with B_FAILFAST set. 26229 */ 26230 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26231 /* 26232 * Move *all* bp's on the wait queue to the failfast flush 26233 * queue, including those that do NOT have B_FAILFAST set. 
26234 */ 26235 if (un->un_failfast_headp == NULL) { 26236 ASSERT(un->un_failfast_tailp == NULL); 26237 un->un_failfast_headp = un->un_waitq_headp; 26238 } else { 26239 ASSERT(un->un_failfast_tailp != NULL); 26240 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26241 } 26242 26243 un->un_failfast_tailp = un->un_waitq_tailp; 26244 26245 /* update kstat for each bp moved out of the waitq */ 26246 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26247 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26248 } 26249 26250 /* empty the waitq */ 26251 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26252 26253 } else { 26254 /* 26255 * Go thru the wait queue, pick off all entries with 26256 * B_FAILFAST set, and move these onto the failfast queue. 26257 */ 26258 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26259 /* 26260 * Save the pointer to the next bp on the wait queue, 26261 * so we get to it on the next iteration of this loop. 26262 */ 26263 next_waitq_bp = bp->av_forw; 26264 26265 /* 26266 * If this bp from the wait queue does NOT have 26267 * B_FAILFAST set, just move on to the next element 26268 * in the wait queue. Note, this is the only place 26269 * where it is correct to set prev_waitq_bp. 26270 */ 26271 if ((bp->b_flags & B_FAILFAST) == 0) { 26272 prev_waitq_bp = bp; 26273 continue; 26274 } 26275 26276 /* 26277 * Remove the bp from the wait queue. 26278 */ 26279 if (bp == un->un_waitq_headp) { 26280 /* The bp is the first element of the waitq. */ 26281 un->un_waitq_headp = next_waitq_bp; 26282 if (un->un_waitq_headp == NULL) { 26283 /* The wait queue is now empty */ 26284 un->un_waitq_tailp = NULL; 26285 } 26286 } else { 26287 /* 26288 * The bp is either somewhere in the middle 26289 * or at the end of the wait queue. 26290 */ 26291 ASSERT(un->un_waitq_headp != NULL); 26292 ASSERT(prev_waitq_bp != NULL); 26293 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26294 == 0); 26295 if (bp == un->un_waitq_tailp) { 26296 /* bp is the last entry on the waitq. */ 26297 ASSERT(next_waitq_bp == NULL); 26298 un->un_waitq_tailp = prev_waitq_bp; 26299 } 26300 prev_waitq_bp->av_forw = next_waitq_bp; 26301 } 26302 bp->av_forw = NULL; 26303 26304 /* 26305 * update kstat since the bp is moved out of 26306 * the waitq 26307 */ 26308 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26309 26310 /* 26311 * Now put the bp onto the failfast queue. 26312 */ 26313 if (un->un_failfast_headp == NULL) { 26314 /* failfast queue is currently empty */ 26315 ASSERT(un->un_failfast_tailp == NULL); 26316 un->un_failfast_headp = 26317 un->un_failfast_tailp = bp; 26318 } else { 26319 /* Add the bp to the end of the failfast q */ 26320 ASSERT(un->un_failfast_tailp != NULL); 26321 ASSERT(un->un_failfast_tailp->b_flags & 26322 B_FAILFAST); 26323 un->un_failfast_tailp->av_forw = bp; 26324 un->un_failfast_tailp = bp; 26325 } 26326 } 26327 } 26328 26329 /* 26330 * Now return all bp's on the failfast queue to their owners. 26331 */ 26332 while ((bp = un->un_failfast_headp) != NULL) { 26333 26334 un->un_failfast_headp = bp->av_forw; 26335 if (un->un_failfast_headp == NULL) { 26336 un->un_failfast_tailp = NULL; 26337 } 26338 26339 /* 26340 * We want to return the bp with a failure error code, but 26341 * we do not want a call to sd_start_cmds() to occur here, 26342 * so use sd_return_failed_command_no_restart() instead of 26343 * sd_return_failed_command(). 26344 */ 26345 sd_return_failed_command_no_restart(un, bp, EIO); 26346 } 26347 26348 /* Flush the xbuf queues if required. 
*/ 26349 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 26350 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 26351 } 26352 26353 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26354 "sd_failfast_flushq: exit: un:0x%p\n", un); 26355 } 26356 26357 26358 /* 26359 * Function: sd_failfast_flushq_callback 26360 * 26361 * Description: Return TRUE if the given bp meets the criteria for failfast 26362 * flushing. Used with ddi_xbuf_flushq(9F). 26363 * 26364 * Arguments: bp - ptr to buf struct to be examined. 26365 * 26366 * Context: Any 26367 */ 26368 26369 static int 26370 sd_failfast_flushq_callback(struct buf *bp) 26371 { 26372 /* 26373 * Return TRUE if (1) we want to flush ALL bufs when the failfast 26374 * state is entered; OR (2) the given bp has B_FAILFAST set. 26375 */ 26376 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 26377 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 26378 } 26379 26380 26381 26382 #if defined(__i386) || defined(__amd64) 26383 /* 26384 * Function: sd_setup_next_xfer 26385 * 26386 * Description: Prepare next I/O operation using DMA_PARTIAL 26387 * 26388 */ 26389 26390 static int 26391 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 26392 struct scsi_pkt *pkt, struct sd_xbuf *xp) 26393 { 26394 ssize_t num_blks_not_xfered; 26395 daddr_t strt_blk_num; 26396 ssize_t bytes_not_xfered; 26397 int rval; 26398 26399 ASSERT(pkt->pkt_resid == 0); 26400 26401 /* 26402 * Calculate next block number and amount to be transferred. 26403 * 26404 * How much data has NOT been transferred to the HBA yet. 26405 */ 26406 bytes_not_xfered = xp->xb_dma_resid; 26407 26408 /* 26409 * Figure out how many blocks have NOT been transferred to the HBA yet. 26410 */ 26411 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 26412 26413 /* 26414 * Set the starting block number to the end of what WAS transferred. 26415 */ 26416 strt_blk_num = xp->xb_blkno + 26417 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 26418 26419 /* 26420 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 26421 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 26422 * the disk mutex here. 26423 */ 26424 rval = sd_setup_next_rw_pkt(un, pkt, bp, 26425 strt_blk_num, num_blks_not_xfered); 26426 26427 if (rval == 0) { 26428 26429 /* 26430 * Success. 26431 * 26432 * Adjust things if there are still more blocks to be 26433 * transferred. 26434 */ 26435 xp->xb_dma_resid = pkt->pkt_resid; 26436 pkt->pkt_resid = 0; 26437 26438 return (1); 26439 } 26440 26441 /* 26442 * There's really only one possible return value from 26443 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 26444 * returns NULL. 26445 */ 26446 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 26447 26448 bp->b_resid = bp->b_bcount; 26449 bp->b_flags |= B_ERROR; 26450 26451 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26452 "Error setting up next portion of DMA transfer\n"); 26453 26454 return (0); 26455 } 26456 #endif 26457 26458 /* 26459 * Function: sd_panic_for_res_conflict 26460 * 26461 * Description: Call panic with a string formatted with "Reservation Conflict" 26462 * and a human-readable identifier indicating the SD instance 26463 * that experienced the reservation conflict. 26464 * 26465 * Arguments: un - pointer to the soft state struct for the instance. 26466 * 26467 * Context: may execute in interrupt context.
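 *
 * With the format string used below, the resulting panic message looks
 * like the following (the device path is hypothetical):
 *
 *	Reservation Conflict
 *	Disk: /pci@1f,4000/scsi@3/sd@1,0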
26468 */ 26469 26470 #define SD_RESV_CONFLICT_FMT_LEN 40 26471 void 26472 sd_panic_for_res_conflict(struct sd_lun *un) 26473 { 26474 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26475 char path_str[MAXPATHLEN]; 26476 26477 (void) snprintf(panic_str, sizeof (panic_str), 26478 "Reservation Conflict\nDisk: %s", 26479 ddi_pathname(SD_DEVINFO(un), path_str)); 26480 26481 panic(panic_str); 26482 } 26483 26484 /* 26485 * Note: The following sd_faultinjection_ioctl() routines implement 26486 * driver support for handling fault injection for error analysis, 26487 * causing faults in multiple layers of the driver. 26488 * 26489 */ 26490 26491 #ifdef SD_FAULT_INJECTION 26492 static uint_t sd_fault_injection_on = 0; 26493 26494 /* 26495 * Function: sd_faultinjection_ioctl() 26496 * 26497 * Description: This routine is the driver entry point for handling 26498 * faultinjection ioctls to inject errors into the 26499 * layer model 26500 * 26501 * Arguments: cmd - the ioctl cmd received 26502 * arg - the arguments from user and returns 26503 */ 26504 26505 static void 26506 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 26507 26508 uint_t i; 26509 uint_t rval; 26510 26511 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 26512 26513 mutex_enter(SD_MUTEX(un)); 26514 26515 switch (cmd) { 26516 case SDIOCRUN: 26517 /* Allow pushed faults to be injected */ 26518 SD_INFO(SD_LOG_SDTEST, un, 26519 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 26520 26521 sd_fault_injection_on = 1; 26522 26523 SD_INFO(SD_LOG_IOERR, un, 26524 "sd_faultinjection_ioctl: run finished\n"); 26525 break; 26526 26527 case SDIOCSTART: 26528 /* Start Injection Session */ 26529 SD_INFO(SD_LOG_SDTEST, un, 26530 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 26531 26532 sd_fault_injection_on = 0; 26533 un->sd_injection_mask = 0xFFFFFFFF; 26534 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26535 un->sd_fi_fifo_pkt[i] = NULL; 26536 un->sd_fi_fifo_xb[i] = NULL; 26537 un->sd_fi_fifo_un[i] = NULL; 26538 un->sd_fi_fifo_arq[i] = NULL; 26539 } 26540 un->sd_fi_fifo_start = 0; 26541 un->sd_fi_fifo_end = 0; 26542 26543 mutex_enter(&(un->un_fi_mutex)); 26544 un->sd_fi_log[0] = '\0'; 26545 un->sd_fi_buf_len = 0; 26546 mutex_exit(&(un->un_fi_mutex)); 26547 26548 SD_INFO(SD_LOG_IOERR, un, 26549 "sd_faultinjection_ioctl: start finished\n"); 26550 break; 26551 26552 case SDIOCSTOP: 26553 /* Stop Injection Session */ 26554 SD_INFO(SD_LOG_SDTEST, un, 26555 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 26556 sd_fault_injection_on = 0; 26557 un->sd_injection_mask = 0x0; 26558 26559 /* Empty stray or unused structs from fifo */ 26560 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 26561 if (un->sd_fi_fifo_pkt[i] != NULL) { 26562 kmem_free(un->sd_fi_fifo_pkt[i], 26563 sizeof (struct sd_fi_pkt)); 26564 } 26565 if (un->sd_fi_fifo_xb[i] != NULL) { 26566 kmem_free(un->sd_fi_fifo_xb[i], 26567 sizeof (struct sd_fi_xb)); 26568 } 26569 if (un->sd_fi_fifo_un[i] != NULL) { 26570 kmem_free(un->sd_fi_fifo_un[i], 26571 sizeof (struct sd_fi_un)); 26572 } 26573 if (un->sd_fi_fifo_arq[i] != NULL) { 26574 kmem_free(un->sd_fi_fifo_arq[i], 26575 sizeof (struct sd_fi_arq)); 26576 } 26577 un->sd_fi_fifo_pkt[i] = NULL; 26578 un->sd_fi_fifo_un[i] = NULL; 26579 un->sd_fi_fifo_xb[i] = NULL; 26580 un->sd_fi_fifo_arq[i] = NULL; 26581 } 26582 un->sd_fi_fifo_start = 0; 26583 un->sd_fi_fifo_end = 0; 26584 26585 SD_INFO(SD_LOG_IOERR, un, 26586 "sd_faultinjection_ioctl: stop finished\n"); 26587 break; 26588 26589 case SDIOCINSERTPKT: 26590 /* Store a
packet struct to be pushed onto fifo */ 26591 SD_INFO(SD_LOG_SDTEST, un, 26592 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 26593 26594 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26595 26596 sd_fault_injection_on = 0; 26597 26598 /* No more than SD_FI_MAX_ERROR allowed in Queue */ 26599 if (un->sd_fi_fifo_pkt[i] != NULL) { 26600 kmem_free(un->sd_fi_fifo_pkt[i], 26601 sizeof (struct sd_fi_pkt)); 26602 } 26603 if (arg != NULL) { 26604 un->sd_fi_fifo_pkt[i] = 26605 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 26606 if (un->sd_fi_fifo_pkt[i] == NULL) { 26607 /* Alloc failed; don't store anything */ 26608 break; 26609 } 26610 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 26611 sizeof (struct sd_fi_pkt), 0); 26612 if (rval == -1) { 26613 kmem_free(un->sd_fi_fifo_pkt[i], 26614 sizeof (struct sd_fi_pkt)); 26615 un->sd_fi_fifo_pkt[i] = NULL; 26616 } 26617 } else { 26618 SD_INFO(SD_LOG_IOERR, un, 26619 "sd_faultinjection_ioctl: pkt null\n"); 26620 } 26621 break; 26622 26623 case SDIOCINSERTXB: 26624 /* Store an xb struct to be pushed onto fifo */ 26625 SD_INFO(SD_LOG_SDTEST, un, 26626 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 26627 26628 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26629 26630 sd_fault_injection_on = 0; 26631 26632 if (un->sd_fi_fifo_xb[i] != NULL) { 26633 kmem_free(un->sd_fi_fifo_xb[i], 26634 sizeof (struct sd_fi_xb)); 26635 un->sd_fi_fifo_xb[i] = NULL; 26636 } 26637 if (arg != NULL) { 26638 un->sd_fi_fifo_xb[i] = 26639 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 26640 if (un->sd_fi_fifo_xb[i] == NULL) { 26641 /* Alloc failed; don't store anything */ 26642 break; 26643 } 26644 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 26645 sizeof (struct sd_fi_xb), 0); 26646 26647 if (rval == -1) { 26648 kmem_free(un->sd_fi_fifo_xb[i], 26649 sizeof (struct sd_fi_xb)); 26650 un->sd_fi_fifo_xb[i] = NULL; 26651 } 26652 } else { 26653 SD_INFO(SD_LOG_IOERR, un, 26654 "sd_faultinjection_ioctl: xb null\n"); 26655 } 26656 break; 26657 26658 case SDIOCINSERTUN: 26659 /* Store a un struct to be pushed onto fifo */ 26660 SD_INFO(SD_LOG_SDTEST, un, 26661 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 26662 26663 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26664 26665 sd_fault_injection_on = 0; 26666 26667 if (un->sd_fi_fifo_un[i] != NULL) { 26668 kmem_free(un->sd_fi_fifo_un[i], 26669 sizeof (struct sd_fi_un)); 26670 un->sd_fi_fifo_un[i] = NULL; 26671 } 26672 if (arg != NULL) { 26673 un->sd_fi_fifo_un[i] = 26674 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 26675 if (un->sd_fi_fifo_un[i] == NULL) { 26676 /* Alloc failed; don't store anything */ 26677 break; 26678 } 26679 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 26680 sizeof (struct sd_fi_un), 0); 26681 if (rval == -1) { 26682 kmem_free(un->sd_fi_fifo_un[i], 26683 sizeof (struct sd_fi_un)); 26684 un->sd_fi_fifo_un[i] = NULL; 26685 } 26686 26687 } else { 26688 SD_INFO(SD_LOG_IOERR, un, 26689 "sd_faultinjection_ioctl: un null\n"); 26690 } 26691 26692 break; 26693 26694 case SDIOCINSERTARQ: 26695 /* Store an arq struct to be pushed onto fifo */ 26696 SD_INFO(SD_LOG_SDTEST, un, 26697 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 26698 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 26699 26700 sd_fault_injection_on = 0; 26701 26702 if (un->sd_fi_fifo_arq[i] != NULL) { 26703 kmem_free(un->sd_fi_fifo_arq[i], 26704 sizeof (struct sd_fi_arq)); 26705 un->sd_fi_fifo_arq[i] = NULL; 26706 } 26707 if (arg != NULL) { 26708 un->sd_fi_fifo_arq[i] = 26709 kmem_alloc(sizeof (struct sd_fi_arq),
KM_NOSLEEP); 26710 if (un->sd_fi_fifo_arq[i] == NULL) { 26711 /* Alloc failed; don't store anything */ 26712 break; 26713 } 26714 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 26715 sizeof (struct sd_fi_arq), 0); 26716 if (rval == -1) { 26717 kmem_free(un->sd_fi_fifo_arq[i], 26718 sizeof (struct sd_fi_arq)); 26719 un->sd_fi_fifo_arq[i] = NULL; 26720 } 26721 26722 } else { 26723 SD_INFO(SD_LOG_IOERR, un, 26724 "sd_faultinjection_ioctl: arq null\n"); 26725 } 26726 26727 break; 26728 26729 case SDIOCPUSH: 26730 /* Push stored xb, pkt, un, and arq onto fifo */ 26731 sd_fault_injection_on = 0; 26732 26733 if (arg != NULL) { 26734 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 26735 if (rval != -1 && 26736 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 26737 un->sd_fi_fifo_end += i; 26738 } 26739 } else { 26740 SD_INFO(SD_LOG_IOERR, un, 26741 "sd_faultinjection_ioctl: push arg null\n"); 26742 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 26743 un->sd_fi_fifo_end++; 26744 } 26745 } 26746 SD_INFO(SD_LOG_IOERR, un, 26747 "sd_faultinjection_ioctl: push to end=%d\n", 26748 un->sd_fi_fifo_end); 26749 break; 26750 26751 case SDIOCRETRIEVE: 26752 /* Return buffer of log from Injection session */ 26753 SD_INFO(SD_LOG_SDTEST, un, 26754 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 26755 26756 sd_fault_injection_on = 0; 26757 26758 mutex_enter(&(un->un_fi_mutex)); 26759 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 26760 un->sd_fi_buf_len+1, 0); 26761 mutex_exit(&(un->un_fi_mutex)); 26762 26763 if (rval == -1) { 26764 /* 26765 * arg is possibly invalid; set 26766 * it to NULL for return 26767 */ 26768 arg = NULL; 26769 } 26770 break; 26771 } 26772 26773 mutex_exit(SD_MUTEX(un)); 26774 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 26775 " exit\n"); 26776 } 26777 26778 26779 /* 26780 * Function: sd_injection_log() 26781 * 26782 * Description: This routine adds buf to the already existing injection log 26783 * for retrieval via sd_faultinjection_ioctl() for use in fault 26784 * detection and recovery 26785 * 26786 * Arguments: buf - the string to add to the log 26787 */ 26788 26789 static void 26790 sd_injection_log(char *buf, struct sd_lun *un) 26791 { 26792 uint_t len; 26793 26794 ASSERT(un != NULL); 26795 ASSERT(buf != NULL); 26796 26797 mutex_enter(&(un->un_fi_mutex)); 26798 26799 len = min(strlen(buf), 255); 26800 /* Add logged value to Injection log to be returned later */ 26801 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 26802 uint_t offset = strlen((char *)un->sd_fi_log); 26803 char *destp = (char *)un->sd_fi_log + offset; 26804 int i; 26805 for (i = 0; i < len; i++) { 26806 *destp++ = *buf++; 26807 } 26808 un->sd_fi_buf_len += len; 26809 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 26810 } 26811 26812 mutex_exit(&(un->un_fi_mutex)); 26813 } 26814 26815 26816 /* 26817 * Function: sd_faultinjection() 26818 * 26819 * Description: This routine takes the pkt and changes its 26820 * content based on the error injection scenario.
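 *
 * For reference, a typical injection session driven by the ioctls
 * handled in sd_faultinjection_ioctl() above looks like this
 * (illustrative outline only):
 *
 *	SDIOCSTART                      (reset the fifo and the log)
 *	SDIOCINSERTPKT / SDIOCINSERTXB /
 *	SDIOCINSERTUN / SDIOCINSERTARQ  (stage an error at a fifo slot)
 *	SDIOCPUSH                       (advance sd_fi_fifo_end)
 *	SDIOCRUN                        (sd_fault_injection_on = 1)
 *	SDIOCRETRIEVE                   (copy out the injection log)
 *
 * Once running, this routine consumes one staged set per matching
 * command completion, starting at sd_fi_fifo_start.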
26821 * 26822 * Arguments: pktp - packet to be changed 26823 */ 26824 26825 static void 26826 sd_faultinjection(struct scsi_pkt *pktp) 26827 { 26828 uint_t i; 26829 struct sd_fi_pkt *fi_pkt; 26830 struct sd_fi_xb *fi_xb; 26831 struct sd_fi_un *fi_un; 26832 struct sd_fi_arq *fi_arq; 26833 struct buf *bp; 26834 struct sd_xbuf *xb; 26835 struct sd_lun *un; 26836 26837 ASSERT(pktp != NULL); 26838 26839 /* pull bp xb and un from pktp */ 26840 bp = (struct buf *)pktp->pkt_private; 26841 xb = SD_GET_XBUF(bp); 26842 un = SD_GET_UN(bp); 26843 26844 ASSERT(un != NULL); 26845 26846 mutex_enter(SD_MUTEX(un)); 26847 26848 SD_TRACE(SD_LOG_SDTEST, un, 26849 "sd_faultinjection: entry Injection from sdintr\n"); 26850 26851 /* if injection is off return */ 26852 if (sd_fault_injection_on == 0 || 26853 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 26854 mutex_exit(SD_MUTEX(un)); 26855 return; 26856 } 26857 26858 26859 /* take next set off fifo */ 26860 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 26861 26862 fi_pkt = un->sd_fi_fifo_pkt[i]; 26863 fi_xb = un->sd_fi_fifo_xb[i]; 26864 fi_un = un->sd_fi_fifo_un[i]; 26865 fi_arq = un->sd_fi_fifo_arq[i]; 26866 26867 26868 /* set variables accordingly */ 26869 /* set pkt if it was on fifo */ 26870 if (fi_pkt != NULL) { 26871 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 26872 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 26873 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 26874 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 26875 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 26876 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 26877 26878 } 26879 26880 /* set xb if it was on fifo */ 26881 if (fi_xb != NULL) { 26882 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 26883 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 26884 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 26885 SD_CONDSET(xb, xb, xb_victim_retry_count, 26886 "xb_victim_retry_count"); 26887 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 26888 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 26889 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 26890 26891 /* copy in block data from sense */ 26892 if (fi_xb->xb_sense_data[0] != -1) { 26893 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 26894 SENSE_LENGTH); 26895 } 26896 26897 /* copy in extended sense codes */ 26898 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 26899 "es_code"); 26900 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 26901 "es_key"); 26902 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 26903 "es_add_code"); 26904 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 26905 es_qual_code, "es_qual_code"); 26906 } 26907 26908 /* set un if it was on fifo */ 26909 if (fi_un != NULL) { 26910 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 26911 SD_CONDSET(un, un, un_ctype, "un_ctype"); 26912 SD_CONDSET(un, un, un_reset_retry_count, 26913 "un_reset_retry_count"); 26914 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 26915 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 26916 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 26917 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 26918 "un_f_allow_bus_device_reset"); 26919 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 26920 26921 } 26922 26923 /* copy in auto request sense if it was on fifo */ 26924 if (fi_arq != NULL) { 26925 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 26926 } 26927 26928 /* free structs */ 26929 if (un->sd_fi_fifo_pkt[i] != NULL) { 26930 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 26931 } 26932 if (un->sd_fi_fifo_xb[i] != NULL) { 26933 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 26934 } 26935 if (un->sd_fi_fifo_un[i] != NULL) { 26936 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 26937 } 26938 if (un->sd_fi_fifo_arq[i] != NULL) { 26939 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 26940 } 26941 26942 /* 26943 * kmem_free does not guarantee that the pointer is set to NULL. 26944 * Since we use these pointers to determine whether we set 26945 * values or not, let's confirm they are always 26946 * NULL after the free. 26947 */ 26948 un->sd_fi_fifo_pkt[i] = NULL; 26949 un->sd_fi_fifo_un[i] = NULL; 26950 un->sd_fi_fifo_xb[i] = NULL; 26951 un->sd_fi_fifo_arq[i] = NULL; 26952 26953 un->sd_fi_fifo_start++; 26954 26955 mutex_exit(SD_MUTEX(un)); 26956 26957 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 26958 } 26959 26960 #endif /* SD_FAULT_INJECTION */ 26961 26962 /* 26963 * This routine is invoked in sd_unit_attach(). Before calling it, the 26964 * properties in the conf file should already have been processed, 26965 * including the "hotpluggable" property. 26966 * 26967 * The sd driver distinguishes 3 different types of devices: removable media, 26968 * non-removable media, and hotpluggable. The differences are defined below: 26969 * 26970 * 1. Device ID 26971 * 26972 * The device ID of a device is used to identify this device. Refer to 26973 * ddi_devid_register(9F). 26974 * 26975 * For a non-removable media disk device which can provide the 0x80 or 0x83 26976 * VPD page (refer to the INQUIRY command of the SCSI SPC specification), a unique 26977 * device ID is created to identify the device. For other non-removable 26978 * media devices, a default device ID is created only if the device has 26979 * at least 2 alternate cylinders. Otherwise, the device has no devid. 26980 * 26981 * ------------------------------------------------------- 26982 * removable media hotpluggable | Can Have Device ID 26983 * ------------------------------------------------------- 26984 * false false | Yes 26985 * false true | Yes 26986 * true x | No 26987 * ------------------------------------------------------ 26988 * 26989 * 26990 * 2. SCSI group 4 commands 26991 * 26992 * In the SCSI specs, only some commands in the group 4 command set can use 26993 * 8-byte addresses to access >2TB storage spaces. 26994 * Other commands have no such capability. Without group 4 support, 26995 * it is impossible to make full use of the storage space of a disk with 26996 * a capacity larger than 2TB. 26997 * 26998 * ----------------------------------------------- 26999 * removable media hotpluggable LP64 | Group 27000 * ----------------------------------------------- 27001 * false false false | 1 27002 * false false true | 4 27003 * false true false | 1 27004 * false true true | 4 27005 * true x x | 5 27006 * ----------------------------------------------- 27007 * 27008 * 27009 * 3. Check for VTOC Label 27010 * 27011 * If a direct-access disk has no EFI label, sd will check if it has a 27012 * valid VTOC label. Now, sd also does that check for removable media 27013 * and hotpluggable devices.
27014 * 27015 * -------------------------------------------------------------- 27016 * Direct-Access removable media hotpluggable | Check Label 27017 * ------------------------------------------------------------- 27018 * false false false | No 27019 * false false true | No 27020 * false true false | Yes 27021 * false true true | Yes 27022 * true x x | Yes 27023 * -------------------------------------------------------------- 27024 * 27025 * 27026 * 4. Building default VTOC label 27027 * 27028 * As section 3 says, sd checks whether some kinds of devices have a VTOC label. 27029 * If those devices have no valid VTOC label, sd(7d) will attempt to 27030 * create a default VTOC for them. Currently sd creates a default VTOC label 27031 * for all devices on the x86 platform (VTOC_16), but only for removable 27032 * media devices on SPARC (VTOC_8). 27033 * 27034 * ----------------------------------------------------------- 27035 * removable media hotpluggable platform | Default Label 27036 * ----------------------------------------------------------- 27037 * false false sparc | No 27038 * false true x86 | Yes 27039 * false true sparc | Yes 27040 * true x x | Yes 27041 * ---------------------------------------------------------- 27042 * 27043 * 27044 * 5. Supported blocksizes of target devices 27045 * 27046 * Sd supports non-512-byte blocksizes for removable media devices only. 27047 * For other devices, only a 512-byte blocksize is supported. This may 27048 * change in the near future because some RAID devices require non-512-byte 27049 * blocksizes. 27050 * 27051 * ----------------------------------------------------------- 27052 * removable media hotpluggable | non-512-byte blocksize 27053 * ----------------------------------------------------------- 27054 * false false | No 27055 * false true | No 27056 * true x | Yes 27057 * ----------------------------------------------------------- 27058 * 27059 * 27060 * 6. Automatic mount & unmount 27061 * 27062 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to query 27063 * whether a device is a removable media device. It returns 1 for removable media 27064 * devices, and 0 for others. 27065 * 27066 * The automatic mounting subsystem should distinguish between the types 27067 * of devices and apply automounting policies to each. 27068 * 27069 * 27070 * 7. fdisk partition management 27071 * 27072 * Fdisk is the traditional partitioning method on the x86 platform. The sd(7d) 27073 * driver supports fdisk partitions only on the x86 platform; on the SPARC platform, sd 27074 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27075 * fdisk partitions on both the x86 and SPARC platforms. 27076 * 27077 * ----------------------------------------------------------- 27078 * platform removable media USB/1394 | fdisk supported 27079 * ----------------------------------------------------------- 27080 * x86 X X | true 27081 * ------------------------------------------------------------ 27082 * sparc X X | false 27083 * ------------------------------------------------------------ 27084 * 27085 * 27086 * 8. MBOOT/MBR 27087 * 27088 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does support 27089 * reading/writing the mboot for removable media devices on SPARC.
27090 * 27091 * ----------------------------------------------------------- 27092 * platform removable media USB/1394 | mboot supported 27093 * ----------------------------------------------------------- 27094 * x86 X X | true 27095 * ------------------------------------------------------------ 27096 * sparc false false | false 27097 * sparc false true | true 27098 * sparc true false | true 27099 * sparc true true | true 27100 * ------------------------------------------------------------ 27101 * 27102 * 27103 * 9. Error handling during device open 27104 * 27105 * If opening a disk device fails, an errno is returned. For some kinds 27106 * of errors, a different errno is returned depending on whether the device is 27107 * a removable media device. This brings USB/1394 hard disks in line with 27108 * expected hard disk behavior; it is not expected to break any 27109 * application. 27110 * 27111 * ------------------------------------------------------ 27112 * removable media hotpluggable | errno 27113 * ------------------------------------------------------ 27114 * false false | EIO 27115 * false true | EIO 27116 * true x | ENXIO 27117 * ------------------------------------------------------ 27118 * 27119 * 27120 * 11. ioctls: DKIOCEJECT, CDROMEJECT 27121 * 27122 * These IOCTLs are applicable only to removable media devices. 27123 * 27124 * ----------------------------------------------------------- 27125 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 27126 * ----------------------------------------------------------- 27127 * false false | No 27128 * false true | No 27129 * true x | Yes 27130 * ----------------------------------------------------------- 27131 * 27132 * 27133 * 12. Kstats for partitions 27134 * 27135 * sd creates partition kstats for non-removable media devices. USB and 27136 * Firewire hard disks now have partition kstats as well. 27137 * 27138 * ------------------------------------------------------ 27139 * removable media hotpluggable | kstat 27140 * ------------------------------------------------------ 27141 * false false | Yes 27142 * false true | Yes 27143 * true x | No 27144 * ------------------------------------------------------ 27145 * 27146 * 27147 * 13. Removable media & hotpluggable properties 27148 * 27149 * The sd driver creates a "removable-media" property for removable media 27150 * devices. A parent nexus driver creates a "hotpluggable" property if 27151 * it supports hotplugging. 27152 * 27153 * --------------------------------------------------------------------- 27154 * removable media hotpluggable | "removable-media" "hotpluggable" 27155 * --------------------------------------------------------------------- 27156 * false false | No No 27157 * false true | No Yes 27158 * true false | Yes No 27159 * true true | Yes Yes 27160 * --------------------------------------------------------------------- 27161 * 27162 * 27163 * 14. Power Management 27164 * 27165 * sd only power manages removable media devices or devices that support 27166 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250). 27167 * 27168 * A parent nexus that supports hotplugging can also set "pm-capable" 27169 * if the disk can be power managed.
27170 * 27171 * ------------------------------------------------------------ 27172 * removable media hotpluggable pm-capable | power manage 27173 * ------------------------------------------------------------ 27174 * false false false | No 27175 * false false true | Yes 27176 * false true false | No 27177 * false true true | Yes 27178 * true x x | Yes 27179 * ------------------------------------------------------------ 27180 * 27181 * USB and Firewire hard disks can now be power managed independently 27182 * of the framebuffer. 27183 * 27184 * 27185 * 15. Support for USB disks with capacity larger than 1TB 27186 * 27187 * Currently, sd doesn't permit a fixed disk device with a capacity 27188 * larger than 1TB to be used in a 32-bit operating system environment. 27189 * However, sd doesn't enforce this for removable media devices; instead, it 27190 * assumes that removable media devices cannot have a capacity larger 27191 * than 1TB. Therefore, using those devices on a 32-bit system is only partially 27192 * supported, which can cause some unexpected results. 27193 * 27194 * --------------------------------------------------------------------- 27195 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 27196 * --------------------------------------------------------------------- 27197 * false false | true | No 27198 * false true | true | No 27199 * true false | true | Yes 27200 * true true | true | Yes 27201 * --------------------------------------------------------------------- 27202 * 27203 * 27204 * 16. Check write-protection at open time 27205 * 27206 * When a removable media device is opened for writing without the NDELAY 27207 * flag, sd will check whether the device is writable. An attempt to open 27208 * a write-protected device for writing without the NDELAY flag will abort. 27209 * 27210 * ------------------------------------------------------------ 27211 * removable media USB/1394 | WP Check 27212 * ------------------------------------------------------------ 27213 * false false | No 27214 * false true | No 27215 * true false | Yes 27216 * true true | Yes 27217 * ------------------------------------------------------------ 27218 * 27219 * 27220 * 17. syslog when corrupted VTOC is encountered 27221 * 27222 * Currently, if an invalid VTOC is encountered, sd only prints a syslog 27223 * message for fixed SCSI disks. 27224 * ------------------------------------------------------------ 27225 * removable media USB/1394 | print syslog 27226 * ------------------------------------------------------------ 27227 * false false | Yes 27228 * false true | No 27229 * true false | No 27230 * true true | No 27231 * ------------------------------------------------------------ 27232 */ 27233 static void 27234 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 27235 { 27236 int pm_capable_prop; 27237 27238 ASSERT(un->un_sd); 27239 ASSERT(un->un_sd->sd_inq); 27240 27241 /* 27242 * Enable SYNC CACHE support for all devices. 27243 */ 27244 un->un_f_sync_cache_supported = TRUE; 27245 27246 if (un->un_sd->sd_inq->inq_rmb) { 27247 /* 27248 * The media of this device is removable, and for this kind 27249 * of device it is possible to change the medium after opening 27250 * it. Thus we should support this operation.
27251 */ 27252 un->un_f_has_removable_media = TRUE; 27253 27254 /* 27255 * Support non-512-byte blocksizes for removable media devices 27256 */ 27257 un->un_f_non_devbsize_supported = TRUE; 27258 27259 /* 27260 * Assume that all removable media devices support DOOR_LOCK 27261 */ 27262 un->un_f_doorlock_supported = TRUE; 27263 27264 /* 27265 * A removable media device may be opened with the 27266 * NDELAY flag when there is no medium in the drive; in this 27267 * case we don't care whether the device is writable. Without the 27268 * NDELAY flag, however, we need to check whether the medium is write-protected. 27269 */ 27270 un->un_f_chk_wp_open = TRUE; 27271 27272 /* 27273 * We need to start a SCSI watch thread to monitor the media state; 27274 * when media is inserted or ejected, notify syseventd. 27275 */ 27276 un->un_f_monitor_media_state = TRUE; 27277 27278 /* 27279 * Some devices don't support the START_STOP_UNIT command. 27280 * Therefore, we'd better check if a device supports it 27281 * before sending it. 27282 */ 27283 un->un_f_check_start_stop = TRUE; 27284 27285 /* 27286 * support eject media ioctl: 27287 * FDEJECT, DKIOCEJECT, CDROMEJECT 27288 */ 27289 un->un_f_eject_media_supported = TRUE; 27290 27291 /* 27292 * Because many removable-media devices don't support 27293 * LOG_SENSE, we can't use this command to check whether 27294 * a removable media device supports power management. 27295 * We assume that they support power management via the 27296 * START_STOP_UNIT command and can be spun up and down 27297 * without limitations. 27298 */ 27299 un->un_f_pm_supported = TRUE; 27300 27301 /* 27302 * We need to create a zero-length (Boolean) property, 27303 * removable-media, for the removable media devices. 27304 * Note that the return value of the property creation is not 27305 * checked, since if we are unable to create the property 27306 * we do not want the attach to fail altogether. This is consistent 27307 * with other property creation in attach. 27308 */ 27309 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 27310 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 27311 27312 } else { 27313 /* 27314 * Create a device ID for the device 27315 */ 27316 un->un_f_devid_supported = TRUE; 27317 27318 /* 27319 * Spin up non-removable media devices once they are attached 27320 */ 27321 un->un_f_attach_spinup = TRUE; 27322 27323 /* 27324 * According to the SCSI specification, sense data has two kinds of 27325 * format: fixed format and descriptor format. At present, we 27326 * don't support descriptor format sense data for removable 27327 * media. 27328 */ 27329 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 27330 un->un_f_descr_format_supported = TRUE; 27331 } 27332 27333 /* 27334 * kstats are created only for non-removable media devices. 27335 * 27336 * Set this in sd.conf to 0 in order to disable kstats. The 27337 * default is 1, so they are enabled by default. 27338 */ 27339 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 27340 SD_DEVINFO(un), DDI_PROP_DONTPASS, 27341 "enable-partition-kstats", 1)); 27342 27343 /* 27344 * Check if the HBA has set the "pm-capable" property. 27345 * If "pm-capable" exists and is non-zero then we can 27346 * power manage the device without checking the start/stop 27347 * cycle count log sense page. 27348 * 27349 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 27350 * then we should not power manage the device. 27351 * 27352 * If "pm-capable" doesn't exist then pm_capable_prop will 27353 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is
 *	given in system block size, reqlength in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * number and save the offset to the beginning of dk_label.
		 * (A worked example follows this function.)
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of the target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-vendor/drive sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}
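/*
 * Worked example (illustrative numbers, not from the original source):
 * with un_sys_blocksize = 512 and un_tgt_blocksize = 2048, a TG_READ
 * of start_block = 3, reqlength = 1024 maps as follows in the
 * NOT_DEVBSIZE() path above:
 *
 *	first_byte  = 3 * 512                     = 1536
 *	real_addr   = 1536 / 2048                 = 0
 *	end_block   = (1536 + 1024 + 2047) / 2048 = 2
 *	buffer_size = (2 - 0) * 2048              = 4096
 *
 * first_byte is not a multiple of 2048, so the request is unaligned:
 * a 4096-byte bounce buffer (dkl) is allocated, two target blocks are
 * read into it, and the requested 1024 bytes are copied out starting
 * at byte offset SD_TGTBYTEOFFSET() = 1536.
 */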
/*
 * sd_tg_getinfo:
 *	Provides capacity, block size, geometry, and attribute
 *	information for cmlb via sd_tgops.
 */
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
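/*
 * Illustrative sketch (assumed usage, not part of this file): cmlb
 * reaches the two entry points above through the tg_ops vector that
 * sd registers at attach time via cmlb_attach(). A capacity query
 * from the label code thus reduces to roughly:
 *
 *	diskaddr_t	cap;
 *	int		rval;
 *
 *	rval = sd_tg_getinfo(devi, TG_GETCAPACITY, (void *)&cap,
 *	    (void *)(uintptr_t)SD_PATH_DIRECT);
 *
 * with tg_cookie carrying the path flag that sd_tg_getinfo decodes
 * back into path_flag.
 */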