/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
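/*
 * Illustrative sketch only (the SD_EXAMPLE_SKETCHES guard and the
 * function name are hypothetical, so this is compiled out of the
 * build): one way the decision described above could be expressed.
 * The real check is performed during attach, in sd_unit_attach().
 */
#ifdef SD_EXAMPLE_SKETCHES
static boolean_t
sd_example_hba_is_fibre(dev_info_t *devi)
{
	/* "interconnect-type" is reported by the HBA, when supported. */
	switch (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
	    "interconnect-type", -1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		return (B_TRUE);	/* assume ssd (FC) behaviors */
	default:
		/* Property absent or unrecognized: parallel SCSI. */
		return (B_FALSE);
	}
}
#endif	/* SD_EXAMPLE_SKETCHES */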
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string
#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))
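/*
 * Illustrative sketch only (hypothetical name, compiled out via the
 * SD_EXAMPLE_SKETCHES guard): reading a lun count back out of the
 * chain above. The real accessor is sd_scsi_get_target_lun_count(),
 * declared further below.
 */
#ifdef SD_EXAMPLE_SKETCHES
static int
sd_example_lun_count(dev_info_t *pdip, int target)
{
	struct sd_scsi_hba_tgt_lun	*cp;
	int				count = -1;

	if ((target < 0) || (target >= NTARGETS_WIDE))
		return (-1);

	mutex_enter(&sd_scsi_target_lun_mutex);
	/* One node per controller; match on the parent dip. */
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			count = cp->nlun[target];
			break;
		}
	}
	mutex_exit(&sd_scsi_target_lun_mutex);

	return (count);
}
#endif	/* SD_EXAMPLE_SKETCHES */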
/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
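/*
 * Illustrative sketch only (hypothetical name and simplified locking,
 * compiled out via the SD_EXAMPLE_SKETCHES guard) of the caching idea
 * above: a target whose cached result is SCSIPROBE_FAILURE is skipped
 * without re-probing; any other outcome is refreshed by a real
 * scsi_probe(). The "target" property lookup here is an assumption for
 * the sketch; the actual logic, including cache-entry allocation, is
 * in sd_scsi_probe_with_cache().
 */
#ifdef SD_EXAMPLE_SKETCHES
static int
sd_example_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache	*cp;
	dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
	int		tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev,
	    DDI_PROP_DONTPASS, "target", -1);
	int		rval;

	if ((tgt < 0) || (tgt >= NTARGETS_WIDE))
		return (scsi_probe(devp, waitfn));	/* not cacheable */

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip)
			break;
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);

	if ((cp != NULL) && (cp->cache[tgt] == SCSIPROBE_FAILURE)) {
		/* Known-absent target: skip the (slow) physical probe. */
		return (SCSIPROBE_FAILURE);
	}

	rval = scsi_probe(devp, waitfn);
	if (cp != NULL)
		cp->cache[tgt] = rval;	/* remember the outcome */
	return (rval);
}
#endif	/* SD_EXAMPLE_SKETCHES */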
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM" (see the illustrative sketch following the table).
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
		| SD_CONF_BSET_CTYPE
		| SD_CONF_BSET_NRR_COUNT
		| SD_CONF_BSET_FAB_DEVID
		| SD_CONF_BSET_NOCACHE
		| SD_CONF_BSET_BSY_RETRY_COUNT
		| SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_TRK_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4
		| SD_CONF_BSET_RST_RETRIES
		| SD_CONF_BSET_RSV_REL_TIME
		| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
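/*
 * Illustrative sketch only (hypothetical name, compiled out via the
 * SD_EXAMPLE_SKETCHES guard) of the blank-compression matching rule
 * described above the table: a run of blanks on each side compares
 * equal regardless of length, and the table id only has to match as
 * far as it is defined. Assumes NUL-terminated strings for simplicity;
 * the real comparison is sd_blank_cmp().
 */
#ifdef SD_EXAMPLE_SKETCHES
static int
sd_example_blank_cmp(const char *tbl, const char *inq)
{
	while (*tbl != '\0') {
		if ((*tbl == ' ') && (*inq == ' ')) {
			/* Compress consecutive blanks on both sides. */
			while (*tbl == ' ')
				tbl++;
			while (*inq == ' ')
				inq++;
		} else if (*tbl == *inq) {
			tbl++;
			inq++;
		} else {
			return (0);	/* mismatch */
		}
	}
	return (1);	/* table id matched as far as it is defined */
}
#endif	/* SD_EXAMPLE_SKETCHES */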
#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
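/*
 * Illustrative sketch only (hypothetical name, compiled out via the
 * SD_EXAMPLE_SKETCHES guard) of how a table like sd_cdbtab drives CDB
 * selection. The columns appear to be: CDB group size, command group
 * code, maximum addressable LBA, and maximum transfer length in
 * blocks. A local table is used here so no assumptions are made about
 * the sd_cdbinfo field names (see sddef.h); the real selection is
 * done in sd_setup_rw_pkt().
 */
#ifdef SD_EXAMPLE_SKETCHES
static int
sd_example_pick_cdb_group(uint64_t lba, uint32_t nblks)
{
	/* Mirrors sd_cdbtab's limits (uint64 wrap ignored for brevity). */
	static const struct {
		uint64_t	maxlba;
		uint32_t	maxlen;
	} limits[] = {
		{ 0x1FFFFF,		0xFF },		/* GROUP0:  6-byte */
		{ 0xFFFFFFFF,		0xFFFF },	/* GROUP1: 10-byte */
		{ 0xFFFFFFFF,		0xFFFFFFFF },	/* GROUP5: 12-byte */
		{ 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFF },	/* GROUP4: 16-byte */
	};
	int	i;

	/* Pick the first (smallest) group that can address the request. */
	for (i = 0; i < (int)(sizeof (limits) / sizeof (limits[0])); i++) {
		if (((lba + nblks - 1) <= limits[i].maxlba) &&
		    (nblks <= limits[i].maxlen))
			return (i);	/* index into sd_cdbtab */
	}
	return (-1);	/* request cannot be addressed by any CDB group */
}
#endif	/* SD_EXAMPLE_SKETCHES */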
/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void  sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);
/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
int flag); 1464 static int sd_mhdioc_release(dev_t dev); 1465 static int sd_mhdioc_register_devid(dev_t dev); 1466 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1467 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1468 1469 /* 1470 * SCSI removable prototypes 1471 */ 1472 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1473 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1474 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1475 static int sr_pause_resume(dev_t dev, int mode); 1476 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1477 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1478 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1482 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1483 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1484 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1487 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1488 static int sr_eject(dev_t dev); 1489 static void sr_ejected(register struct sd_lun *un); 1490 static int sr_check_wp(dev_t dev); 1491 static int sd_check_media(dev_t dev, enum dkio_state state); 1492 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1493 static void sd_delayed_cv_broadcast(void *arg); 1494 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1495 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1496 1497 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1498 1499 /* 1500 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1501 */ 1502 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1503 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1504 static void sd_wm_cache_destructor(void *wm, void *un); 1505 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1506 daddr_t endb, ushort_t typ); 1507 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1508 daddr_t endb); 1509 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1510 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1511 static void sd_read_modify_write_task(void * arg); 1512 static int 1513 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1514 struct buf **bpp); 1515 1516 1517 /* 1518 * Function prototypes for failfast support. 
1519 */ 1520 static void sd_failfast_flushq(struct sd_lun *un); 1521 static int sd_failfast_flushq_callback(struct buf *bp); 1522 1523 /* 1524 * Function prototypes to check for lsi devices 1525 */ 1526 static void sd_is_lsi(struct sd_lun *un); 1527 1528 /* 1529 * Function prototypes for x86 support 1530 */ 1531 #if defined(__i386) || defined(__amd64) 1532 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1533 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1534 #endif 1535 1536 1537 /* Function prototypes for cmlb */ 1538 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1539 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1540 1541 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1542 1543 /* 1544 * Constants for failfast support: 1545 * 1546 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1547 * failfast processing being performed. 1548 * 1549 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1550 * failfast processing on all bufs with B_FAILFAST set. 1551 */ 1552 1553 #define SD_FAILFAST_INACTIVE 0 1554 #define SD_FAILFAST_ACTIVE 1 1555 1556 /* 1557 * Bitmask to control behavior of buf(9S) flushes when a transition to 1558 * the failfast state occurs. Optional bits include: 1559 * 1560 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1561 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1562 * be flushed. 1563 * 1564 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1565 * driver, in addition to the regular wait queue. This includes the xbuf 1566 * queues. When clear, only the driver's wait queue will be flushed. 1567 */ 1568 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1569 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1570 1571 /* 1572 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1573 * to flush all queues within the driver. 1574 */ 1575 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1576 1577 1578 /* 1579 * SD Testing Fault Injection 1580 */ 1581 #ifdef SD_FAULT_INJECTION 1582 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1583 static void sd_faultinjection(struct scsi_pkt *pktp); 1584 static void sd_injection_log(char *buf, struct sd_lun *un); 1585 #endif 1586 1587 /* 1588 * Device driver ops vector 1589 */ 1590 static struct cb_ops sd_cb_ops = { 1591 sdopen, /* open */ 1592 sdclose, /* close */ 1593 sdstrategy, /* strategy */ 1594 nodev, /* print */ 1595 sddump, /* dump */ 1596 sdread, /* read */ 1597 sdwrite, /* write */ 1598 sdioctl, /* ioctl */ 1599 nodev, /* devmap */ 1600 nodev, /* mmap */ 1601 nodev, /* segmap */ 1602 nochpoll, /* poll */ 1603 sd_prop_op, /* cb_prop_op */ 1604 0, /* streamtab */ 1605 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1606 CB_REV, /* cb_rev */ 1607 sdaread, /* async I/O read entry point */ 1608 sdawrite /* async I/O write entry point */ 1609 }; 1610 1611 static struct dev_ops sd_ops = { 1612 DEVO_REV, /* devo_rev, */ 1613 0, /* refcnt */ 1614 sdinfo, /* info */ 1615 nulldev, /* identify */ 1616 sdprobe, /* probe */ 1617 sdattach, /* attach */ 1618 sddetach, /* detach */ 1619 nodev, /* reset */ 1620 &sd_cb_ops, /* driver operations */ 1621 NULL, /* bus operations */ 1622 sdpower /* power */ 1623 }; 1624 1625 1626 /* 1627 * This is the loadable module wrapper. 
1628 */
1629 #include <sys/modctl.h>
1630
1631 static struct modldrv modldrv = {
1632 	&mod_driverops,		/* Type of module. This one is a driver */
1633 	SD_MODULE_NAME,		/* Module name. */
1634 	&sd_ops			/* driver ops */
1635 };
1636
1637
1638 static struct modlinkage modlinkage = {
1639 	MODREV_1,
1640 	&modldrv,
1641 	NULL
1642 };
1643
1644 static cmlb_tg_ops_t sd_tgops = {
1645 	TG_DK_OPS_VERSION_1,
1646 	sd_tg_rdwr,
1647 	sd_tg_getinfo
1648 };
1649
1650 static struct scsi_asq_key_strings sd_additional_codes[] = {
1651 	0x81, 0, "Logical Unit is Reserved",
1652 	0x85, 0, "Audio Address Not Valid",
1653 	0xb6, 0, "Media Load Mechanism Failed",
1654 	0xB9, 0, "Audio Play Operation Aborted",
1655 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1656 	0x53, 2, "Medium removal prevented",
1657 	0x6f, 0, "Authentication failed during key exchange",
1658 	0x6f, 1, "Key not present",
1659 	0x6f, 2, "Key not established",
1660 	0x6f, 3, "Read without proper authentication",
1661 	0x6f, 4, "Mismatched region to this logical unit",
1662 	0x6f, 5, "Region reset count error",
1663 	0xffff, 0x0, NULL
1664 };
1665
1666
1667 /*
1668  * Struct for passing printing information for sense data messages
1669  */
1670 struct sd_sense_info {
1671 	int	ssi_severity;
1672 	int	ssi_pfa_flag;
1673 };
1674
1675 /*
1676  * Table of function pointers for iostart-side routines. Separate "chains"
1677  * of layered function calls are formed by placing the function pointers
1678  * sequentially in the desired order. Functions are called according to an
1679  * incrementing table index ordering. The last function in each chain must
1680  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1681  * in the sd_iodone_chain[] array.
1682  *
1683  * Note: It may seem more natural to organize both the iostart and iodone
1684  * functions together, into an array of structures (or some similar
1685  * organization) with a common index, rather than two separate arrays which
1686  * must be maintained in synchronization. The purpose of this division is
1687  * to achieve improved performance: individual arrays allow for more
1688  * effective cache line utilization on certain platforms.
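 *
 * For illustration only: an IO begun on the disk-drive chain with PM
 * enabled (index 0) is passed down the chain by the SD_BEGIN_IOSTART()
 * and SD_NEXT_IOSTART() macros defined later in this file:
 *
 *	SD_BEGIN_IOSTART(0, un, bp)	-> sd_mapblockaddr_iostart()
 *	SD_NEXT_IOSTART(0, un, bp)	-> sd_pm_iostart()	(index 1)
 *	SD_NEXT_IOSTART(1, un, bp)	-> sd_core_iostart()	(index 2)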
1689 */ 1690 1691 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1692 1693 1694 static sd_chain_t sd_iostart_chain[] = { 1695 1696 /* Chain for buf IO for disk drive targets (PM enabled) */ 1697 sd_mapblockaddr_iostart, /* Index: 0 */ 1698 sd_pm_iostart, /* Index: 1 */ 1699 sd_core_iostart, /* Index: 2 */ 1700 1701 /* Chain for buf IO for disk drive targets (PM disabled) */ 1702 sd_mapblockaddr_iostart, /* Index: 3 */ 1703 sd_core_iostart, /* Index: 4 */ 1704 1705 /* Chain for buf IO for removable-media targets (PM enabled) */ 1706 sd_mapblockaddr_iostart, /* Index: 5 */ 1707 sd_mapblocksize_iostart, /* Index: 6 */ 1708 sd_pm_iostart, /* Index: 7 */ 1709 sd_core_iostart, /* Index: 8 */ 1710 1711 /* Chain for buf IO for removable-media targets (PM disabled) */ 1712 sd_mapblockaddr_iostart, /* Index: 9 */ 1713 sd_mapblocksize_iostart, /* Index: 10 */ 1714 sd_core_iostart, /* Index: 11 */ 1715 1716 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1717 sd_mapblockaddr_iostart, /* Index: 12 */ 1718 sd_checksum_iostart, /* Index: 13 */ 1719 sd_pm_iostart, /* Index: 14 */ 1720 sd_core_iostart, /* Index: 15 */ 1721 1722 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1723 sd_mapblockaddr_iostart, /* Index: 16 */ 1724 sd_checksum_iostart, /* Index: 17 */ 1725 sd_core_iostart, /* Index: 18 */ 1726 1727 /* Chain for USCSI commands (all targets) */ 1728 sd_pm_iostart, /* Index: 19 */ 1729 sd_core_iostart, /* Index: 20 */ 1730 1731 /* Chain for checksumming USCSI commands (all targets) */ 1732 sd_checksum_uscsi_iostart, /* Index: 21 */ 1733 sd_pm_iostart, /* Index: 22 */ 1734 sd_core_iostart, /* Index: 23 */ 1735 1736 /* Chain for "direct" USCSI commands (all targets) */ 1737 sd_core_iostart, /* Index: 24 */ 1738 1739 /* Chain for "direct priority" USCSI commands (all targets) */ 1740 sd_core_iostart, /* Index: 25 */ 1741 }; 1742 1743 /* 1744 * Macros to locate the first function of each iostart chain in the 1745 * sd_iostart_chain[] array. These are located by the index in the array. 1746 */ 1747 #define SD_CHAIN_DISK_IOSTART 0 1748 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1749 #define SD_CHAIN_RMMEDIA_IOSTART 5 1750 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1751 #define SD_CHAIN_CHKSUM_IOSTART 12 1752 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1753 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1754 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1755 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1756 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1757 1758 1759 /* 1760 * Table of function pointers for the iodone-side routines for the driver- 1761 * internal layering mechanism. The calling sequence for iodone routines 1762 * uses a decrementing table index, so the last routine called in a chain 1763 * must be at the lowest array index location for that chain. The last 1764 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1765 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1766 * of the functions in an iodone side chain must correspond to the ordering 1767 * of the iostart routines for that chain. Note that there is no iodone 1768 * side routine that corresponds to sd_core_iostart(), so there is no 1769 * entry in the table for this. 
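 *
 * For illustration only: completion processing for the disk-drive chain
 * with PM enabled begins at the chain's highest index and walks downward
 * via the SD_BEGIN_IODONE() and SD_NEXT_IODONE() macros defined later in
 * this file:
 *
 *	SD_BEGIN_IODONE(2, un, bp)	-> sd_pm_iodone()
 *	SD_NEXT_IODONE(2, un, bp)	-> sd_mapblockaddr_iodone() (index 1)
 *	SD_NEXT_IODONE(1, un, bp)	-> sd_buf_iodone()	(index 0)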
1770 */
1771
1772 static sd_chain_t sd_iodone_chain[] = {
1773
1774 	/* Chain for buf IO for disk drive targets (PM enabled) */
1775 	sd_buf_iodone,			/* Index: 0 */
1776 	sd_mapblockaddr_iodone,		/* Index: 1 */
1777 	sd_pm_iodone,			/* Index: 2 */
1778
1779 	/* Chain for buf IO for disk drive targets (PM disabled) */
1780 	sd_buf_iodone,			/* Index: 3 */
1781 	sd_mapblockaddr_iodone,		/* Index: 4 */
1782
1783 	/* Chain for buf IO for removable-media targets (PM enabled) */
1784 	sd_buf_iodone,			/* Index: 5 */
1785 	sd_mapblockaddr_iodone,		/* Index: 6 */
1786 	sd_mapblocksize_iodone,		/* Index: 7 */
1787 	sd_pm_iodone,			/* Index: 8 */
1788
1789 	/* Chain for buf IO for removable-media targets (PM disabled) */
1790 	sd_buf_iodone,			/* Index: 9 */
1791 	sd_mapblockaddr_iodone,		/* Index: 10 */
1792 	sd_mapblocksize_iodone,		/* Index: 11 */
1793
1794 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1795 	sd_buf_iodone,			/* Index: 12 */
1796 	sd_mapblockaddr_iodone,		/* Index: 13 */
1797 	sd_checksum_iodone,		/* Index: 14 */
1798 	sd_pm_iodone,			/* Index: 15 */
1799
1800 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1801 	sd_buf_iodone,			/* Index: 16 */
1802 	sd_mapblockaddr_iodone,		/* Index: 17 */
1803 	sd_checksum_iodone,		/* Index: 18 */
1804
1805 	/* Chain for USCSI commands (non-checksum targets) */
1806 	sd_uscsi_iodone,		/* Index: 19 */
1807 	sd_pm_iodone,			/* Index: 20 */
1808
1809 	/* Chain for USCSI commands (checksum targets) */
1810 	sd_uscsi_iodone,		/* Index: 21 */
1811 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1812 	sd_pm_iodone,			/* Index: 23 */
1813
1814 	/* Chain for "direct" USCSI commands (all targets) */
1815 	sd_uscsi_iodone,		/* Index: 24 */
1816
1817 	/* Chain for "direct priority" USCSI commands (all targets) */
1818 	sd_uscsi_iodone,		/* Index: 25 */
1819 };
1820
1821
1822 /*
1823  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1824  * each iodone-side chain. These are located by the array index, but as the
1825  * iodone side functions are called in a decrementing-index order, the
1826  * highest index number in each chain must be specified (as these correspond
1827  * to the first function in the iodone chain that will be called by the core
1828  * at IO completion time).
1829  */
1830
1831 #define	SD_CHAIN_DISK_IODONE		2
1832 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1833 #define	SD_CHAIN_RMMEDIA_IODONE		8
1834 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1835 #define	SD_CHAIN_CHKSUM_IODONE		15
1836 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1837 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1838 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
1839 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1840 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1841
1842
1843
1844
1845 /*
1846  * Array to map a layering chain index to the appropriate initpkt routine.
1847  * The redundant entries are present so that the index used for accessing
1848  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1849  * with this table as well.
1850 */
1851 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1852
1853 static sd_initpkt_t	sd_initpkt_map[] = {
1854
1855 	/* Chain for buf IO for disk drive targets (PM enabled) */
1856 	sd_initpkt_for_buf,		/* Index: 0 */
1857 	sd_initpkt_for_buf,		/* Index: 1 */
1858 	sd_initpkt_for_buf,		/* Index: 2 */
1859
1860 	/* Chain for buf IO for disk drive targets (PM disabled) */
1861 	sd_initpkt_for_buf,		/* Index: 3 */
1862 	sd_initpkt_for_buf,		/* Index: 4 */
1863
1864 	/* Chain for buf IO for removable-media targets (PM enabled) */
1865 	sd_initpkt_for_buf,		/* Index: 5 */
1866 	sd_initpkt_for_buf,		/* Index: 6 */
1867 	sd_initpkt_for_buf,		/* Index: 7 */
1868 	sd_initpkt_for_buf,		/* Index: 8 */
1869
1870 	/* Chain for buf IO for removable-media targets (PM disabled) */
1871 	sd_initpkt_for_buf,		/* Index: 9 */
1872 	sd_initpkt_for_buf,		/* Index: 10 */
1873 	sd_initpkt_for_buf,		/* Index: 11 */
1874
1875 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1876 	sd_initpkt_for_buf,		/* Index: 12 */
1877 	sd_initpkt_for_buf,		/* Index: 13 */
1878 	sd_initpkt_for_buf,		/* Index: 14 */
1879 	sd_initpkt_for_buf,		/* Index: 15 */
1880
1881 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1882 	sd_initpkt_for_buf,		/* Index: 16 */
1883 	sd_initpkt_for_buf,		/* Index: 17 */
1884 	sd_initpkt_for_buf,		/* Index: 18 */
1885
1886 	/* Chain for USCSI commands (non-checksum targets) */
1887 	sd_initpkt_for_uscsi,		/* Index: 19 */
1888 	sd_initpkt_for_uscsi,		/* Index: 20 */
1889
1890 	/* Chain for USCSI commands (checksum targets) */
1891 	sd_initpkt_for_uscsi,		/* Index: 21 */
1892 	sd_initpkt_for_uscsi,		/* Index: 22 */
1893 	sd_initpkt_for_uscsi,		/* Index: 23 */
1894
1895 	/* Chain for "direct" USCSI commands (all targets) */
1896 	sd_initpkt_for_uscsi,		/* Index: 24 */
1897
1898 	/* Chain for "direct priority" USCSI commands (all targets) */
1899 	sd_initpkt_for_uscsi,		/* Index: 25 */
1900
1901 };
1902
1903
1904 /*
1905  * Array to map a layering chain index to the appropriate destroypkt routine.
1906  * The redundant entries are present so that the index used for accessing
1907  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1908  * with this table as well.
1909 */
1910 typedef void (*sd_destroypkt_t)(struct buf *);
1911
1912 static sd_destroypkt_t	sd_destroypkt_map[] = {
1913
1914 	/* Chain for buf IO for disk drive targets (PM enabled) */
1915 	sd_destroypkt_for_buf,		/* Index: 0 */
1916 	sd_destroypkt_for_buf,		/* Index: 1 */
1917 	sd_destroypkt_for_buf,		/* Index: 2 */
1918
1919 	/* Chain for buf IO for disk drive targets (PM disabled) */
1920 	sd_destroypkt_for_buf,		/* Index: 3 */
1921 	sd_destroypkt_for_buf,		/* Index: 4 */
1922
1923 	/* Chain for buf IO for removable-media targets (PM enabled) */
1924 	sd_destroypkt_for_buf,		/* Index: 5 */
1925 	sd_destroypkt_for_buf,		/* Index: 6 */
1926 	sd_destroypkt_for_buf,		/* Index: 7 */
1927 	sd_destroypkt_for_buf,		/* Index: 8 */
1928
1929 	/* Chain for buf IO for removable-media targets (PM disabled) */
1930 	sd_destroypkt_for_buf,		/* Index: 9 */
1931 	sd_destroypkt_for_buf,		/* Index: 10 */
1932 	sd_destroypkt_for_buf,		/* Index: 11 */
1933
1934 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1935 	sd_destroypkt_for_buf,		/* Index: 12 */
1936 	sd_destroypkt_for_buf,		/* Index: 13 */
1937 	sd_destroypkt_for_buf,		/* Index: 14 */
1938 	sd_destroypkt_for_buf,		/* Index: 15 */
1939
1940 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1941 	sd_destroypkt_for_buf,		/* Index: 16 */
1942 	sd_destroypkt_for_buf,		/* Index: 17 */
1943 	sd_destroypkt_for_buf,		/* Index: 18 */
1944
1945 	/* Chain for USCSI commands (non-checksum targets) */
1946 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1947 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1948
1949 	/* Chain for USCSI commands (checksum targets) */
1950 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1951 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1952 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1953
1954 	/* Chain for "direct" USCSI commands (all targets) */
1955 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1956
1957 	/* Chain for "direct priority" USCSI commands (all targets) */
1958 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1959
1960 };
1961
1962
1963
1964 /*
1965  * Array to map a layering chain index to the appropriate chain "type".
1966  * The chain type indicates a specific property/usage of the chain.
1967  * The redundant entries are present so that the index used for accessing
1968  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1969  * with this table as well.
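 *
 * (The chain type recorded for a given IO is consulted via the
 * SD_IS_BUFIO() and SD_IS_DIRECT_PRIORITY() macros defined after the
 * table below.)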
1970 */
1971
1972 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1973 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1974 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1975 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1976 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1977 						/* (for error recovery) */
1978
1979 static int sd_chain_type_map[] = {
1980
1981 	/* Chain for buf IO for disk drive targets (PM enabled) */
1982 	SD_CHAIN_BUFIO,			/* Index: 0 */
1983 	SD_CHAIN_BUFIO,			/* Index: 1 */
1984 	SD_CHAIN_BUFIO,			/* Index: 2 */
1985
1986 	/* Chain for buf IO for disk drive targets (PM disabled) */
1987 	SD_CHAIN_BUFIO,			/* Index: 3 */
1988 	SD_CHAIN_BUFIO,			/* Index: 4 */
1989
1990 	/* Chain for buf IO for removable-media targets (PM enabled) */
1991 	SD_CHAIN_BUFIO,			/* Index: 5 */
1992 	SD_CHAIN_BUFIO,			/* Index: 6 */
1993 	SD_CHAIN_BUFIO,			/* Index: 7 */
1994 	SD_CHAIN_BUFIO,			/* Index: 8 */
1995
1996 	/* Chain for buf IO for removable-media targets (PM disabled) */
1997 	SD_CHAIN_BUFIO,			/* Index: 9 */
1998 	SD_CHAIN_BUFIO,			/* Index: 10 */
1999 	SD_CHAIN_BUFIO,			/* Index: 11 */
2000
2001 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2002 	SD_CHAIN_BUFIO,			/* Index: 12 */
2003 	SD_CHAIN_BUFIO,			/* Index: 13 */
2004 	SD_CHAIN_BUFIO,			/* Index: 14 */
2005 	SD_CHAIN_BUFIO,			/* Index: 15 */
2006
2007 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2008 	SD_CHAIN_BUFIO,			/* Index: 16 */
2009 	SD_CHAIN_BUFIO,			/* Index: 17 */
2010 	SD_CHAIN_BUFIO,			/* Index: 18 */
2011
2012 	/* Chain for USCSI commands (non-checksum targets) */
2013 	SD_CHAIN_USCSI,			/* Index: 19 */
2014 	SD_CHAIN_USCSI,			/* Index: 20 */
2015
2016 	/* Chain for USCSI commands (checksum targets) */
2017 	SD_CHAIN_USCSI,			/* Index: 21 */
2018 	SD_CHAIN_USCSI,			/* Index: 22 */
2019 	SD_CHAIN_USCSI,			/* Index: 23 */
2020
2021 	/* Chain for "direct" USCSI commands (all targets) */
2022 	SD_CHAIN_DIRECT,		/* Index: 24 */
2023
2024 	/* Chain for "direct priority" USCSI commands (all targets) */
2025 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2026 };
2027
2028
2029 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2030 #define	SD_IS_BUFIO(xp)			\
2031 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2032
2033 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2034 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2035 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2036
2037
2038
2039 /*
2040  * Struct, array, and macros to map a specific chain to the appropriate
2041  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2042  *
2043  * The sd_chain_index_map[] array is used at attach time to set the various
2044  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2045  * chain to be used with the instance. This allows different instances to use
2046  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2047  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2048  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2049  * dynamically and without the use of locking, and (2) a layer to update the
2050  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2051  * to allow for deferred processing of an IO within the same chain from a
2052  * different execution context.
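 *
 * As an illustrative sketch only (not a copy of the driver's actual
 * attach or xbuf-init code), the intended usage is along these lines:
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *	...
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;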
2053 */ 2054 2055 struct sd_chain_index { 2056 int sci_iostart_index; 2057 int sci_iodone_index; 2058 }; 2059 2060 static struct sd_chain_index sd_chain_index_map[] = { 2061 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2062 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2063 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2064 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2065 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2066 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2067 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2068 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2069 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2070 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2071 }; 2072 2073 2074 /* 2075 * The following are indexes into the sd_chain_index_map[] array. 2076 */ 2077 2078 /* un->un_buf_chain_type must be set to one of these */ 2079 #define SD_CHAIN_INFO_DISK 0 2080 #define SD_CHAIN_INFO_DISK_NO_PM 1 2081 #define SD_CHAIN_INFO_RMMEDIA 2 2082 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2083 #define SD_CHAIN_INFO_CHKSUM 4 2084 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2085 2086 /* un->un_uscsi_chain_type must be set to one of these */ 2087 #define SD_CHAIN_INFO_USCSI_CMD 6 2088 /* USCSI with PM disabled is the same as DIRECT */ 2089 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2090 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2091 2092 /* un->un_direct_chain_type must be set to one of these */ 2093 #define SD_CHAIN_INFO_DIRECT_CMD 8 2094 2095 /* un->un_priority_chain_type must be set to one of these */ 2096 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2097 2098 /* size for devid inquiries */ 2099 #define MAX_INQUIRY_SIZE 0xF0 2100 2101 /* 2102 * Macros used by functions to pass a given buf(9S) struct along to the 2103 * next function in the layering chain for further processing. 2104 * 2105 * In the following macros, passing more than three arguments to the called 2106 * routines causes the optimizer for the SPARC compiler to stop doing tail 2107 * call elimination which results in significant performance degradation. 2108 */ 2109 #define SD_BEGIN_IOSTART(index, un, bp) \ 2110 ((*(sd_iostart_chain[index]))(index, un, bp)) 2111 2112 #define SD_BEGIN_IODONE(index, un, bp) \ 2113 ((*(sd_iodone_chain[index]))(index, un, bp)) 2114 2115 #define SD_NEXT_IOSTART(index, un, bp) \ 2116 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2117 2118 #define SD_NEXT_IODONE(index, un, bp) \ 2119 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2120 2121 /* 2122 * Function: _init 2123 * 2124 * Description: This is the driver _init(9E) entry point. 2125 * 2126 * Return Code: Returns the value from mod_install(9F) or 2127 * ddi_soft_state_init(9F) as appropriate. 2128 * 2129 * Context: Called when driver module loaded. 
2130 */ 2131 2132 int 2133 _init(void) 2134 { 2135 int err; 2136 2137 /* establish driver name from module name */ 2138 sd_label = mod_modname(&modlinkage); 2139 2140 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2141 SD_MAXUNIT); 2142 2143 if (err != 0) { 2144 return (err); 2145 } 2146 2147 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2148 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2149 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2150 2151 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2152 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2153 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2154 2155 /* 2156 * it's ok to init here even for fibre device 2157 */ 2158 sd_scsi_probe_cache_init(); 2159 2160 sd_scsi_target_lun_init(); 2161 2162 /* 2163 * Creating taskq before mod_install ensures that all callers (threads) 2164 * that enter the module after a successfull mod_install encounter 2165 * a valid taskq. 2166 */ 2167 sd_taskq_create(); 2168 2169 err = mod_install(&modlinkage); 2170 if (err != 0) { 2171 /* delete taskq if install fails */ 2172 sd_taskq_delete(); 2173 2174 mutex_destroy(&sd_detach_mutex); 2175 mutex_destroy(&sd_log_mutex); 2176 mutex_destroy(&sd_label_mutex); 2177 2178 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2179 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2180 cv_destroy(&sd_tr.srq_inprocess_cv); 2181 2182 sd_scsi_probe_cache_fini(); 2183 2184 sd_scsi_target_lun_fini(); 2185 2186 ddi_soft_state_fini(&sd_state); 2187 return (err); 2188 } 2189 2190 return (err); 2191 } 2192 2193 2194 /* 2195 * Function: _fini 2196 * 2197 * Description: This is the driver _fini(9E) entry point. 2198 * 2199 * Return Code: Returns the value from mod_remove(9F) 2200 * 2201 * Context: Called when driver module is unloaded. 2202 */ 2203 2204 int 2205 _fini(void) 2206 { 2207 int err; 2208 2209 if ((err = mod_remove(&modlinkage)) != 0) { 2210 return (err); 2211 } 2212 2213 sd_taskq_delete(); 2214 2215 mutex_destroy(&sd_detach_mutex); 2216 mutex_destroy(&sd_log_mutex); 2217 mutex_destroy(&sd_label_mutex); 2218 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2219 2220 sd_scsi_probe_cache_fini(); 2221 2222 sd_scsi_target_lun_fini(); 2223 2224 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2225 cv_destroy(&sd_tr.srq_inprocess_cv); 2226 2227 ddi_soft_state_fini(&sd_state); 2228 2229 return (err); 2230 } 2231 2232 2233 /* 2234 * Function: _info 2235 * 2236 * Description: This is the driver _info(9E) entry point. 2237 * 2238 * Arguments: modinfop - pointer to the driver modinfo structure 2239 * 2240 * Return Code: Returns the value from mod_info(9F). 2241 * 2242 * Context: Kernel thread context 2243 */ 2244 2245 int 2246 _info(struct modinfo *modinfop) 2247 { 2248 return (mod_info(&modlinkage, modinfop)); 2249 } 2250 2251 2252 /* 2253 * The following routines implement the driver message logging facility. 2254 * They provide component- and level- based debug output filtering. 2255 * Output may also be restricted to messages for a single instance by 2256 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2257 * to NULL, then messages for all instances are printed. 2258 * 2259 * These routines have been cloned from each other due to the language 2260 * constraints of macros and variable argument list processing. 2261 */ 2262 2263 2264 /* 2265 * Function: sd_log_err 2266 * 2267 * Description: This routine is called by the SD_ERROR macro for debug 2268 * logging of error conditions. 
2269 * 2270 * Arguments: comp - driver component being logged 2271 * dev - pointer to driver info structure 2272 * fmt - error string and format to be logged 2273 */ 2274 2275 static void 2276 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2277 { 2278 va_list ap; 2279 dev_info_t *dev; 2280 2281 ASSERT(un != NULL); 2282 dev = SD_DEVINFO(un); 2283 ASSERT(dev != NULL); 2284 2285 /* 2286 * Filter messages based on the global component and level masks. 2287 * Also print if un matches the value of sd_debug_un, or if 2288 * sd_debug_un is set to NULL. 2289 */ 2290 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2291 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2292 mutex_enter(&sd_log_mutex); 2293 va_start(ap, fmt); 2294 (void) vsprintf(sd_log_buf, fmt, ap); 2295 va_end(ap); 2296 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2297 mutex_exit(&sd_log_mutex); 2298 } 2299 #ifdef SD_FAULT_INJECTION 2300 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2301 if (un->sd_injection_mask & comp) { 2302 mutex_enter(&sd_log_mutex); 2303 va_start(ap, fmt); 2304 (void) vsprintf(sd_log_buf, fmt, ap); 2305 va_end(ap); 2306 sd_injection_log(sd_log_buf, un); 2307 mutex_exit(&sd_log_mutex); 2308 } 2309 #endif 2310 } 2311 2312 2313 /* 2314 * Function: sd_log_info 2315 * 2316 * Description: This routine is called by the SD_INFO macro for debug 2317 * logging of general purpose informational conditions. 2318 * 2319 * Arguments: comp - driver component being logged 2320 * dev - pointer to driver info structure 2321 * fmt - info string and format to be logged 2322 */ 2323 2324 static void 2325 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2326 { 2327 va_list ap; 2328 dev_info_t *dev; 2329 2330 ASSERT(un != NULL); 2331 dev = SD_DEVINFO(un); 2332 ASSERT(dev != NULL); 2333 2334 /* 2335 * Filter messages based on the global component and level masks. 2336 * Also print if un matches the value of sd_debug_un, or if 2337 * sd_debug_un is set to NULL. 2338 */ 2339 if ((sd_component_mask & component) && 2340 (sd_level_mask & SD_LOGMASK_INFO) && 2341 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2342 mutex_enter(&sd_log_mutex); 2343 va_start(ap, fmt); 2344 (void) vsprintf(sd_log_buf, fmt, ap); 2345 va_end(ap); 2346 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2347 mutex_exit(&sd_log_mutex); 2348 } 2349 #ifdef SD_FAULT_INJECTION 2350 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2351 if (un->sd_injection_mask & component) { 2352 mutex_enter(&sd_log_mutex); 2353 va_start(ap, fmt); 2354 (void) vsprintf(sd_log_buf, fmt, ap); 2355 va_end(ap); 2356 sd_injection_log(sd_log_buf, un); 2357 mutex_exit(&sd_log_mutex); 2358 } 2359 #endif 2360 } 2361 2362 2363 /* 2364 * Function: sd_log_trace 2365 * 2366 * Description: This routine is called by the SD_TRACE macro for debug 2367 * logging of trace conditions (i.e. function entry/exit). 2368 * 2369 * Arguments: comp - driver component being logged 2370 * dev - pointer to driver info structure 2371 * fmt - trace string and format to be logged 2372 */ 2373 2374 static void 2375 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2376 { 2377 va_list ap; 2378 dev_info_t *dev; 2379 2380 ASSERT(un != NULL); 2381 dev = SD_DEVINFO(un); 2382 ASSERT(dev != NULL); 2383 2384 /* 2385 * Filter messages based on the global component and level masks. 2386 * Also print if un matches the value of sd_debug_un, or if 2387 * sd_debug_un is set to NULL. 
2388 */ 2389 if ((sd_component_mask & component) && 2390 (sd_level_mask & SD_LOGMASK_TRACE) && 2391 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2392 mutex_enter(&sd_log_mutex); 2393 va_start(ap, fmt); 2394 (void) vsprintf(sd_log_buf, fmt, ap); 2395 va_end(ap); 2396 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2397 mutex_exit(&sd_log_mutex); 2398 } 2399 #ifdef SD_FAULT_INJECTION 2400 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2401 if (un->sd_injection_mask & component) { 2402 mutex_enter(&sd_log_mutex); 2403 va_start(ap, fmt); 2404 (void) vsprintf(sd_log_buf, fmt, ap); 2405 va_end(ap); 2406 sd_injection_log(sd_log_buf, un); 2407 mutex_exit(&sd_log_mutex); 2408 } 2409 #endif 2410 } 2411 2412 2413 /* 2414 * Function: sdprobe 2415 * 2416 * Description: This is the driver probe(9e) entry point function. 2417 * 2418 * Arguments: devi - opaque device info handle 2419 * 2420 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2421 * DDI_PROBE_FAILURE: If the probe failed. 2422 * DDI_PROBE_PARTIAL: If the instance is not present now, 2423 * but may be present in the future. 2424 */ 2425 2426 static int 2427 sdprobe(dev_info_t *devi) 2428 { 2429 struct scsi_device *devp; 2430 int rval; 2431 int instance; 2432 2433 /* 2434 * if it wasn't for pln, sdprobe could actually be nulldev 2435 * in the "__fibre" case. 2436 */ 2437 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2438 return (DDI_PROBE_DONTCARE); 2439 } 2440 2441 devp = ddi_get_driver_private(devi); 2442 2443 if (devp == NULL) { 2444 /* Ooops... nexus driver is mis-configured... */ 2445 return (DDI_PROBE_FAILURE); 2446 } 2447 2448 instance = ddi_get_instance(devi); 2449 2450 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2451 return (DDI_PROBE_PARTIAL); 2452 } 2453 2454 /* 2455 * Call the SCSA utility probe routine to see if we actually 2456 * have a target at this SCSI nexus. 2457 */ 2458 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2459 case SCSIPROBE_EXISTS: 2460 switch (devp->sd_inq->inq_dtype) { 2461 case DTYPE_DIRECT: 2462 rval = DDI_PROBE_SUCCESS; 2463 break; 2464 case DTYPE_RODIRECT: 2465 /* CDs etc. Can be removable media */ 2466 rval = DDI_PROBE_SUCCESS; 2467 break; 2468 case DTYPE_OPTICAL: 2469 /* 2470 * Rewritable optical driver HP115AA 2471 * Can also be removable media 2472 */ 2473 2474 /* 2475 * Do not attempt to bind to DTYPE_OPTICAL if 2476 * pre solaris 9 sparc sd behavior is required 2477 * 2478 * If first time through and sd_dtype_optical_bind 2479 * has not been set in /etc/system check properties 2480 */ 2481 2482 if (sd_dtype_optical_bind < 0) { 2483 sd_dtype_optical_bind = ddi_prop_get_int 2484 (DDI_DEV_T_ANY, devi, 0, 2485 "optical-device-bind", 1); 2486 } 2487 2488 if (sd_dtype_optical_bind == 0) { 2489 rval = DDI_PROBE_FAILURE; 2490 } else { 2491 rval = DDI_PROBE_SUCCESS; 2492 } 2493 break; 2494 2495 case DTYPE_NOTPRESENT: 2496 default: 2497 rval = DDI_PROBE_FAILURE; 2498 break; 2499 } 2500 break; 2501 default: 2502 rval = DDI_PROBE_PARTIAL; 2503 break; 2504 } 2505 2506 /* 2507 * This routine checks for resource allocation prior to freeing, 2508 * so it will take care of the "smart probing" case where a 2509 * scsi_probe() may or may not have been issued and will *not* 2510 * free previously-freed resources. 2511 */ 2512 scsi_unprobe(devp); 2513 return (rval); 2514 } 2515 2516 2517 /* 2518 * Function: sdinfo 2519 * 2520 * Description: This is the driver getinfo(9e) entry point function. 
2521 * Given the device number, return the devinfo pointer from 2522 * the scsi_device structure or the instance number 2523 * associated with the dev_t. 2524 * 2525 * Arguments: dip - pointer to device info structure 2526 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2527 * DDI_INFO_DEVT2INSTANCE) 2528 * arg - driver dev_t 2529 * resultp - user buffer for request response 2530 * 2531 * Return Code: DDI_SUCCESS 2532 * DDI_FAILURE 2533 */ 2534 /* ARGSUSED */ 2535 static int 2536 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2537 { 2538 struct sd_lun *un; 2539 dev_t dev; 2540 int instance; 2541 int error; 2542 2543 switch (infocmd) { 2544 case DDI_INFO_DEVT2DEVINFO: 2545 dev = (dev_t)arg; 2546 instance = SDUNIT(dev); 2547 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2548 return (DDI_FAILURE); 2549 } 2550 *result = (void *) SD_DEVINFO(un); 2551 error = DDI_SUCCESS; 2552 break; 2553 case DDI_INFO_DEVT2INSTANCE: 2554 dev = (dev_t)arg; 2555 instance = SDUNIT(dev); 2556 *result = (void *)(uintptr_t)instance; 2557 error = DDI_SUCCESS; 2558 break; 2559 default: 2560 error = DDI_FAILURE; 2561 } 2562 return (error); 2563 } 2564 2565 /* 2566 * Function: sd_prop_op 2567 * 2568 * Description: This is the driver prop_op(9e) entry point function. 2569 * Return the number of blocks for the partition in question 2570 * or forward the request to the property facilities. 2571 * 2572 * Arguments: dev - device number 2573 * dip - pointer to device info structure 2574 * prop_op - property operator 2575 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2576 * name - pointer to property name 2577 * valuep - pointer or address of the user buffer 2578 * lengthp - property length 2579 * 2580 * Return Code: DDI_PROP_SUCCESS 2581 * DDI_PROP_NOT_FOUND 2582 * DDI_PROP_UNDEFINED 2583 * DDI_PROP_NO_MEMORY 2584 * DDI_PROP_BUF_TOO_SMALL 2585 */ 2586 2587 static int 2588 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2589 char *name, caddr_t valuep, int *lengthp) 2590 { 2591 int instance = ddi_get_instance(dip); 2592 struct sd_lun *un; 2593 uint64_t nblocks64; 2594 2595 /* 2596 * Our dynamic properties are all device specific and size oriented. 2597 * Requests issued under conditions where size is valid are passed 2598 * to ddi_prop_op_nblocks with the size information, otherwise the 2599 * request is passed to ddi_prop_op. Size depends on valid geometry. 2600 */ 2601 un = ddi_get_soft_state(sd_state, instance); 2602 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2603 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2604 name, valuep, lengthp)); 2605 } else if (!SD_IS_VALID_LABEL(un)) { 2606 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2607 valuep, lengthp)); 2608 } 2609 2610 /* get nblocks value */ 2611 ASSERT(!mutex_owned(SD_MUTEX(un))); 2612 2613 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2614 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2615 2616 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2617 name, valuep, lengthp, nblocks64)); 2618 } 2619 2620 /* 2621 * The following functions are for smart probing: 2622 * sd_scsi_probe_cache_init() 2623 * sd_scsi_probe_cache_fini() 2624 * sd_scsi_clear_probe_cache() 2625 * sd_scsi_probe_with_cache() 2626 */ 2627 2628 /* 2629 * Function: sd_scsi_probe_cache_init 2630 * 2631 * Description: Initializes the probe response cache mutex and head pointer. 
2632 * 2633 * Context: Kernel thread context 2634 */ 2635 2636 static void 2637 sd_scsi_probe_cache_init(void) 2638 { 2639 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2640 sd_scsi_probe_cache_head = NULL; 2641 } 2642 2643 2644 /* 2645 * Function: sd_scsi_probe_cache_fini 2646 * 2647 * Description: Frees all resources associated with the probe response cache. 2648 * 2649 * Context: Kernel thread context 2650 */ 2651 2652 static void 2653 sd_scsi_probe_cache_fini(void) 2654 { 2655 struct sd_scsi_probe_cache *cp; 2656 struct sd_scsi_probe_cache *ncp; 2657 2658 /* Clean up our smart probing linked list */ 2659 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2660 ncp = cp->next; 2661 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2662 } 2663 sd_scsi_probe_cache_head = NULL; 2664 mutex_destroy(&sd_scsi_probe_cache_mutex); 2665 } 2666 2667 2668 /* 2669 * Function: sd_scsi_clear_probe_cache 2670 * 2671 * Description: This routine clears the probe response cache. This is 2672 * done when open() returns ENXIO so that when deferred 2673 * attach is attempted (possibly after a device has been 2674 * turned on) we will retry the probe. Since we don't know 2675 * which target we failed to open, we just clear the 2676 * entire cache. 2677 * 2678 * Context: Kernel thread context 2679 */ 2680 2681 static void 2682 sd_scsi_clear_probe_cache(void) 2683 { 2684 struct sd_scsi_probe_cache *cp; 2685 int i; 2686 2687 mutex_enter(&sd_scsi_probe_cache_mutex); 2688 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2689 /* 2690 * Reset all entries to SCSIPROBE_EXISTS. This will 2691 * force probing to be performed the next time 2692 * sd_scsi_probe_with_cache is called. 2693 */ 2694 for (i = 0; i < NTARGETS_WIDE; i++) { 2695 cp->cache[i] = SCSIPROBE_EXISTS; 2696 } 2697 } 2698 mutex_exit(&sd_scsi_probe_cache_mutex); 2699 } 2700 2701 2702 /* 2703 * Function: sd_scsi_probe_with_cache 2704 * 2705 * Description: This routine implements support for a scsi device probe 2706 * with cache. The driver maintains a cache of the target 2707 * responses to scsi probes. If we get no response from a 2708 * target during a probe inquiry, we remember that, and we 2709 * avoid additional calls to scsi_probe on non-zero LUNs 2710 * on the same target until the cache is cleared. By doing 2711 * so we avoid the 1/4 sec selection timeout for nonzero 2712 * LUNs. lun0 of a target is always probed. 2713 * 2714 * Arguments: devp - Pointer to a scsi_device(9S) structure 2715 * waitfunc - indicates what the allocator routines should 2716 * do when resources are not available. This value 2717 * is passed on to scsi_probe() when that routine 2718 * is called. 2719 * 2720 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2721 * otherwise the value returned by scsi_probe(9F). 
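 *
 * For example: if the LUN 0 probe of a target gets no response, that
 * result is cached, and subsequent probes of nonzero LUNs on the same
 * target return SCSIPROBE_NORESP directly from the cache, skipping the
 * scsi_probe() call and its 1/4 sec selection timeout, until the cache
 * is cleared or LUN 0 is probed again.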
2722 * 2723 * Context: Kernel thread context 2724 */ 2725 2726 static int 2727 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2728 { 2729 struct sd_scsi_probe_cache *cp; 2730 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2731 int lun, tgt; 2732 2733 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2734 SCSI_ADDR_PROP_LUN, 0); 2735 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2736 SCSI_ADDR_PROP_TARGET, -1); 2737 2738 /* Make sure caching enabled and target in range */ 2739 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2740 /* do it the old way (no cache) */ 2741 return (scsi_probe(devp, waitfn)); 2742 } 2743 2744 mutex_enter(&sd_scsi_probe_cache_mutex); 2745 2746 /* Find the cache for this scsi bus instance */ 2747 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2748 if (cp->pdip == pdip) { 2749 break; 2750 } 2751 } 2752 2753 /* If we can't find a cache for this pdip, create one */ 2754 if (cp == NULL) { 2755 int i; 2756 2757 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2758 KM_SLEEP); 2759 cp->pdip = pdip; 2760 cp->next = sd_scsi_probe_cache_head; 2761 sd_scsi_probe_cache_head = cp; 2762 for (i = 0; i < NTARGETS_WIDE; i++) { 2763 cp->cache[i] = SCSIPROBE_EXISTS; 2764 } 2765 } 2766 2767 mutex_exit(&sd_scsi_probe_cache_mutex); 2768 2769 /* Recompute the cache for this target if LUN zero */ 2770 if (lun == 0) { 2771 cp->cache[tgt] = SCSIPROBE_EXISTS; 2772 } 2773 2774 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2775 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2776 return (SCSIPROBE_NORESP); 2777 } 2778 2779 /* Do the actual probe; save & return the result */ 2780 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2781 } 2782 2783 2784 /* 2785 * Function: sd_scsi_target_lun_init 2786 * 2787 * Description: Initializes the attached lun chain mutex and head pointer. 2788 * 2789 * Context: Kernel thread context 2790 */ 2791 2792 static void 2793 sd_scsi_target_lun_init(void) 2794 { 2795 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2796 sd_scsi_target_lun_head = NULL; 2797 } 2798 2799 2800 /* 2801 * Function: sd_scsi_target_lun_fini 2802 * 2803 * Description: Frees all resources associated with the attached lun 2804 * chain 2805 * 2806 * Context: Kernel thread context 2807 */ 2808 2809 static void 2810 sd_scsi_target_lun_fini(void) 2811 { 2812 struct sd_scsi_hba_tgt_lun *cp; 2813 struct sd_scsi_hba_tgt_lun *ncp; 2814 2815 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2816 ncp = cp->next; 2817 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2818 } 2819 sd_scsi_target_lun_head = NULL; 2820 mutex_destroy(&sd_scsi_target_lun_mutex); 2821 } 2822 2823 2824 /* 2825 * Function: sd_scsi_get_target_lun_count 2826 * 2827 * Description: This routine will check in the attached lun chain to see 2828 * how many luns are attached on the required SCSI controller 2829 * and target. Currently, some capabilities like tagged queue 2830 * are supported per target based by HBA. So all luns in a 2831 * target have the same capabilities. Based on this assumption, 2832 * sd should only set these capabilities once per target. This 2833 * function is called when sd needs to decide how many luns 2834 * already attached on a target. 2835 * 2836 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2837 * controller device. 2838 * target - The target ID on the controller's SCSI bus. 
2839 * 2840 * Return Code: The number of luns attached on the required target and 2841 * controller. 2842 * -1 if target ID is not in parallel SCSI scope or the given 2843 * dip is not in the chain. 2844 * 2845 * Context: Kernel thread context 2846 */ 2847 2848 static int 2849 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2850 { 2851 struct sd_scsi_hba_tgt_lun *cp; 2852 2853 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2854 return (-1); 2855 } 2856 2857 mutex_enter(&sd_scsi_target_lun_mutex); 2858 2859 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2860 if (cp->pdip == dip) { 2861 break; 2862 } 2863 } 2864 2865 mutex_exit(&sd_scsi_target_lun_mutex); 2866 2867 if (cp == NULL) { 2868 return (-1); 2869 } 2870 2871 return (cp->nlun[target]); 2872 } 2873 2874 2875 /* 2876 * Function: sd_scsi_update_lun_on_target 2877 * 2878 * Description: This routine is used to update the attached lun chain when a 2879 * lun is attached or detached on a target. 2880 * 2881 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2882 * controller device. 2883 * target - The target ID on the controller's SCSI bus. 2884 * flag - Indicate the lun is attached or detached. 2885 * 2886 * Context: Kernel thread context 2887 */ 2888 2889 static void 2890 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2891 { 2892 struct sd_scsi_hba_tgt_lun *cp; 2893 2894 mutex_enter(&sd_scsi_target_lun_mutex); 2895 2896 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2897 if (cp->pdip == dip) { 2898 break; 2899 } 2900 } 2901 2902 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2903 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2904 KM_SLEEP); 2905 cp->pdip = dip; 2906 cp->next = sd_scsi_target_lun_head; 2907 sd_scsi_target_lun_head = cp; 2908 } 2909 2910 mutex_exit(&sd_scsi_target_lun_mutex); 2911 2912 if (cp != NULL) { 2913 if (flag == SD_SCSI_LUN_ATTACH) { 2914 cp->nlun[target] ++; 2915 } else { 2916 cp->nlun[target] --; 2917 } 2918 } 2919 } 2920 2921 2922 /* 2923 * Function: sd_spin_up_unit 2924 * 2925 * Description: Issues the following commands to spin-up the device: 2926 * START STOP UNIT, and INQUIRY. 2927 * 2928 * Arguments: un - driver soft state (unit) structure 2929 * 2930 * Return Code: 0 - success 2931 * EIO - failure 2932 * EACCES - reservation conflict 2933 * 2934 * Context: Kernel thread context 2935 */ 2936 2937 static int 2938 sd_spin_up_unit(struct sd_lun *un) 2939 { 2940 size_t resid = 0; 2941 int has_conflict = FALSE; 2942 uchar_t *bufaddr; 2943 2944 ASSERT(un != NULL); 2945 2946 /* 2947 * Send a throwaway START UNIT command. 2948 * 2949 * If we fail on this, we don't care presently what precisely 2950 * is wrong. EMC's arrays will also fail this with a check 2951 * condition (0x2/0x4/0x3) if the device is "inactive," but 2952 * we don't want to fail the attach because it may become 2953 * "active" later. 2954 */ 2955 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2956 == EACCES) 2957 has_conflict = TRUE; 2958 2959 /* 2960 * Send another INQUIRY command to the target. This is necessary for 2961 * non-removable media direct access devices because their INQUIRY data 2962 * may not be fully qualified until they are spun up (perhaps via the 2963 * START command above). Note: This seems to be needed for some 2964 * legacy devices only.) The INQUIRY command should succeed even if a 2965 * Reservation Conflict is present. 
2966 */ 2967 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2968 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2969 kmem_free(bufaddr, SUN_INQSIZE); 2970 return (EIO); 2971 } 2972 2973 /* 2974 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2975 * Note that this routine does not return a failure here even if the 2976 * INQUIRY command did not return any data. This is a legacy behavior. 2977 */ 2978 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2979 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2980 } 2981 2982 kmem_free(bufaddr, SUN_INQSIZE); 2983 2984 /* If we hit a reservation conflict above, tell the caller. */ 2985 if (has_conflict == TRUE) { 2986 return (EACCES); 2987 } 2988 2989 return (0); 2990 } 2991 2992 #ifdef _LP64 2993 /* 2994 * Function: sd_enable_descr_sense 2995 * 2996 * Description: This routine attempts to select descriptor sense format 2997 * using the Control mode page. Devices that support 64 bit 2998 * LBAs (for >2TB luns) should also implement descriptor 2999 * sense data so we will call this function whenever we see 3000 * a lun larger than 2TB. If for some reason the device 3001 * supports 64 bit LBAs but doesn't support descriptor sense 3002 * presumably the mode select will fail. Everything will 3003 * continue to work normally except that we will not get 3004 * complete sense data for commands that fail with an LBA 3005 * larger than 32 bits. 3006 * 3007 * Arguments: un - driver soft state (unit) structure 3008 * 3009 * Context: Kernel thread context only 3010 */ 3011 3012 static void 3013 sd_enable_descr_sense(struct sd_lun *un) 3014 { 3015 uchar_t *header; 3016 struct mode_control_scsi3 *ctrl_bufp; 3017 size_t buflen; 3018 size_t bd_len; 3019 3020 /* 3021 * Read MODE SENSE page 0xA, Control Mode Page 3022 */ 3023 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3024 sizeof (struct mode_control_scsi3); 3025 header = kmem_zalloc(buflen, KM_SLEEP); 3026 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3027 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3028 SD_ERROR(SD_LOG_COMMON, un, 3029 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3030 goto eds_exit; 3031 } 3032 3033 /* 3034 * Determine size of Block Descriptors in order to locate 3035 * the mode page data. ATAPI devices return 0, SCSI devices 3036 * should return MODE_BLK_DESC_LENGTH. 3037 */ 3038 bd_len = ((struct mode_header *)header)->bdesc_length; 3039 3040 ctrl_bufp = (struct mode_control_scsi3 *) 3041 (header + MODE_HEADER_LENGTH + bd_len); 3042 3043 /* 3044 * Clear PS bit for MODE SELECT 3045 */ 3046 ctrl_bufp->mode_page.ps = 0; 3047 3048 /* 3049 * Set D_SENSE to enable descriptor sense format. 
3050 */ 3051 ctrl_bufp->d_sense = 1; 3052 3053 /* 3054 * Use MODE SELECT to commit the change to the D_SENSE bit 3055 */ 3056 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3057 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3058 SD_INFO(SD_LOG_COMMON, un, 3059 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3060 goto eds_exit; 3061 } 3062 3063 eds_exit: 3064 kmem_free(header, buflen); 3065 } 3066 3067 /* 3068 * Function: sd_reenable_dsense_task 3069 * 3070 * Description: Re-enable descriptor sense after device or bus reset 3071 * 3072 * Context: Executes in a taskq() thread context 3073 */ 3074 static void 3075 sd_reenable_dsense_task(void *arg) 3076 { 3077 struct sd_lun *un = arg; 3078 3079 ASSERT(un != NULL); 3080 sd_enable_descr_sense(un); 3081 } 3082 #endif /* _LP64 */ 3083 3084 /* 3085 * Function: sd_set_mmc_caps 3086 * 3087 * Description: This routine determines if the device is MMC compliant and if 3088 * the device supports CDDA via a mode sense of the CDVD 3089 * capabilities mode page. Also checks if the device is a 3090 * dvdram writable device. 3091 * 3092 * Arguments: un - driver soft state (unit) structure 3093 * 3094 * Context: Kernel thread context only 3095 */ 3096 3097 static void 3098 sd_set_mmc_caps(struct sd_lun *un) 3099 { 3100 struct mode_header_grp2 *sense_mhp; 3101 uchar_t *sense_page; 3102 caddr_t buf; 3103 int bd_len; 3104 int status; 3105 struct uscsi_cmd com; 3106 int rtn; 3107 uchar_t *out_data_rw, *out_data_hd; 3108 uchar_t *rqbuf_rw, *rqbuf_hd; 3109 3110 ASSERT(un != NULL); 3111 3112 /* 3113 * The flags which will be set in this function are - mmc compliant, 3114 * dvdram writable device, cdda support. Initialize them to FALSE 3115 * and if a capability is detected - it will be set to TRUE. 3116 */ 3117 un->un_f_mmc_cap = FALSE; 3118 un->un_f_dvdram_writable_device = FALSE; 3119 un->un_f_cfg_cdda = FALSE; 3120 3121 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3122 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3123 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3124 3125 if (status != 0) { 3126 /* command failed; just return */ 3127 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3128 return; 3129 } 3130 /* 3131 * If the mode sense request for the CDROM CAPABILITIES 3132 * page (0x2A) succeeds the device is assumed to be MMC. 3133 */ 3134 un->un_f_mmc_cap = TRUE; 3135 3136 /* Get to the page data */ 3137 sense_mhp = (struct mode_header_grp2 *)buf; 3138 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3139 sense_mhp->bdesc_length_lo; 3140 if (bd_len > MODE_BLK_DESC_LENGTH) { 3141 /* 3142 * We did not get back the expected block descriptor 3143 * length so we cannot determine if the device supports 3144 * CDDA. However, we still indicate the device is MMC 3145 * according to the successful response to the page 3146 * 0x2A mode sense request. 3147 */ 3148 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3149 "sd_set_mmc_caps: Mode Sense returned " 3150 "invalid block descriptor length\n"); 3151 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3152 return; 3153 } 3154 3155 /* See if read CDDA is supported */ 3156 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3157 bd_len); 3158 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3159 3160 /* See if writing DVD RAM is supported. */ 3161 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? 
TRUE : FALSE;
3162 	if (un->un_f_dvdram_writable_device == TRUE) {
3163 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3164 		return;
3165 	}
3166
3167 	/*
3168 	 * If the device presents DVD or CD capabilities in the mode
3169 	 * page, we can return here since a RRD will not have
3170 	 * these capabilities.
3171 	 */
3172 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3173 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3174 		return;
3175 	}
3176 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3177
3178 	/*
3179 	 * If un->un_f_dvdram_writable_device is still FALSE,
3180 	 * check for a Removable Rigid Disk (RRD). A RRD
3181 	 * device is identified by the features RANDOM_WRITABLE and
3182 	 * HARDWARE_DEFECT_MANAGEMENT.
3183 	 */
3184 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3185 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3186
3187 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3188 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3189 	    RANDOM_WRITABLE, SD_PATH_STANDARD);
3190 	if (rtn != 0) {
3191 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3192 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3193 		return;
3194 	}
3195
3196 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3197 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3198
3199 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3200 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3201 	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3202 	if (rtn == 0) {
3203 		/*
3204 		 * We have good information, check for random writable
3205 		 * and hardware defect features.
3206 		 */
3207 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3208 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3209 			un->un_f_dvdram_writable_device = TRUE;
3210 		}
3211 	}
3212
3213 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3214 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3215 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3216 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3217 }
3218
3219 /*
3220  * Function: sd_check_for_writable_cd
3221  *
3222  * Description: This routine determines if the media in the device is
3223  *		writable or not. It uses the get configuration command (0x46)
3224  *		to determine if the media is writable.
3225  *
3226  * Arguments: un - driver soft state (unit) structure
3227  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3228  *		chain and the normal command waitq, or
3229  *		SD_PATH_DIRECT_PRIORITY to use the USCSI
3230  *		"direct" chain and bypass the normal command
3231  *		waitq.
3232  *
3233  * Context: Never called at interrupt context.
3234  */
3235
3236 static void
3237 sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
3238 {
3239 	struct uscsi_cmd com;
3240 	uchar_t *out_data;
3241 	uchar_t *rqbuf;
3242 	int rtn;
3243 	uchar_t *out_data_rw, *out_data_hd;
3244 	uchar_t *rqbuf_rw, *rqbuf_hd;
3245 	struct mode_header_grp2 *sense_mhp;
3246 	uchar_t *sense_page;
3247 	caddr_t buf;
3248 	int bd_len;
3249 	int status;
3250
3251 	ASSERT(un != NULL);
3252 	ASSERT(mutex_owned(SD_MUTEX(un)));
3253
3254 	/*
3255 	 * Initialize writable media to FALSE. It is set to TRUE only if
3256 	 * the configuration info tells us the media is writable.
3257 	 */
3258 	un->un_f_mmc_writable_media = FALSE;
3259 	mutex_exit(SD_MUTEX(un));
3260
3261 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3262 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3263
3264 	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3265 	    out_data, SD_PROFILE_HEADER_LEN, path_flag);
3266
3267 	mutex_enter(SD_MUTEX(un));
3268 	if (rtn == 0) {
3269 		/*
3270 		 * We have good information, check for writable DVD.
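		 * (Bytes 6-7 of the GET CONFIGURATION response header hold
		 * the current profile; the check below treats profile
		 * 0x0012, i.e. DVD-RAM, as writable media.)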
3271 */ 3272 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3273 un->un_f_mmc_writable_media = TRUE; 3274 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3275 kmem_free(rqbuf, SENSE_LENGTH); 3276 return; 3277 } 3278 } 3279 3280 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3281 kmem_free(rqbuf, SENSE_LENGTH); 3282 3283 /* 3284 * Determine if this is a RRD type device. 3285 */ 3286 mutex_exit(SD_MUTEX(un)); 3287 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3288 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3289 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3290 mutex_enter(SD_MUTEX(un)); 3291 if (status != 0) { 3292 /* command failed; just return */ 3293 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3294 return; 3295 } 3296 3297 /* Get to the page data */ 3298 sense_mhp = (struct mode_header_grp2 *)buf; 3299 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3300 if (bd_len > MODE_BLK_DESC_LENGTH) { 3301 /* 3302 * We did not get back the expected block descriptor length so 3303 * we cannot check the mode page. 3304 */ 3305 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3306 "sd_check_for_writable_cd: Mode Sense returned " 3307 "invalid block descriptor length\n"); 3308 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3309 return; 3310 } 3311 3312 /* 3313 * If the device presents DVD or CD capabilities in the mode 3314 * page, we can return here since a RRD device will not have 3315 * these capabilities. 3316 */ 3317 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3318 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3319 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3320 return; 3321 } 3322 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3323 3324 /* 3325 * If un->un_f_mmc_writable_media is still FALSE, 3326 * check for RRD type media. A RRD device is identified 3327 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3328 */ 3329 mutex_exit(SD_MUTEX(un)); 3330 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3331 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3332 3333 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3334 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3335 RANDOM_WRITABLE, path_flag); 3336 if (rtn != 0) { 3337 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3338 kmem_free(rqbuf_rw, SENSE_LENGTH); 3339 mutex_enter(SD_MUTEX(un)); 3340 return; 3341 } 3342 3343 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3344 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3345 3346 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3347 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3348 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3349 mutex_enter(SD_MUTEX(un)); 3350 if (rtn == 0) { 3351 /* 3352 * We have good information, check for random writable 3353 * and hardware defect features as current. 3354 */ 3355 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3356 (out_data_rw[10] & 0x1) && 3357 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3358 (out_data_hd[10] & 0x1)) { 3359 un->un_f_mmc_writable_media = TRUE; 3360 } 3361 } 3362 3363 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3364 kmem_free(rqbuf_rw, SENSE_LENGTH); 3365 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3366 kmem_free(rqbuf_hd, SENSE_LENGTH); 3367 } 3368 3369 /* 3370 * Function: sd_read_unit_properties 3371 * 3372 * Description: The following implements a property lookup mechanism. 
3373 * Properties for particular disks (keyed on vendor, model 3374 * and rev numbers) are sought in the sd.conf file via 3375 * sd_process_sdconf_file(), and if not found there, are 3376 * looked for in a list hardcoded in this driver via 3377 * sd_process_sdconf_table(). Once located, the properties 3378 * are used to update the driver unit structure. 3379 * 3380 * Arguments: un - driver soft state (unit) structure 3381 */ 3382 3383 static void 3384 sd_read_unit_properties(struct sd_lun *un) 3385 { 3386 /* 3387 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3388 * the "sd-config-list" property (from the sd.conf file) or if 3389 * there was not a match for the inquiry vid/pid. If this event 3390 * occurs the static driver configuration table is searched for 3391 * a match. 3392 */ 3393 ASSERT(un != NULL); 3394 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3395 sd_process_sdconf_table(un); 3396 } 3397 3398 /* check for LSI device */ 3399 sd_is_lsi(un); 3400 3401 3402 } 3403 3404 3405 /* 3406 * Function: sd_process_sdconf_file 3407 * 3408 * Description: Use ddi_getlongprop to obtain the properties from the 3409 * driver's config file (i.e., sd.conf) and update the driver 3410 * soft state structure accordingly. 3411 * 3412 * Arguments: un - driver soft state (unit) structure 3413 * 3414 * Return Code: SD_SUCCESS - The properties were successfully set according 3415 * to the driver configuration file. 3416 * SD_FAILURE - The driver config list was not obtained or 3417 * there was no vid/pid match. This indicates that 3418 * the static config table should be used. 3419 * 3420 * The config file has a property, "sd-config-list", which consists of 3421 * one or more duplets as follows: 3422 * 3423 * sd-config-list= 3424 * <duplet>, 3425 * [<duplet>,] 3426 * [<duplet>]; 3427 * 3428 * The structure of each duplet is as follows: 3429 * 3430 * <duplet>:= <vid+pid>,<data-property-name-list> 3431 * 3432 * The first entry of the duplet is the device ID string (the concatenated 3433 * vid & pid; not to be confused with a device_id). This is defined in 3434 * the same way as in the sd_disk_table. 3435 * 3436 * The second part of the duplet is a string that identifies a 3437 * data-property-name-list. The data-property-name-list is defined as 3438 * follows: 3439 * 3440 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3441 * 3442 * The syntax of <data-property-name> depends on the <version> field. 3443 * 3444 * If version = SD_CONF_VERSION_1 we have the following syntax: 3445 * 3446 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3447 * 3448 * where the prop0 value will be used to set prop0 if bit0 is set in the 3449 * flags, prop1 if bit1 is set, etc.
and N = SD_CONF_MAX_ITEMS -1 3450 * 3451 */ 3452 3453 static int 3454 sd_process_sdconf_file(struct sd_lun *un) 3455 { 3456 char *config_list = NULL; 3457 int config_list_len; 3458 int len; 3459 int dupletlen = 0; 3460 char *vidptr; 3461 int vidlen; 3462 char *dnlist_ptr; 3463 char *dataname_ptr; 3464 int dnlist_len; 3465 int dataname_len; 3466 int *data_list; 3467 int data_list_len; 3468 int rval = SD_FAILURE; 3469 int i; 3470 3471 ASSERT(un != NULL); 3472 3473 /* Obtain the configuration list associated with the .conf file */ 3474 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3475 sd_config_list, (caddr_t)&config_list, &config_list_len) 3476 != DDI_PROP_SUCCESS) { 3477 return (SD_FAILURE); 3478 } 3479 3480 /* 3481 * Compare vids in each duplet to the inquiry vid - if a match is 3482 * made, get the data value and update the soft state structure 3483 * accordingly. 3484 * 3485 * Note: This algorithm is complex and difficult to maintain. It should 3486 * be replaced with a more robust implementation. 3487 */ 3488 for (len = config_list_len, vidptr = config_list; len > 0; 3489 vidptr += dupletlen, len -= dupletlen) { 3490 /* 3491 * Note: The assumption here is that each vid entry is on 3492 * a unique line from its associated duplet. 3493 */ 3494 vidlen = dupletlen = (int)strlen(vidptr); 3495 if ((vidlen == 0) || 3496 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3497 dupletlen++; 3498 continue; 3499 } 3500 3501 /* 3502 * dnlist contains 1 or more blank separated 3503 * data-property-name entries 3504 */ 3505 dnlist_ptr = vidptr + vidlen + 1; 3506 dnlist_len = (int)strlen(dnlist_ptr); 3507 dupletlen += dnlist_len + 2; 3508 3509 /* 3510 * Set a pointer for the first data-property-name 3511 * entry in the list 3512 */ 3513 dataname_ptr = dnlist_ptr; 3514 dataname_len = 0; 3515 3516 /* 3517 * Loop through all data-property-name entries in the 3518 * data-property-name-list setting the properties for each. 3519 */ 3520 while (dataname_len < dnlist_len) { 3521 int version; 3522 3523 /* 3524 * Determine the length of the current 3525 * data-property-name entry by indexing until a 3526 * blank or NULL is encountered. When the space is 3527 * encountered reset it to a NULL for compliance 3528 * with ddi_getlongprop(). 
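* For example (property names invented for illustration), a dnlist of "acme-data acme-data2" is consumed as "acme-data\0acme-data2": the blank is overwritten with a NUL so that each name in turn forms a valid property-name string for ddi_getlongprop().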
3529 */ 3530 for (i = 0; ((dataname_ptr[i] != ' ') && 3531 (dataname_ptr[i] != '\0')); i++) { 3532 ; 3533 } 3534 3535 dataname_len += i; 3536 /* If not NUL terminated, make it so */ 3537 if (dataname_ptr[i] == ' ') { 3538 dataname_ptr[i] = '\0'; 3539 } 3540 dataname_len++; 3541 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3542 "sd_process_sdconf_file: disk:%s, data:%s\n", 3543 vidptr, dataname_ptr); 3544 3545 /* Get the data list */ 3546 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3547 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3548 != DDI_PROP_SUCCESS) { 3549 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3550 "sd_process_sdconf_file: data property (%s)" 3551 " has no value\n", dataname_ptr); 3552 dataname_ptr = dnlist_ptr + dataname_len; 3553 continue; 3554 } 3555 3556 version = data_list[0]; 3557 3558 if (version == SD_CONF_VERSION_1) { 3559 sd_tunables values; 3560 3561 /* Set the properties */ 3562 if (sd_chk_vers1_data(un, data_list[1], 3563 &data_list[2], data_list_len, dataname_ptr) 3564 == SD_SUCCESS) { 3565 sd_get_tunables_from_conf(un, 3566 data_list[1], &data_list[2], 3567 &values); 3568 sd_set_vers1_properties(un, 3569 data_list[1], &values); 3570 rval = SD_SUCCESS; 3571 } else { 3572 rval = SD_FAILURE; 3573 } 3574 } else { 3575 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3576 "data property %s version 0x%x is invalid.", 3577 dataname_ptr, version); 3578 rval = SD_FAILURE; 3579 } 3580 kmem_free(data_list, data_list_len); 3581 dataname_ptr = dnlist_ptr + dataname_len; 3582 } 3583 } 3584 3585 /* free up the memory allocated by ddi_getlongprop */ 3586 if (config_list) { 3587 kmem_free(config_list, config_list_len); 3588 } 3589 3590 return (rval); 3591 } 3592 3593 /* 3594 * Function: sd_get_tunables_from_conf() 3595 * 3596 * 3597 * This function reads the data list from the sd.conf file, pulls out 3598 * the values that take numeric arguments, and places 3599 * them in the appropriate sd_tunables member. 3600 * Since the order of the data list members varies across platforms, 3601 * this function reads them from the data list in a platform-specific 3602 * order and places them into the correct sd_tunables member, which is 3603 * consistent across all platforms.
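* As a hypothetical illustration (vid/pid and property name invented), an sd.conf fragment of sd-config-list= "ACME MAGICDISK", "acme-data"; acme-data= 1,0x1,64; carries version 1, flags 0x1 and a single value; assuming SD_CONF_BSET_THROTTLE is bit 0, the loop below would decode it as values->sdt_throttle = 64.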
3604 */ 3605 static void 3606 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3607 sd_tunables *values) 3608 { 3609 int i; 3610 int mask; 3611 3612 bzero(values, sizeof (sd_tunables)); 3613 3614 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3615 3616 mask = 1 << i; 3617 if (mask > flags) { 3618 break; 3619 } 3620 3621 switch (mask & flags) { 3622 case 0: /* This mask bit not set in flags */ 3623 continue; 3624 case SD_CONF_BSET_THROTTLE: 3625 values->sdt_throttle = data_list[i]; 3626 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3627 "sd_get_tunables_from_conf: throttle = %d\n", 3628 values->sdt_throttle); 3629 break; 3630 case SD_CONF_BSET_CTYPE: 3631 values->sdt_ctype = data_list[i]; 3632 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3633 "sd_get_tunables_from_conf: ctype = %d\n", 3634 values->sdt_ctype); 3635 break; 3636 case SD_CONF_BSET_NRR_COUNT: 3637 values->sdt_not_rdy_retries = data_list[i]; 3638 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3639 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3640 values->sdt_not_rdy_retries); 3641 break; 3642 case SD_CONF_BSET_BSY_RETRY_COUNT: 3643 values->sdt_busy_retries = data_list[i]; 3644 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3645 "sd_get_tunables_from_conf: busy_retries = %d\n", 3646 values->sdt_busy_retries); 3647 break; 3648 case SD_CONF_BSET_RST_RETRIES: 3649 values->sdt_reset_retries = data_list[i]; 3650 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3651 "sd_get_tunables_from_conf: reset_retries = %d\n", 3652 values->sdt_reset_retries); 3653 break; 3654 case SD_CONF_BSET_RSV_REL_TIME: 3655 values->sdt_reserv_rel_time = data_list[i]; 3656 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3657 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3658 values->sdt_reserv_rel_time); 3659 break; 3660 case SD_CONF_BSET_MIN_THROTTLE: 3661 values->sdt_min_throttle = data_list[i]; 3662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3663 "sd_get_tunables_from_conf: min_throttle = %d\n", 3664 values->sdt_min_throttle); 3665 break; 3666 case SD_CONF_BSET_DISKSORT_DISABLED: 3667 values->sdt_disk_sort_dis = data_list[i]; 3668 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3669 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3670 values->sdt_disk_sort_dis); 3671 break; 3672 case SD_CONF_BSET_LUN_RESET_ENABLED: 3673 values->sdt_lun_reset_enable = data_list[i]; 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3676 "\n", values->sdt_lun_reset_enable); 3677 break; 3678 } 3679 } 3680 } 3681 3682 /* 3683 * Function: sd_process_sdconf_table 3684 * 3685 * Description: Search the static configuration table for a match on the 3686 * inquiry vid/pid and update the driver soft state structure 3687 * according to the table property values for the device. 3688 * 3689 * The form of a configuration table entry is: 3690 * <vid+pid>,<flags>,<property-data> 3691 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3692 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3693 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3694 * 3695 * Arguments: un - driver soft state (unit) structure 3696 */ 3697 3698 static void 3699 sd_process_sdconf_table(struct sd_lun *un) 3700 { 3701 char *id = NULL; 3702 int table_index; 3703 int idlen; 3704 3705 ASSERT(un != NULL); 3706 for (table_index = 0; table_index < sd_disk_table_size; 3707 table_index++) { 3708 id = sd_disk_table[table_index].device_id; 3709 idlen = strlen(id); 3710 if (idlen == 0) { 3711 continue; 3712 } 3713 3714 /* 3715 * The static configuration table currently does not 3716 * implement version 10 properties. 
Additionally, 3717 * multiple data-property-name entries are not 3718 * implemented in the static configuration table. 3719 */ 3720 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3721 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3722 "sd_process_sdconf_table: disk %s\n", id); 3723 sd_set_vers1_properties(un, 3724 sd_disk_table[table_index].flags, 3725 sd_disk_table[table_index].properties); 3726 break; 3727 } 3728 } 3729 } 3730 3731 3732 /* 3733 * Function: sd_sdconf_id_match 3734 * 3735 * Description: This local function implements a case sensitive vid/pid 3736 * comparison as well as the boundary cases of wild card and 3737 * multiple blanks. 3738 * 3739 * Note: An implicit assumption made here is that the scsi 3740 * inquiry structure will always keep the vid, pid and 3741 * revision strings in consecutive sequence, so they can be 3742 * read as a single string. If this assumption is not the 3743 * case, a separate string, to be used for the check, needs 3744 * to be built with these strings concatenated. 3745 * 3746 * Arguments: un - driver soft state (unit) structure 3747 * id - table or config file vid/pid 3748 * idlen - length of the vid/pid (bytes) 3749 * 3750 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3751 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3752 */ 3753 3754 static int 3755 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3756 { 3757 struct scsi_inquiry *sd_inq; 3758 int rval = SD_SUCCESS; 3759 3760 ASSERT(un != NULL); 3761 sd_inq = un->un_sd->sd_inq; 3762 ASSERT(id != NULL); 3763 3764 /* 3765 * We use the inq_vid as a pointer to a buffer containing the 3766 * vid and pid and use the entire vid/pid length of the table 3767 * entry for the comparison. This works because the inq_pid 3768 * data member follows inq_vid in the scsi_inquiry structure. 3769 */ 3770 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3771 /* 3772 * The user id string is compared to the inquiry vid/pid 3773 * using a case insensitive comparison and ignoring 3774 * multiple spaces. 3775 */ 3776 rval = sd_blank_cmp(un, id, idlen); 3777 if (rval != SD_SUCCESS) { 3778 /* 3779 * User id strings that start and end with a "*" 3780 * are a special case. These do not have a 3781 * specific vendor, and the product string can 3782 * appear anywhere in the 16 byte PID portion of 3783 * the inquiry data. This is a simple strstr() 3784 * type search for the user id in the inquiry data. 3785 */ 3786 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3787 char *pidptr = &id[1]; 3788 int i; 3789 int j; 3790 int pidstrlen = idlen - 2; 3791 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3792 pidstrlen; 3793 3794 if (j < 0) { 3795 return (SD_FAILURE); 3796 } 3797 for (i = 0; i < j; i++) { 3798 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3799 pidptr, pidstrlen) == 0) { 3800 rval = SD_SUCCESS; 3801 break; 3802 } 3803 } 3804 } 3805 } 3806 } 3807 return (rval); 3808 } 3809 3810 3811 /* 3812 * Function: sd_blank_cmp 3813 * 3814 * Description: If the id string starts and ends with a space, treat 3815 * multiple consecutive spaces as equivalent to a single 3816 * space. For example, this causes a sd_disk_table entry 3817 * of " NEC CDROM " to match a device's id string of 3818 * "NEC CDROM". 3819 * 3820 * Note: The success exit condition for this routine is if 3821 * the pointer to the table entry is '\0' and the cnt of 3822 * the inquiry length is zero. 
This will happen if the inquiry 3823 * string returned by the device is padded with spaces to be 3824 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3825 * SCSI spec states that the inquiry string is to be padded with 3826 * spaces. 3827 * 3828 * Arguments: un - driver soft state (unit) structure 3829 * id - table or config file vid/pid 3830 * idlen - length of the vid/pid (bytes) 3831 * 3832 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3833 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3834 */ 3835 3836 static int 3837 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3838 { 3839 char *p1; 3840 char *p2; 3841 int cnt; 3842 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3843 sizeof (SD_INQUIRY(un)->inq_pid); 3844 3845 ASSERT(un != NULL); 3846 p2 = un->un_sd->sd_inq->inq_vid; 3847 ASSERT(id != NULL); 3848 p1 = id; 3849 3850 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3851 /* 3852 * Note: string p1 is terminated by a NUL but string p2 3853 * isn't. The end of p2 is determined by cnt. 3854 */ 3855 for (;;) { 3856 /* skip over any extra blanks in both strings */ 3857 while ((*p1 != '\0') && (*p1 == ' ')) { 3858 p1++; 3859 } 3860 while ((cnt != 0) && (*p2 == ' ')) { 3861 p2++; 3862 cnt--; 3863 } 3864 3865 /* compare the two strings */ 3866 if ((cnt == 0) || 3867 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3868 break; 3869 } 3870 while ((cnt > 0) && 3871 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3872 p1++; 3873 p2++; 3874 cnt--; 3875 } 3876 } 3877 } 3878 3879 /* return SD_SUCCESS if both strings match */ 3880 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3881 } 3882 3883 3884 /* 3885 * Function: sd_chk_vers1_data 3886 * 3887 * Description: Verify the version 1 device properties provided by the 3888 * user via the configuration file 3889 * 3890 * Arguments: un - driver soft state (unit) structure 3891 * flags - integer mask indicating properties to be set 3892 * prop_list - integer list of property values 3893 * list_len - length of user provided data 3894 * 3895 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3896 * SD_FAILURE - Indicates the user provided data is invalid 3897 */ 3898 3899 static int 3900 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3901 int list_len, char *dataname_ptr) 3902 { 3903 int i; 3904 int mask = 1; 3905 int index = 0; 3906 3907 ASSERT(un != NULL); 3908 3909 /* Check for a NULL property name and list */ 3910 if (dataname_ptr == NULL) { 3911 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3912 "sd_chk_vers1_data: NULL data property name."); 3913 return (SD_FAILURE); 3914 } 3915 if (prop_list == NULL) { 3916 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3917 "sd_chk_vers1_data: %s NULL data property list.", 3918 dataname_ptr); 3919 return (SD_FAILURE); 3920 } 3921 3922 /* Display a warning if undefined bits are set in the flags */ 3923 if (flags & ~SD_CONF_BIT_MASK) { 3924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3925 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3926 "Properties not set.", 3927 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3928 return (SD_FAILURE); 3929 } 3930 3931 /* 3932 * Verify the length of the list by identifying the highest bit set 3933 * in the flags and validating that the property list has a length 3934 * up to the index of this bit. 
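* For example (hypothetical flags value), flags = 0x6 has two bits set, so index works out to 2 below and the list must hold at least index + 2 = 4 ints: the version word, the flags word, and one value per set bit.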
3935 */ 3936 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3937 if (flags & mask) { 3938 index++; 3939 } 3940 mask = 1 << i; 3941 } 3942 if ((list_len / sizeof (int)) < (index + 2)) { 3943 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3944 "sd_chk_vers1_data: " 3945 "Data property list %s size is incorrect. " 3946 "Properties not set.", dataname_ptr); 3947 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3948 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3949 return (SD_FAILURE); 3950 } 3951 return (SD_SUCCESS); 3952 } 3953 3954 3955 /* 3956 * Function: sd_set_vers1_properties 3957 * 3958 * Description: Set version 1 device properties based on a property list 3959 * retrieved from the driver configuration file or static 3960 * configuration table. Version 1 properties have the format: 3961 * 3962 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3963 * 3964 * where the prop0 value will be used to set prop0 if bit0 3965 * is set in the flags 3966 * 3967 * Arguments: un - driver soft state (unit) structure 3968 * flags - integer mask indicating properties to be set 3969 * prop_list - integer list of property values 3970 */ 3971 3972 static void 3973 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3974 { 3975 ASSERT(un != NULL); 3976 3977 /* 3978 * Set the flag to indicate cache is to be disabled. An attempt 3979 * to disable the cache via sd_cache_control() will be made 3980 * later during attach once the basic initialization is complete. 3981 */ 3982 if (flags & SD_CONF_BSET_NOCACHE) { 3983 un->un_f_opt_disable_cache = TRUE; 3984 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3985 "sd_set_vers1_properties: caching disabled flag set\n"); 3986 } 3987 3988 /* CD-specific configuration parameters */ 3989 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3990 un->un_f_cfg_playmsf_bcd = TRUE; 3991 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3992 "sd_set_vers1_properties: playmsf_bcd set\n"); 3993 } 3994 if (flags & SD_CONF_BSET_READSUB_BCD) { 3995 un->un_f_cfg_readsub_bcd = TRUE; 3996 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3997 "sd_set_vers1_properties: readsub_bcd set\n"); 3998 } 3999 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4000 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4001 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4002 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4003 } 4004 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4005 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4007 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4008 } 4009 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4010 un->un_f_cfg_no_read_header = TRUE; 4011 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4012 "sd_set_vers1_properties: no_read_header set\n"); 4013 } 4014 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4015 un->un_f_cfg_read_cd_xd4 = TRUE; 4016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4017 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4018 } 4019 4020 /* Support for devices which do not have valid/unique serial numbers */ 4021 if (flags & SD_CONF_BSET_FAB_DEVID) { 4022 un->un_f_opt_fab_devid = TRUE; 4023 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4024 "sd_set_vers1_properties: fab_devid bit set\n"); 4025 } 4026 4027 /* Support for user throttle configuration */ 4028 if (flags & SD_CONF_BSET_THROTTLE) { 4029 ASSERT(prop_list != NULL); 4030 un->un_saved_throttle = un->un_throttle = 4031 prop_list->sdt_throttle; 4032 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4033 "sd_set_vers1_properties: throttle set to %d\n", 4034 prop_list->sdt_throttle); 4035 } 4036 4037 /* Set the per disk retry count 
according to the conf file or table. */ 4038 if (flags & SD_CONF_BSET_NRR_COUNT) { 4039 ASSERT(prop_list != NULL); 4040 if (prop_list->sdt_not_rdy_retries) { 4041 un->un_notready_retry_count = 4042 prop_list->sdt_not_rdy_retries; 4043 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4044 "sd_set_vers1_properties: not ready retry count" 4045 " set to %d\n", un->un_notready_retry_count); 4046 } 4047 } 4048 4049 /* The controller type is reported for generic disk driver ioctls */ 4050 if (flags & SD_CONF_BSET_CTYPE) { 4051 ASSERT(prop_list != NULL); 4052 switch (prop_list->sdt_ctype) { 4053 case CTYPE_CDROM: 4054 un->un_ctype = prop_list->sdt_ctype; 4055 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4056 "sd_set_vers1_properties: ctype set to " 4057 "CTYPE_CDROM\n"); 4058 break; 4059 case CTYPE_CCS: 4060 un->un_ctype = prop_list->sdt_ctype; 4061 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4062 "sd_set_vers1_properties: ctype set to " 4063 "CTYPE_CCS\n"); 4064 break; 4065 case CTYPE_ROD: /* RW optical */ 4066 un->un_ctype = prop_list->sdt_ctype; 4067 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4068 "sd_set_vers1_properties: ctype set to " 4069 "CTYPE_ROD\n"); 4070 break; 4071 default: 4072 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4073 "sd_set_vers1_properties: Could not set " 4074 "invalid ctype value (%d)", 4075 prop_list->sdt_ctype); 4076 } 4077 } 4078 4079 /* Purple failover timeout */ 4080 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4081 ASSERT(prop_list != NULL); 4082 un->un_busy_retry_count = 4083 prop_list->sdt_busy_retries; 4084 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4085 "sd_set_vers1_properties: " 4086 "busy retry count set to %d\n", 4087 un->un_busy_retry_count); 4088 } 4089 4090 /* Purple reset retry count */ 4091 if (flags & SD_CONF_BSET_RST_RETRIES) { 4092 ASSERT(prop_list != NULL); 4093 un->un_reset_retry_count = 4094 prop_list->sdt_reset_retries; 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_set_vers1_properties: " 4097 "reset retry count set to %d\n", 4098 un->un_reset_retry_count); 4099 } 4100 4101 /* Purple reservation release timeout */ 4102 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4103 ASSERT(prop_list != NULL); 4104 un->un_reserve_release_time = 4105 prop_list->sdt_reserv_rel_time; 4106 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4107 "sd_set_vers1_properties: " 4108 "reservation release timeout set to %d\n", 4109 un->un_reserve_release_time); 4110 } 4111 4112 /* 4113 * Driver flag telling the driver to verify that no commands are pending 4114 * for a device before issuing a Test Unit Ready. This is a workaround 4115 * for a firmware bug in some Seagate eliteI drives. 4116 */ 4117 if (flags & SD_CONF_BSET_TUR_CHECK) { 4118 un->un_f_cfg_tur_check = TRUE; 4119 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4120 "sd_set_vers1_properties: tur queue check set\n"); 4121 } 4122 4123 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4124 un->un_min_throttle = prop_list->sdt_min_throttle; 4125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4126 "sd_set_vers1_properties: min throttle set to %d\n", 4127 un->un_min_throttle); 4128 } 4129 4130 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4131 un->un_f_disksort_disabled = 4132 (prop_list->sdt_disk_sort_dis != 0) ? 4133 TRUE : FALSE; 4134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4135 "sd_set_vers1_properties: disksort disabled " 4136 "flag set to %d\n", 4137 prop_list->sdt_disk_sort_dis); 4138 } 4139 4140 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4141 un->un_f_lun_reset_enabled = 4142 (prop_list->sdt_lun_reset_enable != 0) ? 
4143 TRUE : FALSE; 4144 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4145 "sd_set_vers1_properties: lun reset enabled " 4146 "flag set to %d\n", 4147 prop_list->sdt_lun_reset_enable); 4148 } 4149 4150 /* 4151 * Validate the throttle values. 4152 * If any of the numbers are invalid, set everything to defaults. 4153 */ 4154 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4155 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4156 (un->un_min_throttle > un->un_throttle)) { 4157 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4158 un->un_min_throttle = sd_min_throttle; 4159 } 4160 } 4161 4162 /* 4163 * Function: sd_is_lsi() 4164 * 4165 * Description: Check for LSI devices by stepping through the static device 4166 * table to match the vid/pid. 4167 * 4168 * Args: un - ptr to sd_lun 4169 * 4170 * Notes: When a new LSI property is created, it must also be added 4171 * to this function. 4172 */ 4173 static void 4174 sd_is_lsi(struct sd_lun *un) 4175 { 4176 char *id = NULL; 4177 int table_index; 4178 int idlen; 4179 void *prop; 4180 4181 ASSERT(un != NULL); 4182 for (table_index = 0; table_index < sd_disk_table_size; 4183 table_index++) { 4184 id = sd_disk_table[table_index].device_id; 4185 idlen = strlen(id); 4186 if (idlen == 0) { 4187 continue; 4188 } 4189 4190 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4191 prop = sd_disk_table[table_index].properties; 4192 if (prop == &lsi_properties || 4193 prop == &lsi_oem_properties || 4194 prop == &lsi_properties_scsi || 4195 prop == &symbios_properties) { 4196 un->un_f_cfg_is_lsi = TRUE; 4197 } 4198 break; 4199 } 4200 } 4201 } 4202 4203 /* 4204 * Function: sd_get_physical_geometry 4205 * 4206 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4207 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4208 * target, and use this information to initialize the physical 4209 * geometry cache specified by pgeom_p. 4210 * 4211 * MODE SENSE is an optional command, so failure in this case 4212 * does not necessarily denote an error. We want to use the 4213 * MODE SENSE commands to derive the physical geometry of the 4214 * device, but if either command fails, the logical geometry is 4215 * used as the fallback for disk label geometry in cmlb. 4216 * 4217 * This requires that un->un_blockcount and un->un_tgt_blocksize 4218 * have already been initialized for the current target and 4219 * that the current values be passed as args so that we don't 4220 * end up ever trying to use -1 as a valid value. This could 4221 * happen if either value is reset while we're not holding 4222 * the mutex. 4223 * 4224 * Arguments: un - driver soft state (unit) structure 4225 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4226 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4227 * to use the USCSI "direct" chain and bypass the normal 4228 * command waitq. 4229 * 4230 * Context: Kernel thread only (can sleep).
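* For example (figures illustrative only), if page 4 reports nhead = 16 and page 3 reports nsect = 63, then spc = 1008; with ncyl = 2000 the MODE SENSE capacity works out to 2,016,000 blocks, and any excess of that figure over the READ CAPACITY value is folded into alternate cylinders below.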
4231 */ 4232 4233 static int 4234 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4235 diskaddr_t capacity, int lbasize, int path_flag) 4236 { 4237 struct mode_format *page3p; 4238 struct mode_geometry *page4p; 4239 struct mode_header *headerp; 4240 int sector_size; 4241 int nsect; 4242 int nhead; 4243 int ncyl; 4244 int intrlv; 4245 int spc; 4246 diskaddr_t modesense_capacity; 4247 int rpm; 4248 int bd_len; 4249 int mode_header_length; 4250 uchar_t *p3bufp; 4251 uchar_t *p4bufp; 4252 int cdbsize; 4253 int ret = EIO; 4254 4255 ASSERT(un != NULL); 4256 4257 if (lbasize == 0) { 4258 if (ISCD(un)) { 4259 lbasize = 2048; 4260 } else { 4261 lbasize = un->un_sys_blocksize; 4262 } 4263 } 4264 pgeom_p->g_secsize = (unsigned short)lbasize; 4265 4266 /* 4267 * If the unit is a cd/dvd drive MODE SENSE page three 4268 * and MODE SENSE page four are reserved (see SBC spec 4269 * and MMC spec). To prevent soft errors just return 4270 * using the default LBA size. 4271 */ 4272 if (ISCD(un)) 4273 return (ret); 4274 4275 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4276 4277 /* 4278 * Retrieve MODE SENSE page 3 - Format Device Page 4279 */ 4280 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4281 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4282 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4283 != 0) { 4284 SD_ERROR(SD_LOG_COMMON, un, 4285 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4286 goto page3_exit; 4287 } 4288 4289 /* 4290 * Determine size of Block Descriptors in order to locate the mode 4291 * page data. ATAPI devices return 0, SCSI devices should return 4292 * MODE_BLK_DESC_LENGTH. 4293 */ 4294 headerp = (struct mode_header *)p3bufp; 4295 if (un->un_f_cfg_is_atapi == TRUE) { 4296 struct mode_header_grp2 *mhp = 4297 (struct mode_header_grp2 *)headerp; 4298 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4299 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4300 } else { 4301 mode_header_length = MODE_HEADER_LENGTH; 4302 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4303 } 4304 4305 if (bd_len > MODE_BLK_DESC_LENGTH) { 4306 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4307 "received unexpected bd_len of %d, page3\n", bd_len); 4308 goto page3_exit; 4309 } 4310 4311 page3p = (struct mode_format *) 4312 ((caddr_t)headerp + mode_header_length + bd_len); 4313 4314 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4315 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4316 "mode sense pg3 code mismatch %d\n", 4317 page3p->mode_page.code); 4318 goto page3_exit; 4319 } 4320 4321 /* 4322 * Use this physical geometry data only if BOTH MODE SENSE commands 4323 * complete successfully; otherwise, revert to the logical geometry. 4324 * So, we need to save everything in temporary variables. 
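* Note that the multi-byte MODE SENSE fields are big-endian per the SCSI spec, hence the BE_16() conversions applied below.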
4325 */ 4326 sector_size = BE_16(page3p->data_bytes_sect); 4327 4328 /* 4329 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4330 */ 4331 if (sector_size == 0) { 4332 sector_size = un->un_sys_blocksize; 4333 } else { 4334 sector_size &= ~(un->un_sys_blocksize - 1); 4335 } 4336 4337 nsect = BE_16(page3p->sect_track); 4338 intrlv = BE_16(page3p->interleave); 4339 4340 SD_INFO(SD_LOG_COMMON, un, 4341 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4342 SD_INFO(SD_LOG_COMMON, un, 4343 " mode page: %d; nsect: %d; sector size: %d;\n", 4344 page3p->mode_page.code, nsect, sector_size); 4345 SD_INFO(SD_LOG_COMMON, un, 4346 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4347 BE_16(page3p->track_skew), 4348 BE_16(page3p->cylinder_skew)); 4349 4350 4351 /* 4352 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4353 */ 4354 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4355 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4356 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4357 != 0) { 4358 SD_ERROR(SD_LOG_COMMON, un, 4359 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4360 goto page4_exit; 4361 } 4362 4363 /* 4364 * Determine size of Block Descriptors in order to locate the mode 4365 * page data. ATAPI devices return 0, SCSI devices should return 4366 * MODE_BLK_DESC_LENGTH. 4367 */ 4368 headerp = (struct mode_header *)p4bufp; 4369 if (un->un_f_cfg_is_atapi == TRUE) { 4370 struct mode_header_grp2 *mhp = 4371 (struct mode_header_grp2 *)headerp; 4372 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4373 } else { 4374 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4375 } 4376 4377 if (bd_len > MODE_BLK_DESC_LENGTH) { 4378 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4379 "received unexpected bd_len of %d, page4\n", bd_len); 4380 goto page4_exit; 4381 } 4382 4383 page4p = (struct mode_geometry *) 4384 ((caddr_t)headerp + mode_header_length + bd_len); 4385 4386 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4387 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4388 "mode sense pg4 code mismatch %d\n", 4389 page4p->mode_page.code); 4390 goto page4_exit; 4391 } 4392 4393 /* 4394 * Stash the data now, after we know that both commands completed. 4395 */ 4396 4397 4398 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4399 spc = nhead * nsect; 4400 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4401 rpm = BE_16(page4p->rpm); 4402 4403 modesense_capacity = spc * ncyl; 4404 4405 SD_INFO(SD_LOG_COMMON, un, 4406 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4407 SD_INFO(SD_LOG_COMMON, un, 4408 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4409 SD_INFO(SD_LOG_COMMON, un, 4410 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4411 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4412 (void *)pgeom_p, capacity); 4413 4414 /* 4415 * Compensate if the drive's geometry is not rectangular, i.e., 4416 * the product of C * H * S returned by MODE SENSE >= that returned 4417 * by read capacity. This is an idiosyncrasy of the original x86 4418 * disk subsystem. 
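* The adjustment below computes g_acyl = (modesense_capacity - capacity + spc - 1) / spc, i.e. the excess capacity rounded up to whole cylinders, and then derives g_ncyl = ncyl - g_acyl.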
4419 */ 4420 if (modesense_capacity >= capacity) { 4421 SD_INFO(SD_LOG_COMMON, un, 4422 "sd_get_physical_geometry: adjusting acyl; " 4423 "old: %d; new: %d\n", pgeom_p->g_acyl, 4424 (modesense_capacity - capacity + spc - 1) / spc); 4425 if (sector_size != 0) { 4426 /* 1243403: NEC D38x7 drives don't support sec size */ 4427 pgeom_p->g_secsize = (unsigned short)sector_size; 4428 } 4429 pgeom_p->g_nsect = (unsigned short)nsect; 4430 pgeom_p->g_nhead = (unsigned short)nhead; 4431 pgeom_p->g_capacity = capacity; 4432 pgeom_p->g_acyl = 4433 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4434 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4435 } 4436 4437 pgeom_p->g_rpm = (unsigned short)rpm; 4438 pgeom_p->g_intrlv = (unsigned short)intrlv; 4439 ret = 0; 4440 4441 SD_INFO(SD_LOG_COMMON, un, 4442 "sd_get_physical_geometry: mode sense geometry:\n"); 4443 SD_INFO(SD_LOG_COMMON, un, 4444 " nsect: %d; sector size: %d; interlv: %d\n", 4445 nsect, sector_size, intrlv); 4446 SD_INFO(SD_LOG_COMMON, un, 4447 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4448 nhead, ncyl, rpm, modesense_capacity); 4449 SD_INFO(SD_LOG_COMMON, un, 4450 "sd_get_physical_geometry: (cached)\n"); 4451 SD_INFO(SD_LOG_COMMON, un, 4452 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4453 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4454 pgeom_p->g_nhead, pgeom_p->g_nsect); 4455 SD_INFO(SD_LOG_COMMON, un, 4456 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4457 pgeom_p->g_secsize, pgeom_p->g_capacity, 4458 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4459 4460 page4_exit: 4461 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4462 page3_exit: 4463 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4464 4465 return (ret); 4466 } 4467 4468 /* 4469 * Function: sd_get_virtual_geometry 4470 * 4471 * Description: Ask the controller to tell us about the target device. 4472 * 4473 * Arguments: un - pointer to softstate 4474 * capacity - disk capacity in #blocks 4475 * lbasize - disk block size in bytes 4476 * 4477 * Context: Kernel thread only 4478 */ 4479 4480 static int 4481 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4482 diskaddr_t capacity, int lbasize) 4483 { 4484 uint_t geombuf; 4485 int spc; 4486 4487 ASSERT(un != NULL); 4488 4489 /* Set sector size, and total number of sectors */ 4490 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4491 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4492 4493 /* Let the HBA tell us its geometry */ 4494 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4495 4496 /* A value of -1 indicates an undefined "geometry" property */ 4497 if (geombuf == (-1)) { 4498 return (EINVAL); 4499 } 4500 4501 /* Initialize the logical geometry cache. */ 4502 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4503 lgeom_p->g_nsect = geombuf & 0xffff; 4504 lgeom_p->g_secsize = un->un_sys_blocksize; 4505 4506 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4507 4508 /* 4509 * Note: The driver originally converted the capacity value from 4510 * target blocks to system blocks. However, the capacity value passed 4511 * to this routine is already in terms of system blocks (this scaling 4512 * is done when the READ CAPACITY command is issued and processed). 4513 * This 'error' may have gone undetected because the usage of g_ncyl 4514 * (which is based upon g_capacity) is very limited within the driver 4515 */ 4516 lgeom_p->g_capacity = capacity; 4517 4518 /* 4519 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4520 * hba may return zero values if the device has been removed. 4521 */ 4522 if (spc == 0) { 4523 lgeom_p->g_ncyl = 0; 4524 } else { 4525 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4526 } 4527 lgeom_p->g_acyl = 0; 4528 4529 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4530 return (0); 4531 4532 } 4533 /* 4534 * Function: sd_update_block_info 4535 * 4536 * Description: Record a new target sector size and capacity in the 4537 * soft state and mark them as valid. 4538 * 4539 * Arguments: un: unit struct. 4540 * lbasize: new target sector size 4541 * capacity: new target capacity, i.e., block count 4542 * 4543 * Context: Kernel thread context 4544 */ 4545 4546 static void 4547 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4548 { 4549 if (lbasize != 0) { 4550 un->un_tgt_blocksize = lbasize; 4551 un->un_f_tgt_blocksize_is_valid = TRUE; 4552 } 4553 4554 if (capacity != 0) { 4555 un->un_blockcount = capacity; 4556 un->un_f_blockcount_is_valid = TRUE; 4557 } 4558 } 4559 4560 4561 /* 4562 * Function: sd_register_devid 4563 * 4564 * Description: This routine will obtain the device id information from the 4565 * target, obtain the serial number, and register the device 4566 * id with the ddi framework. 4567 * 4568 * Arguments: devi - the system's dev_info_t for the device. 4569 * un - driver soft state (unit) structure 4570 * reservation_flag - indicates if a reservation conflict 4571 * occurred during attach 4572 * 4573 * Context: Kernel Thread 4574 */ 4575 static void 4576 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4577 { 4578 int rval = 0; 4579 uchar_t *inq80 = NULL; 4580 size_t inq80_len = MAX_INQUIRY_SIZE; 4581 size_t inq80_resid = 0; 4582 uchar_t *inq83 = NULL; 4583 size_t inq83_len = MAX_INQUIRY_SIZE; 4584 size_t inq83_resid = 0; 4585 4586 ASSERT(un != NULL); 4587 ASSERT(mutex_owned(SD_MUTEX(un))); 4588 ASSERT((SD_DEVINFO(un)) == devi); 4589 4590 /* 4591 * This is the case of antiquated Sun disk drives that have the 4592 * FAB_DEVID property set in the disk_table. These drives 4593 * manage their devids by storing them in the last 2 available 4594 * sectors on the drive and have them fabricated by the ddi layer 4595 * by calling ddi_devid_init and passing the DEVID_FAB flag. 4596 */ 4597 if (un->un_f_opt_fab_devid == TRUE) { 4598 /* 4599 * Depending on EINVAL isn't reliable, since a reserved disk 4600 * may result in invalid geometry, so check to make sure a 4601 * reservation conflict did not occur during attach. 4602 */ 4603 if ((sd_get_devid(un) == EINVAL) && 4604 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4605 /* 4606 * The devid is invalid AND there is no reservation 4607 * conflict. Fabricate a new devid. 4608 */ 4609 (void) sd_create_devid(un); 4610 } 4611 4612 /* Register the devid if it exists */ 4613 if (un->un_devid != NULL) { 4614 (void) ddi_devid_register(SD_DEVINFO(un), 4615 un->un_devid); 4616 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4617 "sd_register_devid: Devid Fabricated\n"); 4618 } 4619 return; 4620 } 4621 4622 /* 4623 * We check the availability of the World Wide Name (0x83) and Unit 4624 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4625 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4626 * 0x83 is available, that is the best choice. Our next choice is 4627 * 0x80. If neither is available, we munge the devid from the device 4628 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4629 * to fabricate a devid for non-Sun qualified disks.
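* The VPD data is gathered below with INQUIRY (EVPD = 1, page code 0x80 or 0x83) and handed to ddi_devid_scsi_encode(), which selects the best identifier available.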
4630 */ 4631 if (sd_check_vpd_page_support(un) == 0) { 4632 /* collect page 80 data if available */ 4633 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4634 4635 mutex_exit(SD_MUTEX(un)); 4636 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4637 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4638 0x01, 0x80, &inq80_resid); 4639 4640 if (rval != 0) { 4641 kmem_free(inq80, inq80_len); 4642 inq80 = NULL; 4643 inq80_len = 0; 4644 } 4645 mutex_enter(SD_MUTEX(un)); 4646 } 4647 4648 /* collect page 83 data if available */ 4649 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4650 mutex_exit(SD_MUTEX(un)); 4651 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4652 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4653 0x01, 0x83, &inq83_resid); 4654 4655 if (rval != 0) { 4656 kmem_free(inq83, inq83_len); 4657 inq83 = NULL; 4658 inq83_len = 0; 4659 } 4660 mutex_enter(SD_MUTEX(un)); 4661 } 4662 } 4663 4664 /* encode best devid possible based on data available */ 4665 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4666 (char *)ddi_driver_name(SD_DEVINFO(un)), 4667 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4668 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4669 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4670 4671 /* devid successfully encoded, register devid */ 4672 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4673 4674 } else { 4675 /* 4676 * Unable to encode a devid based on data available. 4677 * This is not a Sun qualified disk. Older Sun disk 4678 * drives that have the SD_FAB_DEVID property 4679 * set in the disk_table and non-Sun qualified 4680 * disks are treated in the same manner. These 4681 * drives manage their devids by storing them in 4682 * the last 2 available sectors on the drive and 4683 * have them fabricated by the ddi layer by 4684 * calling ddi_devid_init and passing the 4685 * DEVID_FAB flag. 4686 * Only fabricate a devid if a fabricated devid 4687 * does not already exist. 4688 */ 4689 if (sd_get_devid(un) == EINVAL) { 4690 (void) sd_create_devid(un); 4691 } 4692 un->un_f_opt_fab_devid = TRUE; 4693 4694 /* Register the devid if it exists */ 4695 if (un->un_devid != NULL) { 4696 (void) ddi_devid_register(SD_DEVINFO(un), 4697 un->un_devid); 4698 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4699 "sd_register_devid: devid fabricated using " 4700 "ddi framework\n"); 4701 } 4702 } 4703 4704 /* clean up resources */ 4705 if (inq80 != NULL) { 4706 kmem_free(inq80, inq80_len); 4707 } 4708 if (inq83 != NULL) { 4709 kmem_free(inq83, inq83_len); 4710 } 4711 } 4712 4713 4714 4715 /* 4716 * Function: sd_get_devid 4717 * 4718 * Description: This routine will return 0 if a valid device id has been 4719 * obtained from the target and stored in the soft state. If a 4720 * valid device id has not been previously read and stored, a 4721 * read attempt will be made.
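* The on-disk container (struct dk_devid) holds a two-byte revision, the encoded devid itself, and an XOR checksum of all the other words in the block; all three are validated below before the devid is accepted.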
4722 * 4723 * Arguments: un - driver soft state (unit) structure 4724 * 4725 * Return Code: 0 if we successfully get the device id 4726 * 4727 * Context: Kernel Thread 4728 */ 4729 4730 static int 4731 sd_get_devid(struct sd_lun *un) 4732 { 4733 struct dk_devid *dkdevid; 4734 ddi_devid_t tmpid; 4735 uint_t *ip; 4736 size_t sz; 4737 diskaddr_t blk; 4738 int status; 4739 int chksum; 4740 int i; 4741 size_t buffer_size; 4742 4743 ASSERT(un != NULL); 4744 ASSERT(mutex_owned(SD_MUTEX(un))); 4745 4746 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4747 un); 4748 4749 if (un->un_devid != NULL) { 4750 return (0); 4751 } 4752 4753 mutex_exit(SD_MUTEX(un)); 4754 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4755 (void *)SD_PATH_DIRECT) != 0) { 4756 mutex_enter(SD_MUTEX(un)); 4757 return (EINVAL); 4758 } 4759 4760 /* 4761 * Read and verify device id, stored in the reserved cylinders at the 4762 * end of the disk. Backup label is on the odd sectors of the last 4763 * track of the last cylinder. Device id will be on track of the next 4764 * to last cylinder. 4765 */ 4766 mutex_enter(SD_MUTEX(un)); 4767 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4768 mutex_exit(SD_MUTEX(un)); 4769 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4770 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4771 SD_PATH_DIRECT); 4772 if (status != 0) { 4773 goto error; 4774 } 4775 4776 /* Validate the revision */ 4777 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4778 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4779 status = EINVAL; 4780 goto error; 4781 } 4782 4783 /* Calculate the checksum */ 4784 chksum = 0; 4785 ip = (uint_t *)dkdevid; 4786 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4787 i++) { 4788 chksum ^= ip[i]; 4789 } 4790 4791 /* Compare the checksums */ 4792 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4793 status = EINVAL; 4794 goto error; 4795 } 4796 4797 /* Validate the device id */ 4798 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4799 status = EINVAL; 4800 goto error; 4801 } 4802 4803 /* 4804 * Store the device id in the driver soft state 4805 */ 4806 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4807 tmpid = kmem_alloc(sz, KM_SLEEP); 4808 4809 mutex_enter(SD_MUTEX(un)); 4810 4811 un->un_devid = tmpid; 4812 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4813 4814 kmem_free(dkdevid, buffer_size); 4815 4816 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4817 4818 return (status); 4819 error: 4820 mutex_enter(SD_MUTEX(un)); 4821 kmem_free(dkdevid, buffer_size); 4822 return (status); 4823 } 4824 4825 4826 /* 4827 * Function: sd_create_devid 4828 * 4829 * Description: This routine will fabricate the device id and write it 4830 * to the disk. 
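* A minimal user-level sketch of the checksum rule shared by sd_get_devid()
 * and sd_write_deviceid() (illustrative only, not driver code; the driver
 * XORs every word of the block except the last, which stores the result):
 *
 *	uint_t
 *	dkd_chksum(const uint_t *ip, size_t blocksize)
 *	{
 *		uint_t sum = 0;
 *		size_t i;
 *
 *		/* XOR together every word except the final checksum word */
 *		for (i = 0;
 *		    i < (blocksize - sizeof (uint_t)) / sizeof (uint_t); i++)
 *			sum ^= ip[i];
 *		return (sum);
 *	}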
4831 * 4832 * Arguments: un - driver soft state (unit) structure 4833 * 4834 * Return Code: value of the fabricated device id 4835 * 4836 * Context: Kernel Thread 4837 */ 4838 4839 static ddi_devid_t 4840 sd_create_devid(struct sd_lun *un) 4841 { 4842 ASSERT(un != NULL); 4843 4844 /* Fabricate the devid */ 4845 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 4846 == DDI_FAILURE) { 4847 return (NULL); 4848 } 4849 4850 /* Write the devid to disk */ 4851 if (sd_write_deviceid(un) != 0) { 4852 ddi_devid_free(un->un_devid); 4853 un->un_devid = NULL; 4854 } 4855 4856 return (un->un_devid); 4857 } 4858 4859 4860 /* 4861 * Function: sd_write_deviceid 4862 * 4863 * Description: This routine will write the device id to the disk 4864 * reserved sector. 4865 * 4866 * Arguments: un - driver soft state (unit) structure 4867 * 4868 * Return Code: EINVAL 4869 * value returned by sd_send_scsi_cmd 4870 * 4871 * Context: Kernel Thread 4872 */ 4873 4874 static int 4875 sd_write_deviceid(struct sd_lun *un) 4876 { 4877 struct dk_devid *dkdevid; 4878 diskaddr_t blk; 4879 uint_t *ip, chksum; 4880 int status; 4881 int i; 4882 4883 ASSERT(mutex_owned(SD_MUTEX(un))); 4884 4885 mutex_exit(SD_MUTEX(un)); 4886 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4887 (void *)SD_PATH_DIRECT) != 0) { 4888 mutex_enter(SD_MUTEX(un)); 4889 return (-1); 4890 } 4891 4892 4893 /* Allocate the buffer */ 4894 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 4895 4896 /* Fill in the revision */ 4897 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 4898 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 4899 4900 /* Copy in the device id */ 4901 mutex_enter(SD_MUTEX(un)); 4902 bcopy(un->un_devid, &dkdevid->dkd_devid, 4903 ddi_devid_sizeof(un->un_devid)); 4904 mutex_exit(SD_MUTEX(un)); 4905 4906 /* Calculate the checksum */ 4907 chksum = 0; 4908 ip = (uint_t *)dkdevid; 4909 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4910 i++) { 4911 chksum ^= ip[i]; 4912 } 4913 4914 /* Fill-in checksum */ 4915 DKD_FORMCHKSUM(chksum, dkdevid); 4916 4917 /* Write the reserved sector */ 4918 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 4919 SD_PATH_DIRECT); 4920 4921 kmem_free(dkdevid, un->un_sys_blocksize); 4922 4923 mutex_enter(SD_MUTEX(un)); 4924 return (status); 4925 } 4926 4927 4928 /* 4929 * Function: sd_check_vpd_page_support 4930 * 4931 * Description: This routine sends an inquiry command with the EVPD bit set and 4932 * a page code of 0x00 to the device. It is used to determine which 4933 * vital product pages are availible to find the devid. We are 4934 * looking for pages 0x83 or 0x80. If we return a negative 1, the 4935 * device does not support that command. 4936 * 4937 * Arguments: un - driver soft state (unit) structure 4938 * 4939 * Return Code: 0 - success 4940 * 1 - check condition 4941 * 4942 * Context: This routine can sleep. 4943 */ 4944 4945 static int 4946 sd_check_vpd_page_support(struct sd_lun *un) 4947 { 4948 uchar_t *page_list = NULL; 4949 uchar_t page_length = 0xff; /* Use max possible length */ 4950 uchar_t evpd = 0x01; /* Set the EVPD bit */ 4951 uchar_t page_code = 0x00; /* Supported VPD Pages */ 4952 int rval = 0; 4953 int counter; 4954 4955 ASSERT(un != NULL); 4956 ASSERT(mutex_owned(SD_MUTEX(un))); 4957 4958 mutex_exit(SD_MUTEX(un)); 4959 4960 /* 4961 * We'll set the page length to the maximum to save figuring it out 4962 * with an additional call. 
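* In the returned page-0 data the page-list length is in byte 3 (VPD_PAGE_LENGTH) and the supported page codes start at byte 4, per the SPC layout of VPD page 0x00, which is why the scan below begins at counter = 4.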
4963 */ 4964 page_list = kmem_zalloc(page_length, KM_SLEEP); 4965 4966 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 4967 page_code, NULL); 4968 4969 mutex_enter(SD_MUTEX(un)); 4970 4971 /* 4972 * Now we must validate that the device accepted the command, as some 4973 * drives do not support it. If the drive does support it, we will 4974 * return 0, and the supported pages will be in un_vpd_page_mask. If 4975 * not, we return -1. 4976 */ 4977 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 4978 /* Loop to find one of the 2 pages we need */ 4979 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 4980 4981 /* 4982 * Pages are returned in ascending order, and 0x83 is what we 4983 * are hoping for. 4984 */ 4985 while ((page_list[counter] <= 0x83) && 4986 (counter <= (page_list[VPD_PAGE_LENGTH] + 4987 VPD_HEAD_OFFSET))) { 4988 /* 4989 * Add 3 because page_list[3] is the number of 4990 * pages minus 3 4991 */ 4992 4993 switch (page_list[counter]) { 4994 case 0x00: 4995 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 4996 break; 4997 case 0x80: 4998 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 4999 break; 5000 case 0x81: 5001 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5002 break; 5003 case 0x82: 5004 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5005 break; 5006 case 0x83: 5007 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5008 break; 5009 } 5010 counter++; 5011 } 5012 5013 } else { 5014 rval = -1; 5015 5016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5017 "sd_check_vpd_page_support: This drive does not implement " 5018 "VPD pages.\n"); 5019 } 5020 5021 kmem_free(page_list, page_length); 5022 5023 return (rval); 5024 } 5025 5026 5027 /* 5028 * Function: sd_setup_pm 5029 * 5030 * Description: Initialize Power Management on the device 5031 * 5032 * Context: Kernel Thread 5033 */ 5034 5035 static void 5036 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5037 { 5038 uint_t log_page_size; 5039 uchar_t *log_page_data; 5040 int rval; 5041 5042 /* 5043 * Since we are called from attach, holding a mutex for 5044 * un is unnecessary. Because some of the routines called 5045 * from here require SD_MUTEX to not be held, assert this 5046 * right up front. 5047 */ 5048 ASSERT(!mutex_owned(SD_MUTEX(un))); 5049 /* 5050 * Since the sd device does not have the 'reg' property, 5051 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5052 * The following code is to tell cpr that this device 5053 * DOES need to be suspended and resumed. 5054 */ 5055 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5056 "pm-hardware-state", "needs-suspend-resume"); 5057 5058 /* 5059 * This complies with the new power management framework 5060 * for certain desktop machines. Create the pm_components 5061 * property as a string array property. 5062 */ 5063 if (un->un_f_pm_supported) { 5064 /* 5065 * not all devices have a motor, try it first. 5066 * some devices may return ILLEGAL REQUEST, some 5067 * will hang 5068 * The following START_STOP_UNIT is used to check if target 5069 * device has a motor. 
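* A failing START STOP UNIT here is simply recorded as "no motor" (un_f_start_stop_supported = FALSE) rather than treated as a fatal error.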
5070 */ 5071 un->un_f_start_stop_supported = TRUE; 5072 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5073 SD_PATH_DIRECT) != 0) { 5074 un->un_f_start_stop_supported = FALSE; 5075 } 5076 5077 /* 5078 * create the pm properties anyway, otherwise the parent can't 5079 * go to sleep 5080 */ 5081 (void) sd_create_pm_components(devi, un); 5082 un->un_f_pm_is_enabled = TRUE; 5083 return; 5084 } 5085 5086 if (!un->un_f_log_sense_supported) { 5087 un->un_power_level = SD_SPINDLE_ON; 5088 un->un_f_pm_is_enabled = FALSE; 5089 return; 5090 } 5091 5092 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5093 5094 #ifdef SDDEBUG 5095 if (sd_force_pm_supported) { 5096 /* Force a successful result */ 5097 rval = 1; 5098 } 5099 #endif 5100 5101 /* 5102 * If the start-stop cycle counter log page is not supported 5103 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5104 * then we should not create the pm_components property. 5105 */ 5106 if (rval == -1) { 5107 /* 5108 * Error. 5109 * Reading log sense failed, most likely this is 5110 * an older drive that does not support log sense. 5111 * If this fails auto-pm is not supported. 5112 */ 5113 un->un_power_level = SD_SPINDLE_ON; 5114 un->un_f_pm_is_enabled = FALSE; 5115 5116 } else if (rval == 0) { 5117 /* 5118 * Page not found. 5119 * The start stop cycle counter is implemented as page 5120 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5121 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5122 */ 5123 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5124 /* 5125 * Page found, use this one. 5126 */ 5127 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5128 un->un_f_pm_is_enabled = TRUE; 5129 } else { 5130 /* 5131 * Error or page not found. 5132 * auto-pm is not supported for this device. 5133 */ 5134 un->un_power_level = SD_SPINDLE_ON; 5135 un->un_f_pm_is_enabled = FALSE; 5136 } 5137 } else { 5138 /* 5139 * Page found, use it. 5140 */ 5141 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5142 un->un_f_pm_is_enabled = TRUE; 5143 } 5144 5145 5146 if (un->un_f_pm_is_enabled == TRUE) { 5147 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5148 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5149 5150 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5151 log_page_size, un->un_start_stop_cycle_page, 5152 0x01, 0, SD_PATH_DIRECT); 5153 #ifdef SDDEBUG 5154 if (sd_force_pm_supported) { 5155 /* Force a successful result */ 5156 rval = 0; 5157 } 5158 #endif 5159 5160 /* 5161 * If the LOG SENSE for the page (start/stop cycle counter page) 5162 * succeeds, then power management is supported and we can 5163 * enable auto-pm. 5164 */ 5165 if (rval == 0) { 5166 (void) sd_create_pm_components(devi, un); 5167 } else { 5168 un->un_power_level = SD_SPINDLE_ON; 5169 un->un_f_pm_is_enabled = FALSE; 5170 } 5171 5172 kmem_free(log_page_data, log_page_size); 5173 } 5174 } 5175 5176 5177 /* 5178 * Function: sd_create_pm_components 5179 * 5180 * Description: Initialize PM property. 5181 * 5182 * Context: Kernel thread context 5183 */ 5184 5185 static void 5186 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5187 { 5188 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5189 5190 ASSERT(!mutex_owned(SD_MUTEX(un))); 5191 5192 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5193 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5194 /* 5195 * When components are initially created they are idle, 5196 * power up any non-removables.
5197 * Note: the return value of pm_raise_power can't be used 5198 * for determining if PM should be enabled for this device. 5199 * Even if you check the return values and remove this 5200 * property created above, the PM framework will not honor the 5201 * change after the first call to pm_raise_power. Hence, 5202 * removal of that property does not help if pm_raise_power 5203 * fails. In the case of removable media, the start/stop 5204 * will fail if the media is not present. 5205 */ 5206 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5207 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5208 mutex_enter(SD_MUTEX(un)); 5209 un->un_power_level = SD_SPINDLE_ON; 5210 mutex_enter(&un->un_pm_mutex); 5211 /* Set to on and not busy. */ 5212 un->un_pm_count = 0; 5213 } else { 5214 mutex_enter(SD_MUTEX(un)); 5215 un->un_power_level = SD_SPINDLE_OFF; 5216 mutex_enter(&un->un_pm_mutex); 5217 /* Set to off. */ 5218 un->un_pm_count = -1; 5219 } 5220 mutex_exit(&un->un_pm_mutex); 5221 mutex_exit(SD_MUTEX(un)); 5222 } else { 5223 un->un_power_level = SD_SPINDLE_ON; 5224 un->un_f_pm_is_enabled = FALSE; 5225 } 5226 } 5227 5228 5229 /* 5230 * Function: sd_ddi_suspend 5231 * 5232 * Description: Performs system power-down operations. This includes 5233 * setting the drive state to indicate its suspended so 5234 * that no new commands will be accepted. Also, wait for 5235 * all commands that are in transport or queued to a timer 5236 * for retry to complete. All timeout threads are cancelled. 5237 * 5238 * Return Code: DDI_FAILURE or DDI_SUCCESS 5239 * 5240 * Context: Kernel thread context 5241 */ 5242 5243 static int 5244 sd_ddi_suspend(dev_info_t *devi) 5245 { 5246 struct sd_lun *un; 5247 clock_t wait_cmds_complete; 5248 5249 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5250 if (un == NULL) { 5251 return (DDI_FAILURE); 5252 } 5253 5254 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5255 5256 mutex_enter(SD_MUTEX(un)); 5257 5258 /* Return success if the device is already suspended. */ 5259 if (un->un_state == SD_STATE_SUSPENDED) { 5260 mutex_exit(SD_MUTEX(un)); 5261 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5262 "device already suspended, exiting\n"); 5263 return (DDI_SUCCESS); 5264 } 5265 5266 /* Return failure if the device is being used by HA */ 5267 if (un->un_resvd_status & 5268 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5269 mutex_exit(SD_MUTEX(un)); 5270 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5271 "device in use by HA, exiting\n"); 5272 return (DDI_FAILURE); 5273 } 5274 5275 /* 5276 * Return failure if the device is in a resource wait 5277 * or power changing state. 5278 */ 5279 if ((un->un_state == SD_STATE_RWAIT) || 5280 (un->un_state == SD_STATE_PM_CHANGING)) { 5281 mutex_exit(SD_MUTEX(un)); 5282 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5283 "device in resource wait state, exiting\n"); 5284 return (DDI_FAILURE); 5285 } 5286 5287 5288 un->un_save_state = un->un_last_state; 5289 New_state(un, SD_STATE_SUSPENDED); 5290 5291 /* 5292 * Wait for all commands that are in transport or queued to a timer 5293 * for retry to complete. 5294 * 5295 * While waiting, no new commands will be accepted or sent because of 5296 * the new state we set above. 5297 * 5298 * Wait till current operation has completed. If we are in the resource 5299 * wait state (with an intr outstanding) then we need to wait till the 5300 * intr completes and starts the next cmd. We want to wait for 5301 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 
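     * As a reading aid, a sketch of the wait loop that follows, with
     * the cv_timedwait(9F) contract spelled out: its last argument is
     * an absolute time in lbolt ticks, and -1 is returned once that
     * deadline has passed, so the deadline is computed once and
     * re-used across wakeups:
     *
     *    deadline = ddi_get_lbolt() +
     *        (sd_wait_cmds_complete * drv_usectohz(1000000));
     *    while (un->un_ncmds_in_transport != 0)
     *        if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
     *            deadline) == -1)
     *            -- timed out: undo the state change and fail
     *
     * where drv_usectohz(1000000) is one second's worth of ticks.
     *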
5302 */ 5303 wait_cmds_complete = ddi_get_lbolt() + 5304 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5305 5306 while (un->un_ncmds_in_transport != 0) { 5307 /* 5308 * Fail if commands do not finish in the specified time. 5309 */ 5310 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5311 wait_cmds_complete) == -1) { 5312 /* 5313 * Undo the state changes made above. Everything 5314 * must go back to it's original value. 5315 */ 5316 Restore_state(un); 5317 un->un_last_state = un->un_save_state; 5318 /* Wake up any threads that might be waiting. */ 5319 cv_broadcast(&un->un_suspend_cv); 5320 mutex_exit(SD_MUTEX(un)); 5321 SD_ERROR(SD_LOG_IO_PM, un, 5322 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5323 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5324 return (DDI_FAILURE); 5325 } 5326 } 5327 5328 /* 5329 * Cancel SCSI watch thread and timeouts, if any are active 5330 */ 5331 5332 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5333 opaque_t temp_token = un->un_swr_token; 5334 mutex_exit(SD_MUTEX(un)); 5335 scsi_watch_suspend(temp_token); 5336 mutex_enter(SD_MUTEX(un)); 5337 } 5338 5339 if (un->un_reset_throttle_timeid != NULL) { 5340 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5341 un->un_reset_throttle_timeid = NULL; 5342 mutex_exit(SD_MUTEX(un)); 5343 (void) untimeout(temp_id); 5344 mutex_enter(SD_MUTEX(un)); 5345 } 5346 5347 if (un->un_dcvb_timeid != NULL) { 5348 timeout_id_t temp_id = un->un_dcvb_timeid; 5349 un->un_dcvb_timeid = NULL; 5350 mutex_exit(SD_MUTEX(un)); 5351 (void) untimeout(temp_id); 5352 mutex_enter(SD_MUTEX(un)); 5353 } 5354 5355 mutex_enter(&un->un_pm_mutex); 5356 if (un->un_pm_timeid != NULL) { 5357 timeout_id_t temp_id = un->un_pm_timeid; 5358 un->un_pm_timeid = NULL; 5359 mutex_exit(&un->un_pm_mutex); 5360 mutex_exit(SD_MUTEX(un)); 5361 (void) untimeout(temp_id); 5362 mutex_enter(SD_MUTEX(un)); 5363 } else { 5364 mutex_exit(&un->un_pm_mutex); 5365 } 5366 5367 if (un->un_retry_timeid != NULL) { 5368 timeout_id_t temp_id = un->un_retry_timeid; 5369 un->un_retry_timeid = NULL; 5370 mutex_exit(SD_MUTEX(un)); 5371 (void) untimeout(temp_id); 5372 mutex_enter(SD_MUTEX(un)); 5373 } 5374 5375 if (un->un_direct_priority_timeid != NULL) { 5376 timeout_id_t temp_id = un->un_direct_priority_timeid; 5377 un->un_direct_priority_timeid = NULL; 5378 mutex_exit(SD_MUTEX(un)); 5379 (void) untimeout(temp_id); 5380 mutex_enter(SD_MUTEX(un)); 5381 } 5382 5383 if (un->un_f_is_fibre == TRUE) { 5384 /* 5385 * Remove callbacks for insert and remove events 5386 */ 5387 if (un->un_insert_event != NULL) { 5388 mutex_exit(SD_MUTEX(un)); 5389 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5390 mutex_enter(SD_MUTEX(un)); 5391 un->un_insert_event = NULL; 5392 } 5393 5394 if (un->un_remove_event != NULL) { 5395 mutex_exit(SD_MUTEX(un)); 5396 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5397 mutex_enter(SD_MUTEX(un)); 5398 un->un_remove_event = NULL; 5399 } 5400 } 5401 5402 mutex_exit(SD_MUTEX(un)); 5403 5404 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5405 5406 return (DDI_SUCCESS); 5407 } 5408 5409 5410 /* 5411 * Function: sd_ddi_pm_suspend 5412 * 5413 * Description: Set the drive state to low power. 5414 * Someone else is required to actually change the drive 5415 * power level. 
5416  *
5417  * Arguments: un - driver soft state (unit) structure
5418  *
5419  * Return Code: DDI_FAILURE or DDI_SUCCESS
5420  *
5421  * Context: Kernel thread context
5422  */
5423 
5424 static int
5425 sd_ddi_pm_suspend(struct sd_lun *un)
5426 {
5427     ASSERT(un != NULL);
5428     SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");
5429 
5430     ASSERT(!mutex_owned(SD_MUTEX(un)));
5431     mutex_enter(SD_MUTEX(un));
5432 
5433     /*
5434      * Exit if power management is not enabled for this device, or if
5435      * the device is being used by HA.
5436      */
5437     if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
5438         (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
5439         mutex_exit(SD_MUTEX(un));
5440         SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
5441         return (DDI_SUCCESS);
5442     }
5443 
5444     SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
5445         un->un_ncmds_in_driver);
5446 
5447     /*
5448      * See if the device is not busy, i.e.:
5449      * - we have no commands in the driver for this device
5450      * - not waiting for resources
5451      */
5452     if ((un->un_ncmds_in_driver == 0) &&
5453         (un->un_state != SD_STATE_RWAIT)) {
5454         /*
5455          * The device is not busy, so it is OK to go to low power state.
5456          * Indicate low power, but rely on someone else to actually
5457          * change it.
5458          */
5459         mutex_enter(&un->un_pm_mutex);
5460         un->un_pm_count = -1;
5461         mutex_exit(&un->un_pm_mutex);
5462         un->un_power_level = SD_SPINDLE_OFF;
5463     }
5464 
5465     mutex_exit(SD_MUTEX(un));
5466 
5467     SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
5468 
5469     return (DDI_SUCCESS);
5470 }
5471 
5472 
5473 /*
5474  * Function: sd_ddi_resume
5475  *
5476  * Description: Performs system power-up operations.
5477  *
5478  * Return Code: DDI_SUCCESS
5479  *              DDI_FAILURE
5480  *
5481  * Context: Kernel thread context
5482  */
5483 
5484 static int
5485 sd_ddi_resume(dev_info_t *devi)
5486 {
5487     struct sd_lun *un;
5488 
5489     un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5490     if (un == NULL) {
5491         return (DDI_FAILURE);
5492     }
5493 
5494     SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
5495 
5496     mutex_enter(SD_MUTEX(un));
5497     Restore_state(un);
5498 
5499     /*
5500      * Restore the state that was saved, to give
5501      * un_last_state the right value.
5502      */
5503     un->un_last_state = un->un_save_state;
5504     /*
5505      * Note: throttle comes back at full.
5506      * Also note: this MUST be done before calling pm_raise_power,
5507      * otherwise the system can get hung in biowait. The scenario where
5508      * this happens is under cpr suspend. Writing of the system
5509      * state goes through sddump, which writes 0 to un_throttle. If
5510      * writing the system state then fails, for example if the partition
5511      * is too small, then cpr attempts a resume. If the throttle isn't
5512      * restored from the saved value until after calling pm_raise_power,
5513      * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
5514      * hangs in biowait.
5515      */
5516     un->un_throttle = un->un_saved_throttle;
5517 
5518     /*
5519      * The chance of failure is very rare, as the only command issued in
5520      * the power entry point is a START command on the 0->1 or
5521      * unknown->1 transition. Put the device in the SPINDLE ON state
5522      * irrespective of the state in which it was suspended. Ignore the
5523      * return value, as the resume should not be failed. In the case of
5524      * removable media, the media need not be inserted, so raise power
5525      * may fail with media not present.
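     *
     * One locking note, stated as an assumption: pm_raise_power(9F)
     * can re-enter this driver through sdpower(), which itself
     * acquires SD_MUTEX, so the soft-state mutex is dropped around
     * the call:
     *
     *    mutex_exit(SD_MUTEX(un));
     *    (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
     *    mutex_enter(SD_MUTEX(un));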
5526 */ 5527 if (un->un_f_attach_spinup) { 5528 mutex_exit(SD_MUTEX(un)); 5529 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 5530 mutex_enter(SD_MUTEX(un)); 5531 } 5532 5533 /* 5534 * Don't broadcast to the suspend cv and therefore possibly 5535 * start I/O until after power has been restored. 5536 */ 5537 cv_broadcast(&un->un_suspend_cv); 5538 cv_broadcast(&un->un_state_cv); 5539 5540 /* restart thread */ 5541 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5542 scsi_watch_resume(un->un_swr_token); 5543 } 5544 5545 #if (defined(__fibre)) 5546 if (un->un_f_is_fibre == TRUE) { 5547 /* 5548 * Add callbacks for insert and remove events 5549 */ 5550 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5551 sd_init_event_callbacks(un); 5552 } 5553 } 5554 #endif 5555 5556 /* 5557 * Transport any pending commands to the target. 5558 * 5559 * If this is a low-activity device commands in queue will have to wait 5560 * until new commands come in, which may take awhile. Also, we 5561 * specifically don't check un_ncmds_in_transport because we know that 5562 * there really are no commands in progress after the unit was 5563 * suspended and we could have reached the throttle level, been 5564 * suspended, and have no new commands coming in for awhile. Highly 5565 * unlikely, but so is the low-activity disk scenario. 5566 */ 5567 ddi_xbuf_dispatch(un->un_xbuf_attr); 5568 5569 sd_start_cmds(un, NULL); 5570 mutex_exit(SD_MUTEX(un)); 5571 5572 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5573 5574 return (DDI_SUCCESS); 5575 } 5576 5577 5578 /* 5579 * Function: sd_ddi_pm_resume 5580 * 5581 * Description: Set the drive state to powered on. 5582 * Someone else is required to actually change the drive 5583 * power level. 5584 * 5585 * Arguments: un - driver soft state (unit) structure 5586 * 5587 * Return Code: DDI_SUCCESS 5588 * 5589 * Context: Kernel thread context 5590 */ 5591 5592 static int 5593 sd_ddi_pm_resume(struct sd_lun *un) 5594 { 5595 ASSERT(un != NULL); 5596 5597 ASSERT(!mutex_owned(SD_MUTEX(un))); 5598 mutex_enter(SD_MUTEX(un)); 5599 un->un_power_level = SD_SPINDLE_ON; 5600 5601 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5602 mutex_enter(&un->un_pm_mutex); 5603 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5604 un->un_pm_count++; 5605 ASSERT(un->un_pm_count == 0); 5606 /* 5607 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5608 * un_suspend_cv is for a system resume, not a power management 5609 * device resume. (4297749) 5610 * cv_broadcast(&un->un_suspend_cv); 5611 */ 5612 } 5613 mutex_exit(&un->un_pm_mutex); 5614 mutex_exit(SD_MUTEX(un)); 5615 5616 return (DDI_SUCCESS); 5617 } 5618 5619 5620 /* 5621 * Function: sd_pm_idletimeout_handler 5622 * 5623 * Description: A timer routine that's active only while a device is busy. 5624 * The purpose is to extend slightly the pm framework's busy 5625 * view of the device to prevent busy/idle thrashing for 5626 * back-to-back commands. Do this by comparing the current time 5627 * to the time at which the last command completed and when the 5628 * difference is greater than sd_pm_idletime, call 5629 * pm_idle_component. In addition to indicating idle to the pm 5630 * framework, update the chain type to again use the internal pm 5631 * layers of the driver. 
5632  *
5633  * Arguments: arg - driver soft state (unit) structure
5634  *
5635  * Context: Executes in a timeout(9F) thread context
5636  */
5637 
5638 static void
5639 sd_pm_idletimeout_handler(void *arg)
5640 {
5641     struct sd_lun *un = arg;
5642 
5643     time_t now;
5644 
5645     mutex_enter(&sd_detach_mutex);
5646     if (un->un_detach_count != 0) {
5647         /* Abort if the instance is detaching */
5648         mutex_exit(&sd_detach_mutex);
5649         return;
5650     }
5651     mutex_exit(&sd_detach_mutex);
5652 
5653     now = ddi_get_time();
5654     /*
5655      * Grab both mutexes, in the proper order, since we're accessing
5656      * both PM and softstate variables.
5657      */
5658     mutex_enter(SD_MUTEX(un));
5659     mutex_enter(&un->un_pm_mutex);
5660     if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
5661         (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
5662         /*
5663          * Update the chain types.
5664          * This takes effect on the next new command received.
5665          */
5666         if (un->un_f_non_devbsize_supported) {
5667             un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
5668         } else {
5669             un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
5670         }
5671         un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
5672 
5673         SD_TRACE(SD_LOG_IO_PM, un,
5674             "sd_pm_idletimeout_handler: idling device\n");
5675         (void) pm_idle_component(SD_DEVINFO(un), 0);
5676         un->un_pm_idle_timeid = NULL;
5677     } else {
5678         un->un_pm_idle_timeid =
5679             timeout(sd_pm_idletimeout_handler, un,
5680             (drv_usectohz((clock_t)300000))); /* 300 ms. */
5681     }
5682     mutex_exit(&un->un_pm_mutex);
5683     mutex_exit(SD_MUTEX(un));
5684 }
5685 
5686 
5687 /*
5688  * Function: sd_pm_timeout_handler
5689  *
5690  * Description: Callback to tell the framework we are idle.
5691  *
5692  * Context: timeout(9F) thread context.
5693  */
5694 
5695 static void
5696 sd_pm_timeout_handler(void *arg)
5697 {
5698     struct sd_lun *un = arg;
5699 
5700     (void) pm_idle_component(SD_DEVINFO(un), 0);
5701     mutex_enter(&un->un_pm_mutex);
5702     un->un_pm_timeid = NULL;
5703     mutex_exit(&un->un_pm_mutex);
5704 }
5705 
5706 
5707 /*
5708  * Function: sdpower
5709  *
5710  * Description: PM entry point.
5711  *
5712  * Return Code: DDI_SUCCESS
5713  *              DDI_FAILURE
5714  *
5715  * Context: Kernel thread context
5716  */
5717 
5718 static int
5719 sdpower(dev_info_t *devi, int component, int level)
5720 {
5721     struct sd_lun *un;
5722     int instance;
5723     int rval = DDI_SUCCESS;
5724     uint_t i, log_page_size, maxcycles, ncycles;
5725     uchar_t *log_page_data;
5726     int log_sense_page;
5727     int medium_present;
5728     time_t intvlp;
5729     dev_t dev;
5730     struct pm_trans_data sd_pm_tran_data;
5731     uchar_t save_state;
5732     int sval;
5733     uchar_t state_before_pm;
5734     int got_semaphore_here;
5735 
5736     instance = ddi_get_instance(devi);
5737 
5738     if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
5739         (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
5740         component != 0) {
5741         return (DDI_FAILURE);
5742     }
5743 
5744     dev = sd_make_device(SD_DEVINFO(un));
5745 
5746     SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
5747 
5748     /*
5749      * Must synchronize power down with close.
5750      * Attempt to decrement/acquire the open/close semaphore,
5751      * but do NOT wait on it. If it's not greater than zero,
5752      * i.e., it can't be decremented without waiting, then
5753      * someone else, either open or close, already has it
5754      * and the try returns 0. Use that knowledge here to determine
5755      * if it's OK to change the device power level.
5756      * Also, only increment it on exit if it was decremented, i.e.,
5757      * acquired, here.
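     *
     * In sketch form (a recap of the convention used throughout this
     * routine, not new logic):
     *
     *    got_semaphore_here = sema_tryp(&un->un_semoclose);
     *        -- 0 means open/close currently owns the semaphore
     *    ... attempt the power level change ...
     *    if (got_semaphore_here != 0)
     *        sema_v(&un->un_semoclose);   -- release only if acquired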
5758      */
5759     got_semaphore_here = sema_tryp(&un->un_semoclose);
5760 
5761     mutex_enter(SD_MUTEX(un));
5762 
5763     SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
5764         un->un_ncmds_in_driver);
5765 
5766     /*
5767      * If un_ncmds_in_driver is non-zero it indicates commands are
5768      * already being processed in the driver; if the semaphore was
5769      * not acquired here it indicates an open or close is being
5770      * processed. In either case somebody is requesting to go to low
5771      * power, which can't happen now, so we need to return failure.
5772      */
5773     if ((level == SD_SPINDLE_OFF) &&
5774         ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
5775         mutex_exit(SD_MUTEX(un));
5776 
5777         if (got_semaphore_here != 0) {
5778             sema_v(&un->un_semoclose);
5779         }
5780         SD_TRACE(SD_LOG_IO_PM, un,
5781             "sdpower: exit, device has queued cmds.\n");
5782         return (DDI_FAILURE);
5783     }
5784 
5785     /*
5786      * If the device is OFFLINE the disk is effectively dead; changing
5787      * its power state would require sending it commands, which would
5788      * fail anyway, so return failure here.
5789      *
5790      * Power changes to a device that's OFFLINE or SUSPENDED
5791      * are not allowed.
5792      */
5793     if ((un->un_state == SD_STATE_OFFLINE) ||
5794         (un->un_state == SD_STATE_SUSPENDED)) {
5795         mutex_exit(SD_MUTEX(un));
5796 
5797         if (got_semaphore_here != 0) {
5798             sema_v(&un->un_semoclose);
5799         }
5800         SD_TRACE(SD_LOG_IO_PM, un,
5801             "sdpower: exit, device is off-line.\n");
5802         return (DDI_FAILURE);
5803     }
5804 
5805     /*
5806      * Change the device's state to indicate its power level
5807      * is being changed. Do this to prevent a power off in the
5808      * middle of commands, which is especially bad on devices
5809      * that are really powered off instead of just spun down.
5810      */
5811     state_before_pm = un->un_state;
5812     un->un_state = SD_STATE_PM_CHANGING;
5813 
5814     mutex_exit(SD_MUTEX(un));
5815 
5816     /*
5817      * If the "pm-capable" property was set to TRUE by the HBA driver,
5818      * bypass the following check; otherwise, check the log
5819      * sense information for this device.
5820      */
5821     if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
5822         /*
5823          * Get the log sense information to understand whether the
5824          * power-cycle counts have gone beyond the threshold.
5825          */
5826         log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5827         log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5828 
5829         mutex_enter(SD_MUTEX(un));
5830         log_sense_page = un->un_start_stop_cycle_page;
5831         mutex_exit(SD_MUTEX(un));
5832 
5833         rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
5834             log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
5835 #ifdef SDDEBUG
5836         if (sd_force_pm_supported) {
5837             /* Force a successful result */
5838             rval = 0;
5839         }
5840 #endif
5841         if (rval != 0) {
5842             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
5843                 "Log Sense Failed\n");
5844             kmem_free(log_page_data, log_page_size);
5845             /* Cannot support power management on those drives */
5846 
5847             if (got_semaphore_here != 0) {
5848                 sema_v(&un->un_semoclose);
5849             }
5850             /*
5851              * On exit put the state back to its original value
5852              * and broadcast to anyone waiting for the power
5853              * change completion.
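             *
             * (This restore/broadcast sequence recurs on every
             * early-exit path below; in sketch form:
             *
             *    mutex_enter(SD_MUTEX(un));
             *    un->un_state = state_before_pm;
             *    cv_broadcast(&un->un_suspend_cv);
             *    mutex_exit(SD_MUTEX(un));
             *
             * so threads blocked while the state was
             * SD_STATE_PM_CHANGING can re-evaluate.)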
5854              */
5855             mutex_enter(SD_MUTEX(un));
5856             un->un_state = state_before_pm;
5857             cv_broadcast(&un->un_suspend_cv);
5858             mutex_exit(SD_MUTEX(un));
5859             SD_TRACE(SD_LOG_IO_PM, un,
5860                 "sdpower: exit, Log Sense Failed.\n");
5861             return (DDI_FAILURE);
5862         }
5863 
5864         /*
5865          * From the page data, convert the essential information to
5866          * pm_trans_data.
5867          */
5868         maxcycles =
5869             (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
5870             (log_page_data[0x1E] << 8)  | log_page_data[0x1F];
5871 
5872         sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
5873 
5874         ncycles =
5875             (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
5876             (log_page_data[0x26] << 8)  | log_page_data[0x27];
5877 
5878         sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
5879 
5880         for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
5881             sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
5882                 log_page_data[8+i];
5883         }
5884 
5885         kmem_free(log_page_data, log_page_size);
5886 
5887         /*
5888          * Call the pm_trans_check routine to get the OK from
5889          * the global policy.
5890          */
5891 
5892         sd_pm_tran_data.format = DC_SCSI_FORMAT;
5893         sd_pm_tran_data.un.scsi_cycles.flag = 0;
5894 
5895         rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
5896 #ifdef SDDEBUG
5897         if (sd_force_pm_supported) {
5898             /* Force a successful result */
5899             rval = 1;
5900         }
5901 #endif
5902         switch (rval) {
5903         case 0:
5904             /*
5905              * Not OK to power cycle, or an error occurred in the
5906              * parameters passed. pm_trans_check has returned the
5907              * advised time (intvlp) to wait before the next power
5908              * cycle. Based on that parameter we are supposed to
5909              * pretend we are busy so that the pm framework will not
5910              * call our power entry point in the meantime. To do so,
5911              * install a timeout handler and wait for the recommended
5912              * time to elapse so that power management can become
5913              * effective again.
5914              *
5915              * To effect this behavior, call pm_busy_component to
5916              * indicate to the framework this device is busy.
5917              * By not adjusting un_pm_count, the rest of PM in the
5918              * driver will function normally, independently of this;
5919              * but because the framework is told the device is busy,
5920              * it won't attempt powering down until it gets a
5921              * matching idle. The timeout handler sends this.
5922              * Note: sd_pm_entry can't be called here to do this
5923              * because sdpower may have been called as a result
5924              * of a call to pm_raise_power from within sd_pm_entry.
5925              *
5926              * If a timeout handler is already active then
5927              * don't install another.
5928              */
5929             mutex_enter(&un->un_pm_mutex);
5930             if (un->un_pm_timeid == NULL) {
5931                 un->un_pm_timeid =
5932                     timeout(sd_pm_timeout_handler,
5933                     un, intvlp * drv_usectohz(1000000));
5934                 mutex_exit(&un->un_pm_mutex);
5935                 (void) pm_busy_component(SD_DEVINFO(un), 0);
5936             } else {
5937                 mutex_exit(&un->un_pm_mutex);
5938             }
5939             if (got_semaphore_here != 0) {
5940                 sema_v(&un->un_semoclose);
5941             }
5942             /*
5943              * On exit put the state back to its original value
5944              * and broadcast to anyone waiting for the power
5945              * change completion.
5946              */
5947             mutex_enter(SD_MUTEX(un));
5948             un->un_state = state_before_pm;
5949             cv_broadcast(&un->un_suspend_cv);
5950             mutex_exit(SD_MUTEX(un));
5951 
5952             SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
5953                 "trans check Failed, not ok to power cycle.\n");
5954             return (DDI_FAILURE);
5955 
5956         case -1:
5957             if (got_semaphore_here != 0) {
5958                 sema_v(&un->un_semoclose);
5959             }
5960             /*
5961              * On exit put the state back to its original value
5962              * and broadcast to anyone waiting for the power
5962              * change completion.
5963              */
5964             mutex_enter(SD_MUTEX(un));
5965             un->un_state = state_before_pm;
5966             cv_broadcast(&un->un_suspend_cv);
5967             mutex_exit(SD_MUTEX(un));
5968             SD_TRACE(SD_LOG_IO_PM, un,
5969                 "sdpower: exit, trans check command Failed.\n");
5970             return (DDI_FAILURE);
5971         }
5972     }
5973 
5974     if (level == SD_SPINDLE_OFF) {
5975         /*
5976          * Save the last state; if the STOP fails we need it
5977          * for restoring.
5978          */
5979         mutex_enter(SD_MUTEX(un));
5980         save_state = un->un_last_state;
5981         /*
5982          * There must not be any commands being processed
5983          * in the driver when we get here. Power to the
5984          * device is potentially going off.
5985          */
5986         ASSERT(un->un_ncmds_in_driver == 0);
5987         mutex_exit(SD_MUTEX(un));
5988 
5989         /*
5990          * For now suspend the device completely before the spindle is
5991          * turned off.
5992          */
5993         if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
5994             if (got_semaphore_here != 0) {
5995                 sema_v(&un->un_semoclose);
5996             }
5997             /*
5998              * On exit put the state back to its original value
5999              * and broadcast to anyone waiting for the power
6000              * change completion.
6001              */
6002             mutex_enter(SD_MUTEX(un));
6003             un->un_state = state_before_pm;
6004             cv_broadcast(&un->un_suspend_cv);
6005             mutex_exit(SD_MUTEX(un));
6006             SD_TRACE(SD_LOG_IO_PM, un,
6007                 "sdpower: exit, PM suspend Failed.\n");
6008             return (DDI_FAILURE);
6009         }
6010     }
6011 
6012     /*
6013      * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6014      * close, or strategy. Dump no longer uses this routine; it uses its
6015      * own code so it can be done in polled mode.
6016      */
6017 
6018     medium_present = TRUE;
6019 
6020     /*
6021      * When powering up, issue a TUR in case the device is at unit
6022      * attention. Don't do retries. Bypass the PM layer, otherwise
6023      * a deadlock on un_pm_busy_cv will occur.
6024      */
6025     if (level == SD_SPINDLE_ON) {
6026         (void) sd_send_scsi_TEST_UNIT_READY(un,
6027             SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6028     }
6029 
6030     SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6031         ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6032 
6033     sval = sd_send_scsi_START_STOP_UNIT(un,
6034         ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6035         SD_PATH_DIRECT);
6036     /* Command failed, check for media present. */
6037     if ((sval == ENXIO) && un->un_f_has_removable_media) {
6038         medium_present = FALSE;
6039     }
6040 
6041     /*
6042      * The conditions of interest here are:
6043      * if a spindle off with media present fails,
6044      *    then restore the state and return an error.
6045      * else if a spindle on fails,
6046      *    then return an error (there's no state to restore).
6047      * In all other cases we set up for the new state
6048      * and return success.
6049      */
6050     switch (level) {
6051     case SD_SPINDLE_OFF:
6052         if ((medium_present == TRUE) && (sval != 0)) {
6053             /* The stop command from above failed */
6054             rval = DDI_FAILURE;
6055             /*
6056              * The stop command failed, and we have media
6057              * present. Put the level back by calling
6058              * sd_ddi_pm_resume() and set the state back to
6059              * its previous value.
6060              */
6061             (void) sd_ddi_pm_resume(un);
6062             mutex_enter(SD_MUTEX(un));
6063             un->un_last_state = save_state;
6064             mutex_exit(SD_MUTEX(un));
6065             break;
6066         }
6067         /*
6068          * The stop command from above succeeded.
6069          */
6070         if (un->un_f_monitor_media_state) {
6071             /*
6072              * Terminate the watch thread when removable media
6073              * devices are going into low power state.
This is as per 6074 * the requirements of pm framework, otherwise commands 6075 * will be generated for the device (through watch 6076 * thread), even when the device is in low power state. 6077 */ 6078 mutex_enter(SD_MUTEX(un)); 6079 un->un_f_watcht_stopped = FALSE; 6080 if (un->un_swr_token != NULL) { 6081 opaque_t temp_token = un->un_swr_token; 6082 un->un_f_watcht_stopped = TRUE; 6083 un->un_swr_token = NULL; 6084 mutex_exit(SD_MUTEX(un)); 6085 (void) scsi_watch_request_terminate(temp_token, 6086 SCSI_WATCH_TERMINATE_WAIT); 6087 } else { 6088 mutex_exit(SD_MUTEX(un)); 6089 } 6090 } 6091 break; 6092 6093 default: /* The level requested is spindle on... */ 6094 /* 6095 * Legacy behavior: return success on a failed spinup 6096 * if there is no media in the drive. 6097 * Do this by looking at medium_present here. 6098 */ 6099 if ((sval != 0) && medium_present) { 6100 /* The start command from above failed */ 6101 rval = DDI_FAILURE; 6102 break; 6103 } 6104 /* 6105 * The start command from above succeeded 6106 * Resume the devices now that we have 6107 * started the disks 6108 */ 6109 (void) sd_ddi_pm_resume(un); 6110 6111 /* 6112 * Resume the watch thread since it was suspended 6113 * when the device went into low power mode. 6114 */ 6115 if (un->un_f_monitor_media_state) { 6116 mutex_enter(SD_MUTEX(un)); 6117 if (un->un_f_watcht_stopped == TRUE) { 6118 opaque_t temp_token; 6119 6120 un->un_f_watcht_stopped = FALSE; 6121 mutex_exit(SD_MUTEX(un)); 6122 temp_token = scsi_watch_request_submit( 6123 SD_SCSI_DEVP(un), 6124 sd_check_media_time, 6125 SENSE_LENGTH, sd_media_watch_cb, 6126 (caddr_t)dev); 6127 mutex_enter(SD_MUTEX(un)); 6128 un->un_swr_token = temp_token; 6129 } 6130 mutex_exit(SD_MUTEX(un)); 6131 } 6132 } 6133 if (got_semaphore_here != 0) { 6134 sema_v(&un->un_semoclose); 6135 } 6136 /* 6137 * On exit put the state back to it's original value 6138 * and broadcast to anyone waiting for the power 6139 * change completion. 6140 */ 6141 mutex_enter(SD_MUTEX(un)); 6142 un->un_state = state_before_pm; 6143 cv_broadcast(&un->un_suspend_cv); 6144 mutex_exit(SD_MUTEX(un)); 6145 6146 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6147 6148 return (rval); 6149 } 6150 6151 6152 6153 /* 6154 * Function: sdattach 6155 * 6156 * Description: Driver's attach(9e) entry point function. 6157 * 6158 * Arguments: devi - opaque device info handle 6159 * cmd - attach type 6160 * 6161 * Return Code: DDI_SUCCESS 6162 * DDI_FAILURE 6163 * 6164 * Context: Kernel thread context 6165 */ 6166 6167 static int 6168 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6169 { 6170 switch (cmd) { 6171 case DDI_ATTACH: 6172 return (sd_unit_attach(devi)); 6173 case DDI_RESUME: 6174 return (sd_ddi_resume(devi)); 6175 default: 6176 break; 6177 } 6178 return (DDI_FAILURE); 6179 } 6180 6181 6182 /* 6183 * Function: sddetach 6184 * 6185 * Description: Driver's detach(9E) entry point function. 
6186 * 6187 * Arguments: devi - opaque device info handle 6188 * cmd - detach type 6189 * 6190 * Return Code: DDI_SUCCESS 6191 * DDI_FAILURE 6192 * 6193 * Context: Kernel thread context 6194 */ 6195 6196 static int 6197 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6198 { 6199 switch (cmd) { 6200 case DDI_DETACH: 6201 return (sd_unit_detach(devi)); 6202 case DDI_SUSPEND: 6203 return (sd_ddi_suspend(devi)); 6204 default: 6205 break; 6206 } 6207 return (DDI_FAILURE); 6208 } 6209 6210 6211 /* 6212 * Function: sd_sync_with_callback 6213 * 6214 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6215 * state while the callback routine is active. 6216 * 6217 * Arguments: un: softstate structure for the instance 6218 * 6219 * Context: Kernel thread context 6220 */ 6221 6222 static void 6223 sd_sync_with_callback(struct sd_lun *un) 6224 { 6225 ASSERT(un != NULL); 6226 6227 mutex_enter(SD_MUTEX(un)); 6228 6229 ASSERT(un->un_in_callback >= 0); 6230 6231 while (un->un_in_callback > 0) { 6232 mutex_exit(SD_MUTEX(un)); 6233 delay(2); 6234 mutex_enter(SD_MUTEX(un)); 6235 } 6236 6237 mutex_exit(SD_MUTEX(un)); 6238 } 6239 6240 /* 6241 * Function: sd_unit_attach 6242 * 6243 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6244 * the soft state structure for the device and performs 6245 * all necessary structure and device initializations. 6246 * 6247 * Arguments: devi: the system's dev_info_t for the device. 6248 * 6249 * Return Code: DDI_SUCCESS if attach is successful. 6250 * DDI_FAILURE if any part of the attach fails. 6251 * 6252 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6253 * Kernel thread context only. Can sleep. 6254 */ 6255 6256 static int 6257 sd_unit_attach(dev_info_t *devi) 6258 { 6259 struct scsi_device *devp; 6260 struct sd_lun *un; 6261 char *variantp; 6262 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6263 int instance; 6264 int rval; 6265 int wc_enabled; 6266 int tgt; 6267 uint64_t capacity; 6268 uint_t lbasize = 0; 6269 dev_info_t *pdip = ddi_get_parent(devi); 6270 int offbyone = 0; 6271 int geom_label_valid = 0; 6272 6273 /* 6274 * Retrieve the target driver's private data area. This was set 6275 * up by the HBA. 6276 */ 6277 devp = ddi_get_driver_private(devi); 6278 6279 /* 6280 * Retrieve the target ID of the device. 6281 */ 6282 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6283 SCSI_ADDR_PROP_TARGET, -1); 6284 6285 /* 6286 * Since we have no idea what state things were left in by the last 6287 * user of the device, set up some 'default' settings, ie. turn 'em 6288 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6289 * Do this before the scsi_probe, which sends an inquiry. 6290 * This is a fix for bug (4430280). 6291 * Of special importance is wide-xfer. The drive could have been left 6292 * in wide transfer mode by the last driver to communicate with it, 6293 * this includes us. If that's the case, and if the following is not 6294 * setup properly or we don't re-negotiate with the drive prior to 6295 * transferring data to/from the drive, it causes bus parity errors, 6296 * data overruns, and unexpected interrupts. This first occurred when 6297 * the fix for bug (4378686) was made. 
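 * As a reading aid for the scsi_ifsetcap calls below, per
 * scsi_ifsetcap(9F): the second argument names the capability, the
 * third is the new value (0 here, i.e. disabled), and the fourth,
 * whom, is 1 to apply the change to this particular target rather
 * than to all targets on the bus:
 *
 *    (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
 *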
6298 */ 6299 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6300 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6301 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6302 6303 /* 6304 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6305 * on a target. Setting it per lun instance actually sets the 6306 * capability of this target, which affects those luns already 6307 * attached on the same target. So during attach, we can only disable 6308 * this capability only when no other lun has been attached on this 6309 * target. By doing this, we assume a target has the same tagged-qing 6310 * capability for every lun. The condition can be removed when HBA 6311 * is changed to support per lun based tagged-qing capability. 6312 */ 6313 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6314 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6315 } 6316 6317 /* 6318 * Use scsi_probe() to issue an INQUIRY command to the device. 6319 * This call will allocate and fill in the scsi_inquiry structure 6320 * and point the sd_inq member of the scsi_device structure to it. 6321 * If the attach succeeds, then this memory will not be de-allocated 6322 * (via scsi_unprobe()) until the instance is detached. 6323 */ 6324 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6325 goto probe_failed; 6326 } 6327 6328 /* 6329 * Check the device type as specified in the inquiry data and 6330 * claim it if it is of a type that we support. 6331 */ 6332 switch (devp->sd_inq->inq_dtype) { 6333 case DTYPE_DIRECT: 6334 break; 6335 case DTYPE_RODIRECT: 6336 break; 6337 case DTYPE_OPTICAL: 6338 break; 6339 case DTYPE_NOTPRESENT: 6340 default: 6341 /* Unsupported device type; fail the attach. */ 6342 goto probe_failed; 6343 } 6344 6345 /* 6346 * Allocate the soft state structure for this unit. 6347 * 6348 * We rely upon this memory being set to all zeroes by 6349 * ddi_soft_state_zalloc(). We assume that any member of the 6350 * soft state structure that is not explicitly initialized by 6351 * this routine will have a value of zero. 6352 */ 6353 instance = ddi_get_instance(devp->sd_dev); 6354 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6355 goto probe_failed; 6356 } 6357 6358 /* 6359 * Retrieve a pointer to the newly-allocated soft state. 6360 * 6361 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6362 * was successful, unless something has gone horribly wrong and the 6363 * ddi's soft state internals are corrupt (in which case it is 6364 * probably better to halt here than just fail the attach....) 6365 */ 6366 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6367 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6368 instance); 6369 /*NOTREACHED*/ 6370 } 6371 6372 /* 6373 * Link the back ptr of the driver soft state to the scsi_device 6374 * struct for this lun. 6375 * Save a pointer to the softstate in the driver-private area of 6376 * the scsi_device struct. 6377 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6378 * we first set un->un_sd below. 6379 */ 6380 un->un_sd = devp; 6381 devp->sd_private = (opaque_t)un; 6382 6383 /* 6384 * The following must be after devp is stored in the soft state struct. 6385 */ 6386 #ifdef SDDEBUG 6387 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6388 "%s_unit_attach: un:0x%p instance:%d\n", 6389 ddi_driver_name(devi), un, instance); 6390 #endif 6391 6392 /* 6393 * Set up the device type and node type (for the minor nodes). 
6394  * By default we assume that the device can at least support the
6395  * Common Command Set. Call it a CD-ROM if it reports itself
6396  * as a RODIRECT device.
6397  */
6398     switch (devp->sd_inq->inq_dtype) {
6399     case DTYPE_RODIRECT:
6400         un->un_node_type = DDI_NT_CD_CHAN;
6401         un->un_ctype = CTYPE_CDROM;
6402         break;
6403     case DTYPE_OPTICAL:
6404         un->un_node_type = DDI_NT_BLOCK_CHAN;
6405         un->un_ctype = CTYPE_ROD;
6406         break;
6407     default:
6408         un->un_node_type = DDI_NT_BLOCK_CHAN;
6409         un->un_ctype = CTYPE_CCS;
6410         break;
6411     }
6412 
6413     /*
6414      * Try to read the interconnect type from the HBA.
6415      *
6416      * Note: This driver is currently compiled as two binaries, a parallel
6417      * scsi version (sd) and a fibre channel version (ssd). All functional
6418      * differences are determined at compile time. In the future a single
6419      * binary will be provided and the interconnect type will be used to
6420      * differentiate between fibre and parallel scsi behaviors. At that
6421      * time it will be necessary for all fibre channel HBAs to support
6422      * this property.
6423      *
6424      * Set un_f_is_fibre to TRUE (default to fibre).
6425      */
6426     un->un_f_is_fibre = TRUE;
6427     switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
6428     case INTERCONNECT_SSA:
6429         un->un_interconnect_type = SD_INTERCONNECT_SSA;
6430         SD_INFO(SD_LOG_ATTACH_DETACH, un,
6431             "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6432         break;
6433     case INTERCONNECT_PARALLEL:
6434         un->un_f_is_fibre = FALSE;
6435         un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6436         SD_INFO(SD_LOG_ATTACH_DETACH, un,
6437             "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6438         break;
6439     case INTERCONNECT_SATA:
6440         un->un_f_is_fibre = FALSE;
6441         un->un_interconnect_type = SD_INTERCONNECT_SATA;
6442         SD_INFO(SD_LOG_ATTACH_DETACH, un,
6443             "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6444         break;
6445     case INTERCONNECT_FIBRE:
6446         un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
6447         SD_INFO(SD_LOG_ATTACH_DETACH, un,
6448             "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6449         break;
6450     case INTERCONNECT_FABRIC:
6451         un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
6452         un->un_node_type = DDI_NT_BLOCK_FABRIC;
6453         SD_INFO(SD_LOG_ATTACH_DETACH, un,
6454             "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6455         break;
6456     default:
6457 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
6458         /*
6459          * The HBA does not support the "interconnect-type" property
6460          * (or did not provide a recognized type).
6461          *
6462          * Note: This will be obsoleted when a single fibre channel
6463          * and parallel scsi driver is delivered. In the meantime the
6464          * interconnect type will be set to the platform default. If
6465          * that type is not parallel SCSI, it means that we should be
6466          * assuming "ssd" semantics. However, here this also means that
6467          * the FC HBA is not supporting the "interconnect-type" property
6468          * like we expect it to, so log this occurrence.
6469          */
6470         un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
6471         if (!SD_IS_PARALLEL_SCSI(un)) {
6472             SD_INFO(SD_LOG_ATTACH_DETACH, un,
6473                 "sd_unit_attach: un:0x%p Assuming "
6474                 "INTERCONNECT_FIBRE\n", un);
6475         } else {
6476             SD_INFO(SD_LOG_ATTACH_DETACH, un,
6477                 "sd_unit_attach: un:0x%p Assuming "
6478                 "INTERCONNECT_PARALLEL\n", un);
6479             un->un_f_is_fibre = FALSE;
6480         }
6481 #else
6482         /*
6483          * Note: This path will be revisited when a single fibre
6484          * channel and parallel scsi driver is delivered.
The default
6485          * will be to assume that if a device does not support the
6486          * "interconnect-type" property it is a parallel SCSI HBA, and
6487          * we will set the interconnect type accordingly.
6488          */
6489         un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6490         un->un_f_is_fibre = FALSE;
6491 #endif
6492         break;
6493     }
6494 
6495     if (un->un_f_is_fibre == TRUE) {
6496         if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6497             SCSI_VERSION_3) {
6498             switch (un->un_interconnect_type) {
6499             case SD_INTERCONNECT_FIBRE:
6500             case SD_INTERCONNECT_SSA:
6501                 un->un_node_type = DDI_NT_BLOCK_WWN;
6502                 break;
6503             default:
6504                 break;
6505             }
6506         }
6507     }
6508 
6509     /*
6510      * Initialize the Request Sense command for the target.
6511      */
6512     if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6513         goto alloc_rqs_failed;
6514     }
6515 
6516     /*
6517      * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC
6518      * with its separate binaries for sd and ssd.
6519      *
6520      * x86 has one binary, and un_retry_count is set based on
6521      * connection type. The hardcoded values will go away when
6522      * SPARC uses one binary for sd and ssd. These hardcoded
6523      * values need to match SD_RETRY_COUNT in sddef.h.
6524      * The value used is based on interconnect type:
6525      * fibre = 3, parallel = 5.
6526      */
6527 #if defined(__i386) || defined(__amd64)
6528     un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6529 #else
6530     un->un_retry_count = SD_RETRY_COUNT;
6531 #endif
6532 
6533     /*
6534      * Set the per disk retry count to the default number of retries
6535      * for disks and CDROMs. This value can be overridden by the
6536      * disk property list or an entry in sd.conf.
6537      */
6538     un->un_notready_retry_count =
6539         ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6540             : DISK_NOT_READY_RETRY_COUNT(un);
6541 
6542     /*
6543      * Set the busy retry count to the default value of un_retry_count.
6544      * This can be overridden by entries in sd.conf or the device
6545      * config table.
6546      */
6547     un->un_busy_retry_count = un->un_retry_count;
6548 
6549     /*
6550      * Init the reset threshold for retries. This number determines
6551      * how many retries must be performed before a reset can be issued
6552      * (for certain error conditions). This can be overridden by entries
6553      * in sd.conf or the device config table.
6554      */
6555     un->un_reset_retry_count = (un->un_retry_count / 2);
6556 
6557     /*
6558      * Set the victim_retry_count to the default un_retry_count.
6559      */
6560     un->un_victim_retry_count = (2 * un->un_retry_count);
6561 
6562     /*
6563      * Set the reservation release timeout to the default value of
6564      * 5 seconds. This can be overridden by entries in ssd.conf or the
6565      * device config table.
6566      */
6567     un->un_reserve_release_time = 5;
6568 
6569     /*
6570      * Set up the default maximum transfer size. Note that this may
6571      * get updated later in the attach, when setting up default wide
6572      * operations for disks.
6573      */
6574 #if defined(__i386) || defined(__amd64)
6575     un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6576 #else
6577     un->un_max_xfer_size = (uint_t)maxphys;
6578 #endif
6579 
6580     /*
6581      * Get the "allow bus device reset" property (defaults to "enabled"
6582      * if the property was not defined). This is to disable bus resets
6583      * for certain kinds of error recovery. Note: In the future, when a
6584      * run-time fibre check is available, the soft state flag should
6585      * default to enabled.
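     *
     * Reading note: the last argument to ddi_getprop(9F) is the value
     * returned when the property is not found, so a default of 1 makes
     * an absent "allow-bus-device-reset" property mean "enabled":
     *
     *    ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
     *        "allow-bus-device-reset", 1)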
6586 */ 6587 if (un->un_f_is_fibre == TRUE) { 6588 un->un_f_allow_bus_device_reset = TRUE; 6589 } else { 6590 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6591 "allow-bus-device-reset", 1) != 0) { 6592 un->un_f_allow_bus_device_reset = TRUE; 6593 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6594 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 6595 un); 6596 } else { 6597 un->un_f_allow_bus_device_reset = FALSE; 6598 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6599 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 6600 un); 6601 } 6602 } 6603 6604 /* 6605 * Check if this is an ATAPI device. ATAPI devices use Group 1 6606 * Read/Write commands and Group 2 Mode Sense/Select commands. 6607 * 6608 * Note: The "obsolete" way of doing this is to check for the "atapi" 6609 * property. The new "variant" property with a value of "atapi" has been 6610 * introduced so that future 'variants' of standard SCSI behavior (like 6611 * atapi) could be specified by the underlying HBA drivers by supplying 6612 * a new value for the "variant" property, instead of having to define a 6613 * new property. 6614 */ 6615 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6616 un->un_f_cfg_is_atapi = TRUE; 6617 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6618 "sd_unit_attach: un:0x%p Atapi device\n", un); 6619 } 6620 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6621 &variantp) == DDI_PROP_SUCCESS) { 6622 if (strcmp(variantp, "atapi") == 0) { 6623 un->un_f_cfg_is_atapi = TRUE; 6624 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6625 "sd_unit_attach: un:0x%p Atapi device\n", un); 6626 } 6627 ddi_prop_free(variantp); 6628 } 6629 6630 un->un_cmd_timeout = SD_IO_TIME; 6631 6632 /* Info on current states, statuses, etc. (Updated frequently) */ 6633 un->un_state = SD_STATE_NORMAL; 6634 un->un_last_state = SD_STATE_NORMAL; 6635 6636 /* Control & status info for command throttling */ 6637 un->un_throttle = sd_max_throttle; 6638 un->un_saved_throttle = sd_max_throttle; 6639 un->un_min_throttle = sd_min_throttle; 6640 6641 if (un->un_f_is_fibre == TRUE) { 6642 un->un_f_use_adaptive_throttle = TRUE; 6643 } else { 6644 un->un_f_use_adaptive_throttle = FALSE; 6645 } 6646 6647 /* Removable media support. */ 6648 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6649 un->un_mediastate = DKIO_NONE; 6650 un->un_specified_mediastate = DKIO_NONE; 6651 6652 /* CVs for suspend/resume (PM or DR) */ 6653 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6654 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6655 6656 /* Power management support. */ 6657 un->un_power_level = SD_SPINDLE_UNINIT; 6658 6659 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6660 un->un_f_wcc_inprog = 0; 6661 6662 /* 6663 * The open/close semaphore is used to serialize threads executing 6664 * in the driver's open & close entry point routines for a given 6665 * instance. 6666 */ 6667 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 6668 6669 /* 6670 * The conf file entry and softstate variable is a forceful override, 6671 * meaning a non-zero value must be entered to change the default. 6672 */ 6673 un->un_f_disksort_disabled = FALSE; 6674 6675 /* 6676 * Retrieve the properties from the static driver table or the driver 6677 * configuration file (.conf) for this unit and update the soft state 6678 * for the device as needed for the indicated properties. 
6679  * Note: the property configuration needs to occur here as some of the
6680  * following routines may have dependencies on soft state flags set
6681  * as part of the driver property configuration.
6682  */
6683     sd_read_unit_properties(un);
6684     SD_TRACE(SD_LOG_ATTACH_DETACH, un,
6685         "sd_unit_attach: un:0x%p property configuration complete.\n", un);
6686 
6687     /*
6688      * A device is treated as hotpluggable only if it has the
6689      * "hotpluggable" property; otherwise it is regarded as
6690      * non-hotpluggable.
6691      */
6692     if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
6693         -1) != -1) {
6694         un->un_f_is_hotpluggable = TRUE;
6695     }
6696 
6697     /*
6698      * Set the unit's attributes (flags) according to "hotpluggable"
6699      * and the RMB bit in the INQUIRY data.
6700      */
6701     sd_set_unit_attributes(un, devi);
6702 
6703     /*
6704      * By default, we mark the capacity, lbasize, and geometry
6705      * as invalid. Only if we successfully read a valid capacity
6706      * will we update the un_blockcount and un_tgt_blocksize with the
6707      * valid values (the geometry will be validated later).
6708      */
6709     un->un_f_blockcount_is_valid = FALSE;
6710     un->un_f_tgt_blocksize_is_valid = FALSE;
6711 
6712     /*
6713      * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
6714      * otherwise.
6715      */
6716     un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
6717     un->un_blockcount = 0;
6718 
6719     /*
6720      * Set up the per-instance info needed to determine the correct
6721      * CDBs and other info for issuing commands to the target.
6722      */
6723     sd_init_cdb_limits(un);
6724 
6725     /*
6726      * Set up the IO chains to use, based upon the target type.
6727      */
6728     if (un->un_f_non_devbsize_supported) {
6729         un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6730     } else {
6731         un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6732     }
6733     un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6734     un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
6735     un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
6736 
6737     un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
6738         sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
6739         ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
6740     ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
6741 
6742 
6743     if (ISCD(un)) {
6744         un->un_additional_codes = sd_additional_codes;
6745     } else {
6746         un->un_additional_codes = NULL;
6747     }
6748 
6749     /*
6750      * Create the kstats here so they can be available for attach-time
6751      * routines that send commands to the unit (either polled or via
6752      * sd_send_scsi_cmd).
6753      *
6754      * Note: This is a critical sequence that needs to be maintained:
6755      * 1) Instantiate the kstats here, before any routines using the
6756      *    iopath (i.e. sd_send_scsi_cmd).
6757      * 2) Instantiate and initialize the partition stats
6758      *    (sd_set_pstats).
6759      * 3) Initialize the error stats (sd_set_errstats), following
6760      *    sd_validate_geometry(), sd_register_devid(),
6761      *    and sd_cache_control().
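     *
     * A sketch of step 1, with the locking rationale stated as an
     * assumption: pointing ks_lock at the softstate mutex before
     * kstat_install() lets kstat snapshots be serialized against
     * the driver's own updates of the I/O statistics:
     *
     *    ksp = kstat_create(sd_label, instance, NULL, "disk",
     *        KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
     *    if (ksp != NULL) {
     *        ksp->ks_lock = SD_MUTEX(un);
     *        kstat_install(ksp);
     *    }
     *
     * (ksp is a local name for illustration; the code below stores
     * the pointer in un->un_stats.)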
6762 */ 6763 6764 un->un_stats = kstat_create(sd_label, instance, 6765 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 6766 if (un->un_stats != NULL) { 6767 un->un_stats->ks_lock = SD_MUTEX(un); 6768 kstat_install(un->un_stats); 6769 } 6770 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6771 "sd_unit_attach: un:0x%p un_stats created\n", un); 6772 6773 sd_create_errstats(un, instance); 6774 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6775 "sd_unit_attach: un:0x%p errstats created\n", un); 6776 6777 /* 6778 * The following if/else code was relocated here from below as part 6779 * of the fix for bug (4430280). However with the default setup added 6780 * on entry to this routine, it's no longer absolutely necessary for 6781 * this to be before the call to sd_spin_up_unit. 6782 */ 6783 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 6784 /* 6785 * If SCSI-2 tagged queueing is supported by the target 6786 * and by the host adapter then we will enable it. 6787 */ 6788 un->un_tagflags = 0; 6789 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 6790 (devp->sd_inq->inq_cmdque) && 6791 (un->un_f_arq_enabled == TRUE)) { 6792 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 6793 1, 1) == 1) { 6794 un->un_tagflags = FLAG_STAG; 6795 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6796 "sd_unit_attach: un:0x%p tag queueing " 6797 "enabled\n", un); 6798 } else if (scsi_ifgetcap(SD_ADDRESS(un), 6799 "untagged-qing", 0) == 1) { 6800 un->un_f_opt_queueing = TRUE; 6801 un->un_saved_throttle = un->un_throttle = 6802 min(un->un_throttle, 3); 6803 } else { 6804 un->un_f_opt_queueing = FALSE; 6805 un->un_saved_throttle = un->un_throttle = 1; 6806 } 6807 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 6808 == 1) && (un->un_f_arq_enabled == TRUE)) { 6809 /* The Host Adapter supports internal queueing. */ 6810 un->un_f_opt_queueing = TRUE; 6811 un->un_saved_throttle = un->un_throttle = 6812 min(un->un_throttle, 3); 6813 } else { 6814 un->un_f_opt_queueing = FALSE; 6815 un->un_saved_throttle = un->un_throttle = 1; 6816 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6817 "sd_unit_attach: un:0x%p no tag queueing\n", un); 6818 } 6819 6820 /* 6821 * Enable large transfers for SATA/SAS drives 6822 */ 6823 if (SD_IS_SERIAL(un)) { 6824 un->un_max_xfer_size = 6825 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6826 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6827 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6828 "sd_unit_attach: un:0x%p max transfer " 6829 "size=0x%x\n", un, un->un_max_xfer_size); 6830 6831 } 6832 6833 /* Setup or tear down default wide operations for disks */ 6834 6835 /* 6836 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 6837 * and "ssd_max_xfer_size" to exist simultaneously on the same 6838 * system and be set to different values. In the future this 6839 * code may need to be updated when the ssd module is 6840 * obsoleted and removed from the system. 
(4299588) 6841 */ 6842 if (SD_IS_PARALLEL_SCSI(un) && 6843 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6844 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6845 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6846 1, 1) == 1) { 6847 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6848 "sd_unit_attach: un:0x%p Wide Transfer " 6849 "enabled\n", un); 6850 } 6851 6852 /* 6853 * If tagged queuing has also been enabled, then 6854 * enable large xfers 6855 */ 6856 if (un->un_saved_throttle == sd_max_throttle) { 6857 un->un_max_xfer_size = 6858 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6859 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6861 "sd_unit_attach: un:0x%p max transfer " 6862 "size=0x%x\n", un, un->un_max_xfer_size); 6863 } 6864 } else { 6865 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6866 0, 1) == 1) { 6867 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6868 "sd_unit_attach: un:0x%p " 6869 "Wide Transfer disabled\n", un); 6870 } 6871 } 6872 } else { 6873 un->un_tagflags = FLAG_STAG; 6874 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 6875 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 6876 } 6877 6878 /* 6879 * If this target supports LUN reset, try to enable it. 6880 */ 6881 if (un->un_f_lun_reset_enabled) { 6882 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 6883 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6884 "un:0x%p lun_reset capability set\n", un); 6885 } else { 6886 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 6887 "un:0x%p lun-reset capability not set\n", un); 6888 } 6889 } 6890 6891 /* 6892 * At this point in the attach, we have enough info in the 6893 * soft state to be able to issue commands to the target. 6894 * 6895 * All command paths used below MUST issue their commands as 6896 * SD_PATH_DIRECT. This is important as intermediate layers 6897 * are not all initialized yet (such as PM). 6898 */ 6899 6900 /* 6901 * Send a TEST UNIT READY command to the device. This should clear 6902 * any outstanding UNIT ATTENTION that may be present. 6903 * 6904 * Note: Don't check for success, just track if there is a reservation, 6905 * this is a throw away command to clear any unit attentions. 6906 * 6907 * Note: This MUST be the first command issued to the target during 6908 * attach to ensure power on UNIT ATTENTIONS are cleared. 6909 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 6910 * with attempts at spinning up a device with no media. 6911 */ 6912 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 6913 reservation_flag = SD_TARGET_IS_RESERVED; 6914 } 6915 6916 /* 6917 * If the device is NOT a removable media device, attempt to spin 6918 * it up (using the START_STOP_UNIT command) and read its capacity 6919 * (using the READ CAPACITY command). Note, however, that either 6920 * of these could fail and in some cases we would continue with 6921 * the attach despite the failure (see below). 6922 */ 6923 if (un->un_f_descr_format_supported) { 6924 switch (sd_spin_up_unit(un)) { 6925 case 0: 6926 /* 6927 * Spin-up was successful; now try to read the 6928 * capacity. If successful then save the results 6929 * and mark the capacity & lbasize as valid. 
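             *
             * Orientation for the capacity checks that follow (a
             * summary, with the CDB detail stated as an assumption):
             * DK_MAX_BLOCKS is the largest block count a 32-bit
             * kernel can address, and SD_GROUP1_MAX_ADDRESS is the
             * largest LBA reachable with 10-byte (Group 1) CDBs; a
             * disk beyond the latter needs descriptor-format sense
             * data for 64-bit information fields, which is why
             * sd_enable_descr_sense() is called for it:
             *
             *    if (capacity + 1 > SD_GROUP1_MAX_ADDRESS)
             *        sd_enable_descr_sense(un);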
6930 */ 6931 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6932 "sd_unit_attach: un:0x%p spin-up successful\n", un); 6933 6934 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 6935 &lbasize, SD_PATH_DIRECT)) { 6936 case 0: { 6937 if (capacity > DK_MAX_BLOCKS) { 6938 #ifdef _LP64 6939 if (capacity + 1 > 6940 SD_GROUP1_MAX_ADDRESS) { 6941 /* 6942 * Enable descriptor format 6943 * sense data so that we can 6944 * get 64 bit sense data 6945 * fields. 6946 */ 6947 sd_enable_descr_sense(un); 6948 } 6949 #else 6950 /* 32-bit kernels can't handle this */ 6951 scsi_log(SD_DEVINFO(un), 6952 sd_label, CE_WARN, 6953 "disk has %llu blocks, which " 6954 "is too large for a 32-bit " 6955 "kernel", capacity); 6956 6957 #if defined(__i386) || defined(__amd64) 6958 /* 6959 * 1TB disk was treated as (1T - 512)B 6960 * in the past, so that it might have 6961 * valid VTOC and solaris partitions, 6962 * we have to allow it to continue to 6963 * work. 6964 */ 6965 if (capacity -1 > DK_MAX_BLOCKS) 6966 #endif 6967 goto spinup_failed; 6968 #endif 6969 } 6970 6971 /* 6972 * Here it's not necessary to check the case: 6973 * the capacity of the device is bigger than 6974 * what the max hba cdb can support. Because 6975 * sd_send_scsi_READ_CAPACITY will retrieve 6976 * the capacity by sending USCSI command, which 6977 * is constrained by the max hba cdb. Actually, 6978 * sd_send_scsi_READ_CAPACITY will return 6979 * EINVAL when using bigger cdb than required 6980 * cdb length. Will handle this case in 6981 * "case EINVAL". 6982 */ 6983 6984 /* 6985 * The following relies on 6986 * sd_send_scsi_READ_CAPACITY never 6987 * returning 0 for capacity and/or lbasize. 6988 */ 6989 sd_update_block_info(un, lbasize, capacity); 6990 6991 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6992 "sd_unit_attach: un:0x%p capacity = %ld " 6993 "blocks; lbasize= %ld.\n", un, 6994 un->un_blockcount, un->un_tgt_blocksize); 6995 6996 break; 6997 } 6998 case EINVAL: 6999 /* 7000 * In the case where the max-cdb-length property 7001 * is smaller than the required CDB length for 7002 * a SCSI device, a target driver can fail to 7003 * attach to that device. 7004 */ 7005 scsi_log(SD_DEVINFO(un), 7006 sd_label, CE_WARN, 7007 "disk capacity is too large " 7008 "for current cdb length"); 7009 goto spinup_failed; 7010 case EACCES: 7011 /* 7012 * Should never get here if the spin-up 7013 * succeeded, but code it in anyway. 7014 * From here, just continue with the attach... 7015 */ 7016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7017 "sd_unit_attach: un:0x%p " 7018 "sd_send_scsi_READ_CAPACITY " 7019 "returned reservation conflict\n", un); 7020 reservation_flag = SD_TARGET_IS_RESERVED; 7021 break; 7022 default: 7023 /* 7024 * Likewise, should never get here if the 7025 * spin-up succeeded. Just continue with 7026 * the attach... 7027 */ 7028 break; 7029 } 7030 break; 7031 case EACCES: 7032 /* 7033 * Device is reserved by another host. In this case 7034 * we could not spin it up or read the capacity, but 7035 * we continue with the attach anyway. 7036 */ 7037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7038 "sd_unit_attach: un:0x%p spin-up reservation " 7039 "conflict.\n", un); 7040 reservation_flag = SD_TARGET_IS_RESERVED; 7041 break; 7042 default: 7043 /* Fail the attach if the spin-up failed. 
*/
7044 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7045 "sd_unit_attach: un:0x%p spin-up failed.", un);
7046 goto spinup_failed;
7047 }
7048 }
7049
7050 /*
7051 * Check to see if this is an MMC drive
7052 */
7053 if (ISCD(un)) {
7054 sd_set_mmc_caps(un);
7055 }
7056
7057
7058 /*
7059 * Add a zero-length attribute to tell the world we support
7060 * kernel ioctls (for layered drivers)
7061 */
7062 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7063 DDI_KERNEL_IOCTL, NULL, 0);
7064
7065 /*
7066 * Add a boolean property to tell the world we support
7067 * the B_FAILFAST flag (for layered drivers)
7068 */
7069 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
7070 "ddi-failfast-supported", NULL, 0);
7071
7072 /*
7073 * Initialize power management
7074 */
7075 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
7076 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7077 sd_setup_pm(un, devi);
7078 if (un->un_f_pm_is_enabled == FALSE) {
7079 /*
7080 * For performance, point to a jump table that does
7081 * not include pm.
7082 * The direct and priority chains don't change with PM.
7083 *
7084 * Note: this is currently done based on individual device
7085 * capabilities. When an interface for determining system
7086 * power enabled state becomes available, or when additional
7087 * layers are added to the command chain, these values will
7088 * have to be re-evaluated for correctness.
7089 */
7090 if (un->un_f_non_devbsize_supported) {
7091 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
7092 } else {
7093 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
7094 }
7095 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
7096 }
7097
7098 /*
7099 * This property is set to 0 by HA software to avoid retries
7100 * on a reserved disk. (The preferred property name is
7101 * "retry-on-reservation-conflict") (1189689)
7102 *
7103 * Note: The use of a global here can have unintended consequences. A
7104 * per-instance variable is preferable to match the capabilities of
7105 * different underlying HBAs (4402600)
7106 */
7107 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
7108 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
7109 sd_retry_on_reservation_conflict);
7110 if (sd_retry_on_reservation_conflict != 0) {
7111 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
7112 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7113 sd_retry_on_reservation_conflict);
7114 }
7115
7116 /* Set up options for QFULL handling. */
7117 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7118 "qfull-retries", -1)) != -1) {
7119 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
7120 rval, 1);
7121 }
7122 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7123 "qfull-retry-interval", -1)) != -1) {
7124 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
7125 rval, 1);
7126 }
7127
7128 /*
7129 * This just prints a message that announces the existence of the
7130 * device. The message is always printed in the system logfile, but
7131 * only appears on the console if the system is booted with the
7132 * -v (verbose) argument.
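 *
 * (Illustrative only: the announcement typically has a form such as
 * "sd0 at mpt0: target 0 lun 0"; the exact text is produced by the
 * DDI framework and the parent nexus, not by this driver.)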
7133 */
7134 ddi_report_dev(devi);
7135
7136 un->un_mediastate = DKIO_NONE;
7137
7138 cmlb_alloc_handle(&un->un_cmlbhandle);
7139
7140 #if defined(__i386) || defined(__amd64)
7141 /*
7142 * On x86, compensate for off-by-1 legacy error
7143 */
7144 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
7145 (lbasize == un->un_sys_blocksize))
7146 offbyone = CMLB_OFF_BY_ONE;
7147 #endif
7148
7149 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
7150 un->un_f_has_removable_media, un->un_f_is_hotpluggable,
7151 un->un_node_type, offbyone, un->un_cmlbhandle,
7152 (void *)SD_PATH_DIRECT) != 0) {
7153 goto cmlb_attach_failed;
7154 }
7155
7156
7157 /*
7158 * Read and validate the device's geometry (i.e., disk label)
7159 * A new unformatted drive will not have a valid geometry, but
7160 * the driver needs to successfully attach to this device so
7161 * the drive can be formatted via ioctls.
7162 */
7163 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
7164 (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
7165
7166 mutex_enter(SD_MUTEX(un));
7167
7168 /*
7169 * Read and initialize the devid for the unit.
7170 */
7171 ASSERT(un->un_errstats != NULL);
7172 if (un->un_f_devid_supported) {
7173 sd_register_devid(un, devi, reservation_flag);
7174 }
7175 mutex_exit(SD_MUTEX(un));
7176
7177 #if (defined(__fibre))
7178 /*
7179 * Register callbacks for fibre only. You can't do this solely
7180 * on the basis of the devid_type because this is HBA-specific.
7181 * We need to query our HBA capabilities to find out whether to
7182 * register or not.
7183 */
7184 if (un->un_f_is_fibre) {
7185 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
7186 sd_init_event_callbacks(un);
7187 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7188 "sd_unit_attach: un:0x%p event callbacks inserted", un);
7189 }
7190 }
7191 #endif
7192
7193 if (un->un_f_opt_disable_cache == TRUE) {
7194 /*
7195 * Disable both read cache and write cache. This is
7196 * the historic behavior of the keywords in the config file.
7197 */
7198 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
7199 0) {
7200 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7201 "sd_unit_attach: un:0x%p Could not disable "
7202 "caching", un);
7203 goto devid_failed;
7204 }
7205 }
7206
7207 /*
7208 * Check the value of the WCE bit now and
7209 * set un_f_write_cache_enabled accordingly.
7210 */
7211 (void) sd_get_write_cache_enabled(un, &wc_enabled);
7212 mutex_enter(SD_MUTEX(un));
7213 un->un_f_write_cache_enabled = (wc_enabled != 0);
7214 mutex_exit(SD_MUTEX(un));
7215
7216 /*
7217 * Find out what type of reservation this disk supports.
7218 */
7219 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
7220 case 0:
7221 /*
7222 * SCSI-3 reservations are supported.
7223 */
7224 un->un_reservation_type = SD_SCSI3_RESERVATION;
7225 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7226 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
7227 break;
7228 case ENOTSUP:
7229 /*
7230 * The PERSISTENT RESERVE IN command would not be recognized by
7231 * a SCSI-2 device, so assume the reservation type is SCSI-2.
7232 */ 7233 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7234 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7235 un->un_reservation_type = SD_SCSI2_RESERVATION; 7236 break; 7237 default: 7238 /* 7239 * default to SCSI-3 reservations 7240 */ 7241 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7242 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7243 un->un_reservation_type = SD_SCSI3_RESERVATION; 7244 break; 7245 } 7246 7247 /* 7248 * Set the pstat and error stat values here, so data obtained during the 7249 * previous attach-time routines is available. 7250 * 7251 * Note: This is a critical sequence that needs to be maintained: 7252 * 1) Instantiate the kstats before any routines using the iopath 7253 * (i.e. sd_send_scsi_cmd). 7254 * 2) Initialize the error stats (sd_set_errstats) and partition 7255 * stats (sd_set_pstats)here, following 7256 * cmlb_validate_geometry(), sd_register_devid(), and 7257 * sd_cache_control(). 7258 */ 7259 7260 if (un->un_f_pkstats_enabled && geom_label_valid) { 7261 sd_set_pstats(un); 7262 SD_TRACE(SD_LOG_IO_PARTITION, un, 7263 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7264 } 7265 7266 sd_set_errstats(un); 7267 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7268 "sd_unit_attach: un:0x%p errstats set\n", un); 7269 7270 7271 /* 7272 * After successfully attaching an instance, we record the information 7273 * of how many luns have been attached on the relative target and 7274 * controller for parallel SCSI. This information is used when sd tries 7275 * to set the tagged queuing capability in HBA. 7276 */ 7277 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7278 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7279 } 7280 7281 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7282 "sd_unit_attach: un:0x%p exit success\n", un); 7283 7284 return (DDI_SUCCESS); 7285 7286 /* 7287 * An error occurred during the attach; clean up & return failure. 7288 */ 7289 7290 devid_failed: 7291 7292 setup_pm_failed: 7293 ddi_remove_minor_node(devi, NULL); 7294 7295 cmlb_attach_failed: 7296 /* 7297 * Cleanup from the scsi_ifsetcap() calls (437868) 7298 */ 7299 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7300 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7301 7302 /* 7303 * Refer to the comments of setting tagged-qing in the beginning of 7304 * sd_unit_attach. We can only disable tagged queuing when there is 7305 * no lun attached on the target. 7306 */ 7307 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7308 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7309 } 7310 7311 if (un->un_f_is_fibre == FALSE) { 7312 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7313 } 7314 7315 spinup_failed: 7316 7317 mutex_enter(SD_MUTEX(un)); 7318 7319 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7320 if (un->un_direct_priority_timeid != NULL) { 7321 timeout_id_t temp_id = un->un_direct_priority_timeid; 7322 un->un_direct_priority_timeid = NULL; 7323 mutex_exit(SD_MUTEX(un)); 7324 (void) untimeout(temp_id); 7325 mutex_enter(SD_MUTEX(un)); 7326 } 7327 7328 /* Cancel any pending start/stop timeouts */ 7329 if (un->un_startstop_timeid != NULL) { 7330 timeout_id_t temp_id = un->un_startstop_timeid; 7331 un->un_startstop_timeid = NULL; 7332 mutex_exit(SD_MUTEX(un)); 7333 (void) untimeout(temp_id); 7334 mutex_enter(SD_MUTEX(un)); 7335 } 7336 7337 /* Cancel any pending reset-throttle timeouts */ 7338 if (un->un_reset_throttle_timeid != NULL) { 7339 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7340 un->un_reset_throttle_timeid = NULL; 7341 mutex_exit(SD_MUTEX(un)); 7342 (void) untimeout(temp_id); 7343 mutex_enter(SD_MUTEX(un)); 7344 } 7345 7346 /* Cancel any pending retry timeouts */ 7347 if (un->un_retry_timeid != NULL) { 7348 timeout_id_t temp_id = un->un_retry_timeid; 7349 un->un_retry_timeid = NULL; 7350 mutex_exit(SD_MUTEX(un)); 7351 (void) untimeout(temp_id); 7352 mutex_enter(SD_MUTEX(un)); 7353 } 7354 7355 /* Cancel any pending delayed cv broadcast timeouts */ 7356 if (un->un_dcvb_timeid != NULL) { 7357 timeout_id_t temp_id = un->un_dcvb_timeid; 7358 un->un_dcvb_timeid = NULL; 7359 mutex_exit(SD_MUTEX(un)); 7360 (void) untimeout(temp_id); 7361 mutex_enter(SD_MUTEX(un)); 7362 } 7363 7364 mutex_exit(SD_MUTEX(un)); 7365 7366 /* There should not be any in-progress I/O so ASSERT this check */ 7367 ASSERT(un->un_ncmds_in_transport == 0); 7368 ASSERT(un->un_ncmds_in_driver == 0); 7369 7370 /* Do not free the softstate if the callback routine is active */ 7371 sd_sync_with_callback(un); 7372 7373 /* 7374 * Partition stats apparently are not used with removables. These would 7375 * not have been created during attach, so no need to clean them up... 7376 */ 7377 if (un->un_stats != NULL) { 7378 kstat_delete(un->un_stats); 7379 un->un_stats = NULL; 7380 } 7381 if (un->un_errstats != NULL) { 7382 kstat_delete(un->un_errstats); 7383 un->un_errstats = NULL; 7384 } 7385 7386 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7387 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7388 7389 ddi_prop_remove_all(devi); 7390 sema_destroy(&un->un_semoclose); 7391 cv_destroy(&un->un_state_cv); 7392 7393 getrbuf_failed: 7394 7395 sd_free_rqs(un); 7396 7397 alloc_rqs_failed: 7398 7399 devp->sd_private = NULL; 7400 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7401 7402 get_softstate_failed: 7403 /* 7404 * Note: the man pages are unclear as to whether or not doing a 7405 * ddi_soft_state_free(sd_state, instance) is the right way to 7406 * clean up after the ddi_soft_state_zalloc() if the subsequent 7407 * ddi_get_soft_state() fails. The implication seems to be 7408 * that the get_soft_state cannot fail if the zalloc succeeds. 7409 */ 7410 ddi_soft_state_free(sd_state, instance); 7411 7412 probe_failed: 7413 scsi_unprobe(devp); 7414 #ifdef SDDEBUG 7415 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 7416 (sd_level_mask & SD_LOGMASK_TRACE)) { 7417 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 7418 (void *)un); 7419 } 7420 #endif 7421 return (DDI_FAILURE); 7422 } 7423 7424 7425 /* 7426 * Function: sd_unit_detach 7427 * 7428 * Description: Performs DDI_DETACH processing for sddetach(). 
7429 * 7430 * Return Code: DDI_SUCCESS 7431 * DDI_FAILURE 7432 * 7433 * Context: Kernel thread context 7434 */ 7435 7436 static int 7437 sd_unit_detach(dev_info_t *devi) 7438 { 7439 struct scsi_device *devp; 7440 struct sd_lun *un; 7441 int i; 7442 int tgt; 7443 dev_t dev; 7444 dev_info_t *pdip = ddi_get_parent(devi); 7445 int instance = ddi_get_instance(devi); 7446 7447 mutex_enter(&sd_detach_mutex); 7448 7449 /* 7450 * Fail the detach for any of the following: 7451 * - Unable to get the sd_lun struct for the instance 7452 * - A layered driver has an outstanding open on the instance 7453 * - Another thread is already detaching this instance 7454 * - Another thread is currently performing an open 7455 */ 7456 devp = ddi_get_driver_private(devi); 7457 if ((devp == NULL) || 7458 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7459 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7460 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7461 mutex_exit(&sd_detach_mutex); 7462 return (DDI_FAILURE); 7463 } 7464 7465 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7466 7467 /* 7468 * Mark this instance as currently in a detach, to inhibit any 7469 * opens from a layered driver. 7470 */ 7471 un->un_detach_count++; 7472 mutex_exit(&sd_detach_mutex); 7473 7474 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7475 SCSI_ADDR_PROP_TARGET, -1); 7476 7477 dev = sd_make_device(SD_DEVINFO(un)); 7478 7479 #ifndef lint 7480 _NOTE(COMPETING_THREADS_NOW); 7481 #endif 7482 7483 mutex_enter(SD_MUTEX(un)); 7484 7485 /* 7486 * Fail the detach if there are any outstanding layered 7487 * opens on this device. 7488 */ 7489 for (i = 0; i < NDKMAP; i++) { 7490 if (un->un_ocmap.lyropen[i] != 0) { 7491 goto err_notclosed; 7492 } 7493 } 7494 7495 /* 7496 * Verify there are NO outstanding commands issued to this device. 7497 * ie, un_ncmds_in_transport == 0. 7498 * It's possible to have outstanding commands through the physio 7499 * code path, even though everything's closed. 7500 */ 7501 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7502 (un->un_direct_priority_timeid != NULL) || 7503 (un->un_state == SD_STATE_RWAIT)) { 7504 mutex_exit(SD_MUTEX(un)); 7505 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7506 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7507 goto err_stillbusy; 7508 } 7509 7510 /* 7511 * If we have the device reserved, release the reservation. 7512 */ 7513 if ((un->un_resvd_status & SD_RESERVE) && 7514 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7515 mutex_exit(SD_MUTEX(un)); 7516 /* 7517 * Note: sd_reserve_release sends a command to the device 7518 * via the sd_ioctlcmd() path, and can sleep. 7519 */ 7520 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7521 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7522 "sd_dr_detach: Cannot release reservation \n"); 7523 } 7524 } else { 7525 mutex_exit(SD_MUTEX(un)); 7526 } 7527 7528 /* 7529 * Untimeout any reserve recover, throttle reset, restart unit 7530 * and delayed broadcast timeout threads. Protect the timeout pointer 7531 * from getting nulled by their callback functions. 
7532 */ 7533 mutex_enter(SD_MUTEX(un)); 7534 if (un->un_resvd_timeid != NULL) { 7535 timeout_id_t temp_id = un->un_resvd_timeid; 7536 un->un_resvd_timeid = NULL; 7537 mutex_exit(SD_MUTEX(un)); 7538 (void) untimeout(temp_id); 7539 mutex_enter(SD_MUTEX(un)); 7540 } 7541 7542 if (un->un_reset_throttle_timeid != NULL) { 7543 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7544 un->un_reset_throttle_timeid = NULL; 7545 mutex_exit(SD_MUTEX(un)); 7546 (void) untimeout(temp_id); 7547 mutex_enter(SD_MUTEX(un)); 7548 } 7549 7550 if (un->un_startstop_timeid != NULL) { 7551 timeout_id_t temp_id = un->un_startstop_timeid; 7552 un->un_startstop_timeid = NULL; 7553 mutex_exit(SD_MUTEX(un)); 7554 (void) untimeout(temp_id); 7555 mutex_enter(SD_MUTEX(un)); 7556 } 7557 7558 if (un->un_dcvb_timeid != NULL) { 7559 timeout_id_t temp_id = un->un_dcvb_timeid; 7560 un->un_dcvb_timeid = NULL; 7561 mutex_exit(SD_MUTEX(un)); 7562 (void) untimeout(temp_id); 7563 } else { 7564 mutex_exit(SD_MUTEX(un)); 7565 } 7566 7567 /* Remove any pending reservation reclaim requests for this device */ 7568 sd_rmv_resv_reclaim_req(dev); 7569 7570 mutex_enter(SD_MUTEX(un)); 7571 7572 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7573 if (un->un_direct_priority_timeid != NULL) { 7574 timeout_id_t temp_id = un->un_direct_priority_timeid; 7575 un->un_direct_priority_timeid = NULL; 7576 mutex_exit(SD_MUTEX(un)); 7577 (void) untimeout(temp_id); 7578 mutex_enter(SD_MUTEX(un)); 7579 } 7580 7581 /* Cancel any active multi-host disk watch thread requests */ 7582 if (un->un_mhd_token != NULL) { 7583 mutex_exit(SD_MUTEX(un)); 7584 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7585 if (scsi_watch_request_terminate(un->un_mhd_token, 7586 SCSI_WATCH_TERMINATE_NOWAIT)) { 7587 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7588 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7589 /* 7590 * Note: We are returning here after having removed 7591 * some driver timeouts above. This is consistent with 7592 * the legacy implementation but perhaps the watch 7593 * terminate call should be made with the wait flag set. 7594 */ 7595 goto err_stillbusy; 7596 } 7597 mutex_enter(SD_MUTEX(un)); 7598 un->un_mhd_token = NULL; 7599 } 7600 7601 if (un->un_swr_token != NULL) { 7602 mutex_exit(SD_MUTEX(un)); 7603 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7604 if (scsi_watch_request_terminate(un->un_swr_token, 7605 SCSI_WATCH_TERMINATE_NOWAIT)) { 7606 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7607 "sd_dr_detach: Cannot cancel swr watch request\n"); 7608 /* 7609 * Note: We are returning here after having removed 7610 * some driver timeouts above. This is consistent with 7611 * the legacy implementation but perhaps the watch 7612 * terminate call should be made with the wait flag set. 7613 */ 7614 goto err_stillbusy; 7615 } 7616 mutex_enter(SD_MUTEX(un)); 7617 un->un_swr_token = NULL; 7618 } 7619 7620 mutex_exit(SD_MUTEX(un)); 7621 7622 /* 7623 * Clear any scsi_reset_notifies. We clear the reset notifies 7624 * if we have not registered one. 7625 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7626 */ 7627 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7628 sd_mhd_reset_notify_cb, (caddr_t)un); 7629 7630 /* 7631 * protect the timeout pointers from getting nulled by 7632 * their callback functions during the cancellation process. 7633 * In such a scenario untimeout can be invoked with a null value. 
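 *
 * Sketch (illustrative, not compiled) of the cancellation pattern
 * applied to each timeout handled below; un_foo_timeid stands in
 * for the various per-LUN timeout id fields:
 *
 *	mutex_enter(&un->un_pm_mutex);
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(&un->un_pm_mutex);
 *		(void) untimeout(temp_id);	/* id saved locally */
 *		mutex_enter(&un->un_pm_mutex);
 *	}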
7634 */
7635 _NOTE(NO_COMPETING_THREADS_NOW);
7636
7637 mutex_enter(&un->un_pm_mutex);
7638 if (un->un_pm_idle_timeid != NULL) {
7639 timeout_id_t temp_id = un->un_pm_idle_timeid;
7640 un->un_pm_idle_timeid = NULL;
7641 mutex_exit(&un->un_pm_mutex);
7642
7643 /*
7644 * Timeout is active; cancel it.
7645 * Note that it'll never be active on a device
7646 * that does not support PM, therefore we don't
7647 * have to check before calling pm_idle_component.
7648 */
7649 (void) untimeout(temp_id);
7650 (void) pm_idle_component(SD_DEVINFO(un), 0);
7651 mutex_enter(&un->un_pm_mutex);
7652 }
7653
7654 /*
7655 * Check whether there is already a timeout scheduled for power
7656 * management. If yes, then don't lower the power here; that's
7657 * the timeout handler's job.
7658 */
7659 if (un->un_pm_timeid != NULL) {
7660 timeout_id_t temp_id = un->un_pm_timeid;
7661 un->un_pm_timeid = NULL;
7662 mutex_exit(&un->un_pm_mutex);
7663 /*
7664 * Timeout is active; cancel it.
7665 * Note that it'll never be active on a device
7666 * that does not support PM, therefore we don't
7667 * have to check before calling pm_idle_component.
7668 */
7669 (void) untimeout(temp_id);
7670 (void) pm_idle_component(SD_DEVINFO(un), 0);
7671
7672 } else {
7673 mutex_exit(&un->un_pm_mutex);
7674 if ((un->un_f_pm_is_enabled == TRUE) &&
7675 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
7676 DDI_SUCCESS)) {
7677 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7678 "sd_dr_detach: Lower power request failed, ignoring.\n");
7679 /*
7680 * Fix for bug: 4297749, item # 13
7681 * The above test now includes a check to see if PM is
7682 * supported by this device before calling
7683 * pm_lower_power().
7684 * Note, the following is not dead code. The call to
7685 * pm_lower_power above will generate a call back into
7686 * our sdpower routine which might result in a timeout
7687 * handler getting activated. Therefore the following
7688 * code is valid and necessary.
7689 */
7690 mutex_enter(&un->un_pm_mutex);
7691 if (un->un_pm_timeid != NULL) {
7692 timeout_id_t temp_id = un->un_pm_timeid;
7693 un->un_pm_timeid = NULL;
7694 mutex_exit(&un->un_pm_mutex);
7695 (void) untimeout(temp_id);
7696 (void) pm_idle_component(SD_DEVINFO(un), 0);
7697 } else {
7698 mutex_exit(&un->un_pm_mutex);
7699 }
7700 }
7701 }
7702
7703 /*
7704 * Cleanup from the scsi_ifsetcap() calls (437868)
7705 * Relocated here from above to be after the call to
7706 * pm_lower_power, which was getting errors.
7707 */
7708 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7709 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7710
7711 /*
7712 * Currently, tagged queuing is supported per target by the HBA.
7713 * Setting this per lun instance actually sets the capability of this
7714 * target in the HBA, which affects those luns already attached on the
7715 * same target. So during detach, we can disable this capability
7716 * only when this is the only lun left on this target. By doing
7717 * this, we assume a target has the same tagged queuing capability
7718 * for every lun. The condition can be removed when the HBA is changed
7719 * to support per-lun tagged queuing capability.
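 *
 * Example: with LUN 0 and LUN 1 both attached on the same target,
 * detaching LUN 0 must leave "tagged-qing" alone, since clearing it
 * would also disable tagged queuing for LUN 1; the capability is
 * cleared below only when the last LUN on the target detaches.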
7720 */ 7721 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7722 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7723 } 7724 7725 if (un->un_f_is_fibre == FALSE) { 7726 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7727 } 7728 7729 /* 7730 * Remove any event callbacks, fibre only 7731 */ 7732 if (un->un_f_is_fibre == TRUE) { 7733 if ((un->un_insert_event != NULL) && 7734 (ddi_remove_event_handler(un->un_insert_cb_id) != 7735 DDI_SUCCESS)) { 7736 /* 7737 * Note: We are returning here after having done 7738 * substantial cleanup above. This is consistent 7739 * with the legacy implementation but this may not 7740 * be the right thing to do. 7741 */ 7742 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7743 "sd_dr_detach: Cannot cancel insert event\n"); 7744 goto err_remove_event; 7745 } 7746 un->un_insert_event = NULL; 7747 7748 if ((un->un_remove_event != NULL) && 7749 (ddi_remove_event_handler(un->un_remove_cb_id) != 7750 DDI_SUCCESS)) { 7751 /* 7752 * Note: We are returning here after having done 7753 * substantial cleanup above. This is consistent 7754 * with the legacy implementation but this may not 7755 * be the right thing to do. 7756 */ 7757 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7758 "sd_dr_detach: Cannot cancel remove event\n"); 7759 goto err_remove_event; 7760 } 7761 un->un_remove_event = NULL; 7762 } 7763 7764 /* Do not free the softstate if the callback routine is active */ 7765 sd_sync_with_callback(un); 7766 7767 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7768 cmlb_free_handle(&un->un_cmlbhandle); 7769 7770 /* 7771 * Hold the detach mutex here, to make sure that no other threads ever 7772 * can access a (partially) freed soft state structure. 7773 */ 7774 mutex_enter(&sd_detach_mutex); 7775 7776 /* 7777 * Clean up the soft state struct. 7778 * Cleanup is done in reverse order of allocs/inits. 7779 * At this point there should be no competing threads anymore. 7780 */ 7781 7782 /* Unregister and free device id. */ 7783 ddi_devid_unregister(devi); 7784 if (un->un_devid) { 7785 ddi_devid_free(un->un_devid); 7786 un->un_devid = NULL; 7787 } 7788 7789 /* 7790 * Destroy wmap cache if it exists. 7791 */ 7792 if (un->un_wm_cache != NULL) { 7793 kmem_cache_destroy(un->un_wm_cache); 7794 un->un_wm_cache = NULL; 7795 } 7796 7797 /* 7798 * kstat cleanup is done in detach for all device types (4363169). 7799 * We do not want to fail detach if the device kstats are not deleted 7800 * since there is a confusion about the devo_refcnt for the device. 7801 * We just delete the kstats and let detach complete successfully. 7802 */ 7803 if (un->un_stats != NULL) { 7804 kstat_delete(un->un_stats); 7805 un->un_stats = NULL; 7806 } 7807 if (un->un_errstats != NULL) { 7808 kstat_delete(un->un_errstats); 7809 un->un_errstats = NULL; 7810 } 7811 7812 /* Remove partition stats */ 7813 if (un->un_f_pkstats_enabled) { 7814 for (i = 0; i < NSDMAP; i++) { 7815 if (un->un_pstats[i] != NULL) { 7816 kstat_delete(un->un_pstats[i]); 7817 un->un_pstats[i] = NULL; 7818 } 7819 } 7820 } 7821 7822 /* Remove xbuf registration */ 7823 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7824 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7825 7826 /* Remove driver properties */ 7827 ddi_prop_remove_all(devi); 7828 7829 mutex_destroy(&un->un_pm_mutex); 7830 cv_destroy(&un->un_pm_busy_cv); 7831 7832 cv_destroy(&un->un_wcc_cv); 7833 7834 /* Open/close semaphore */ 7835 sema_destroy(&un->un_semoclose); 7836 7837 /* Removable media condvar. 
*/ 7838 cv_destroy(&un->un_state_cv); 7839 7840 /* Suspend/resume condvar. */ 7841 cv_destroy(&un->un_suspend_cv); 7842 cv_destroy(&un->un_disk_busy_cv); 7843 7844 sd_free_rqs(un); 7845 7846 /* Free up soft state */ 7847 devp->sd_private = NULL; 7848 7849 bzero(un, sizeof (struct sd_lun)); 7850 ddi_soft_state_free(sd_state, instance); 7851 7852 mutex_exit(&sd_detach_mutex); 7853 7854 /* This frees up the INQUIRY data associated with the device. */ 7855 scsi_unprobe(devp); 7856 7857 /* 7858 * After successfully detaching an instance, we update the information 7859 * of how many luns have been attached in the relative target and 7860 * controller for parallel SCSI. This information is used when sd tries 7861 * to set the tagged queuing capability in HBA. 7862 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 7863 * check if the device is parallel SCSI. However, we don't need to 7864 * check here because we've already checked during attach. No device 7865 * that is not parallel SCSI is in the chain. 7866 */ 7867 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7868 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 7869 } 7870 7871 return (DDI_SUCCESS); 7872 7873 err_notclosed: 7874 mutex_exit(SD_MUTEX(un)); 7875 7876 err_stillbusy: 7877 _NOTE(NO_COMPETING_THREADS_NOW); 7878 7879 err_remove_event: 7880 mutex_enter(&sd_detach_mutex); 7881 un->un_detach_count--; 7882 mutex_exit(&sd_detach_mutex); 7883 7884 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 7885 return (DDI_FAILURE); 7886 } 7887 7888 7889 /* 7890 * Function: sd_create_errstats 7891 * 7892 * Description: This routine instantiates the device error stats. 7893 * 7894 * Note: During attach the stats are instantiated first so they are 7895 * available for attach-time routines that utilize the driver 7896 * iopath to send commands to the device. The stats are initialized 7897 * separately so data obtained during some attach-time routines is 7898 * available. 
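 *
 * (For reference: these named kstats are what utilities such as
 * "iostat -E" report for each sd instance, e.g. the "Soft Errors",
 * "Hard Errors" and "Transport Errors" counters created below.)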
(4362483) 7899 * 7900 * Arguments: un - driver soft state (unit) structure 7901 * instance - driver instance 7902 * 7903 * Context: Kernel thread context 7904 */ 7905 7906 static void 7907 sd_create_errstats(struct sd_lun *un, int instance) 7908 { 7909 struct sd_errstats *stp; 7910 char kstatmodule_err[KSTAT_STRLEN]; 7911 char kstatname[KSTAT_STRLEN]; 7912 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 7913 7914 ASSERT(un != NULL); 7915 7916 if (un->un_errstats != NULL) { 7917 return; 7918 } 7919 7920 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 7921 "%serr", sd_label); 7922 (void) snprintf(kstatname, sizeof (kstatname), 7923 "%s%d,err", sd_label, instance); 7924 7925 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 7926 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 7927 7928 if (un->un_errstats == NULL) { 7929 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7930 "sd_create_errstats: Failed kstat_create\n"); 7931 return; 7932 } 7933 7934 stp = (struct sd_errstats *)un->un_errstats->ks_data; 7935 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 7936 KSTAT_DATA_UINT32); 7937 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 7938 KSTAT_DATA_UINT32); 7939 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 7940 KSTAT_DATA_UINT32); 7941 kstat_named_init(&stp->sd_vid, "Vendor", 7942 KSTAT_DATA_CHAR); 7943 kstat_named_init(&stp->sd_pid, "Product", 7944 KSTAT_DATA_CHAR); 7945 kstat_named_init(&stp->sd_revision, "Revision", 7946 KSTAT_DATA_CHAR); 7947 kstat_named_init(&stp->sd_serial, "Serial No", 7948 KSTAT_DATA_CHAR); 7949 kstat_named_init(&stp->sd_capacity, "Size", 7950 KSTAT_DATA_ULONGLONG); 7951 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 7952 KSTAT_DATA_UINT32); 7953 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 7954 KSTAT_DATA_UINT32); 7955 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 7956 KSTAT_DATA_UINT32); 7957 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 7958 KSTAT_DATA_UINT32); 7959 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 7960 KSTAT_DATA_UINT32); 7961 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 7962 KSTAT_DATA_UINT32); 7963 7964 un->un_errstats->ks_private = un; 7965 un->un_errstats->ks_update = nulldev; 7966 7967 kstat_install(un->un_errstats); 7968 } 7969 7970 7971 /* 7972 * Function: sd_set_errstats 7973 * 7974 * Description: This routine sets the value of the vendor id, product id, 7975 * revision, serial number, and capacity device error stats. 7976 * 7977 * Note: During attach the stats are instantiated first so they are 7978 * available for attach-time routines that utilize the driver 7979 * iopath to send commands to the device. The stats are initialized 7980 * separately so data obtained during some attach-time routines is 7981 * available. 
(4362483)
7982 *
7983 * Arguments: un - driver soft state (unit) structure
7984 *
7985 * Context: Kernel thread context
7986 */
7987
7988 static void
7989 sd_set_errstats(struct sd_lun *un)
7990 {
7991 struct sd_errstats *stp;
7992
7993 ASSERT(un != NULL);
7994 ASSERT(un->un_errstats != NULL);
7995 stp = (struct sd_errstats *)un->un_errstats->ks_data;
7996 ASSERT(stp != NULL);
7997 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
7998 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
7999 (void) strncpy(stp->sd_revision.value.c,
8000 un->un_sd->sd_inq->inq_revision, 4);
8001
8002 /*
8003 * All the errstats are persistent across detach/attach, so reset
8004 * them all here to handle hot replacement of disk drives, except
8005 * for Sun-qualified drives whose identity (serial number) has not
8006 * changed.
8007 */
8008 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
8009 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8010 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
8011 stp->sd_softerrs.value.ui32 = 0;
8012 stp->sd_harderrs.value.ui32 = 0;
8013 stp->sd_transerrs.value.ui32 = 0;
8014 stp->sd_rq_media_err.value.ui32 = 0;
8015 stp->sd_rq_ntrdy_err.value.ui32 = 0;
8016 stp->sd_rq_nodev_err.value.ui32 = 0;
8017 stp->sd_rq_recov_err.value.ui32 = 0;
8018 stp->sd_rq_illrq_err.value.ui32 = 0;
8019 stp->sd_rq_pfa_err.value.ui32 = 0;
8020 }
8021
8022 /*
8023 * Set the "Serial No" kstat for Sun qualified drives (indicated by
8024 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
8025 * (4376302))
8026 */
8027 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
8028 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8029 sizeof (SD_INQUIRY(un)->inq_serial));
8030 }
8031
8032 if (un->un_f_blockcount_is_valid != TRUE) {
8033 /*
8034 * Set capacity error stat to 0 for no media. This ensures
8035 * a valid capacity is displayed in response to 'iostat -E'
8036 * when no media is present in the device.
8037 */
8038 stp->sd_capacity.value.ui64 = 0;
8039 } else {
8040 /*
8041 * Multiply un_blockcount by un->un_sys_blocksize to get
8042 * capacity.
8043 *
8044 * Note: for non-512 blocksize devices "un_blockcount" has been
8045 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
8046 * (un_tgt_blocksize / un->un_sys_blocksize).
8047 */
8048 stp->sd_capacity.value.ui64 = (uint64_t)
8049 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
8050 }
8051 }
8052
8053
8054 /*
8055 * Function: sd_set_pstats
8056 *
8057 * Description: This routine instantiates and initializes the partition
8058 * stats for each partition with more than zero blocks.
8059 * (4363169)
8060 *
8061 * Arguments: un - driver soft state (unit) structure
8062 *
8063 * Context: Kernel thread context
8064 */
8065
8066 static void
8067 sd_set_pstats(struct sd_lun *un)
8068 {
8069 char kstatname[KSTAT_STRLEN];
8070 int instance;
8071 int i;
8072 diskaddr_t nblks = 0;
8073 char *partname = NULL;
8074
8075 ASSERT(un != NULL);
8076
8077 instance = ddi_get_instance(SD_DEVINFO(un));
8078
8079 /* Note: x86: is this a VTOC8/VTOC16 difference?
*/
8080 for (i = 0; i < NSDMAP; i++) {
8081
8082 if (cmlb_partinfo(un->un_cmlbhandle, i,
8083 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8084 continue;
8085 mutex_enter(SD_MUTEX(un));
8086
8087 if ((un->un_pstats[i] == NULL) &&
8088 (nblks != 0)) {
8089
8090 (void) snprintf(kstatname, sizeof (kstatname),
8091 "%s%d,%s", sd_label, instance,
8092 partname);
8093
8094 un->un_pstats[i] = kstat_create(sd_label,
8095 instance, kstatname, "partition", KSTAT_TYPE_IO,
8096 1, KSTAT_FLAG_PERSISTENT);
8097 if (un->un_pstats[i] != NULL) {
8098 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8099 kstat_install(un->un_pstats[i]);
8100 }
8101 }
8102 mutex_exit(SD_MUTEX(un));
8103 }
8104 }
8105
8106
8107 #if (defined(__fibre))
8108 /*
8109 * Function: sd_init_event_callbacks
8110 *
8111 * Description: This routine initializes the insertion and removal event
8112 * callbacks. (fibre only)
8113 *
8114 * Arguments: un - driver soft state (unit) structure
8115 *
8116 * Context: Kernel thread context
8117 */
8118
8119 static void
8120 sd_init_event_callbacks(struct sd_lun *un)
8121 {
8122 ASSERT(un != NULL);
8123
8124 if ((un->un_insert_event == NULL) &&
8125 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8126 &un->un_insert_event) == DDI_SUCCESS)) {
8127 /*
8128 * Add the callback for an insertion event
8129 */
8130 (void) ddi_add_event_handler(SD_DEVINFO(un),
8131 un->un_insert_event, sd_event_callback, (void *)un,
8132 &(un->un_insert_cb_id));
8133 }
8134
8135 if ((un->un_remove_event == NULL) &&
8136 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8137 &un->un_remove_event) == DDI_SUCCESS)) {
8138 /*
8139 * Add the callback for a removal event
8140 */
8141 (void) ddi_add_event_handler(SD_DEVINFO(un),
8142 un->un_remove_event, sd_event_callback, (void *)un,
8143 &(un->un_remove_cb_id));
8144 }
8145 }
8146
8147
8148 /*
8149 * Function: sd_event_callback
8150 *
8151 * Description: This routine handles insert/remove events (photon). The
8152 * state is changed to OFFLINE, which can be used to suppress
8153 * error messages. (fibre only)
8154 *
8155 * Arguments: un - driver soft state (unit) structure
8156 *
8157 * Context: Callout thread context
8158 */
8159 /* ARGSUSED */
8160 static void
8161 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8162 void *bus_impldata)
8163 {
8164 struct sd_lun *un = (struct sd_lun *)arg;
8165
8166 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8167 if (event == un->un_insert_event) {
8168 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8169 mutex_enter(SD_MUTEX(un));
8170 if (un->un_state == SD_STATE_OFFLINE) {
8171 if (un->un_last_state != SD_STATE_SUSPENDED) {
8172 un->un_state = un->un_last_state;
8173 } else {
8174 /*
8175 * We have gone through SUSPEND/RESUME while
8176 * we were offline. Restore the last state.
8177 */
8178 un->un_state = un->un_save_state;
8179 }
8180 }
8181 mutex_exit(SD_MUTEX(un));
8182
8183 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8184 } else if (event == un->un_remove_event) {
8185 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8186 mutex_enter(SD_MUTEX(un));
8187 /*
8188 * We need to handle an event callback that occurs during
8189 * the suspend operation, since we don't prevent it.
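 *
 * For example, if a remove event arrives while the unit is
 * SUSPENDED, the code below records SD_STATE_OFFLINE in
 * un_last_state rather than changing un_state, so the resume
 * path restores OFFLINE instead of the pre-suspend state.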
8190 */
8191 if (un->un_state != SD_STATE_OFFLINE) {
8192 if (un->un_state != SD_STATE_SUSPENDED) {
8193 New_state(un, SD_STATE_OFFLINE);
8194 } else {
8195 un->un_last_state = SD_STATE_OFFLINE;
8196 }
8197 }
8198 mutex_exit(SD_MUTEX(un));
8199 } else {
8200 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8201 "!Unknown event\n");
8202 }
8203
8204 }
8205 #endif
8206
8207 /*
8208 * Function: sd_cache_control()
8209 *
8210 * Description: This routine is the driver entry point for setting
8211 * read and write caching by modifying the WCE (write cache
8212 * enable) and RCD (read cache disable) bits of mode
8213 * page 8 (MODEPAGE_CACHING).
8214 *
8215 * Arguments: un - driver soft state (unit) structure
8216 * rcd_flag - flag for controlling the read cache
8217 * wce_flag - flag for controlling the write cache
8218 *
8219 * Return Code: EIO
8220 * code returned by sd_send_scsi_MODE_SENSE and
8221 * sd_send_scsi_MODE_SELECT
8222 *
8223 * Context: Kernel Thread
8224 */
8225
8226 static int
8227 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag)
8228 {
8229 struct mode_caching *mode_caching_page;
8230 uchar_t *header;
8231 size_t buflen;
8232 int hdrlen;
8233 int bd_len;
8234 int rval = 0;
8235 struct mode_header_grp2 *mhp;
8236
8237 ASSERT(un != NULL);
8238
8239 /*
8240 * Do a test unit ready; otherwise, a mode sense may not work if this
8241 * is the first command sent to the device after boot.
8242 */
8243 (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
8244
8245 if (un->un_f_cfg_is_atapi == TRUE) {
8246 hdrlen = MODE_HEADER_LENGTH_GRP2;
8247 } else {
8248 hdrlen = MODE_HEADER_LENGTH;
8249 }
8250
8251 /*
8252 * Allocate memory for the retrieved mode page and its headers. Set
8253 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8254 * we get all of the mode sense data; otherwise, the mode select
8255 * will fail. mode_cache_scsi3 is a superset of mode_caching.
8256 */
8257 buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8258 sizeof (struct mode_cache_scsi3);
8259
8260 header = kmem_zalloc(buflen, KM_SLEEP);
8261
8262 /* Get the information from the device. */
8263 if (un->un_f_cfg_is_atapi == TRUE) {
8264 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
8265 MODEPAGE_CACHING, SD_PATH_DIRECT);
8266 } else {
8267 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
8268 MODEPAGE_CACHING, SD_PATH_DIRECT);
8269 }
8270 if (rval != 0) {
8271 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8272 "sd_cache_control: Mode Sense Failed\n");
8273 kmem_free(header, buflen);
8274 return (rval);
8275 }
8276
8277 /*
8278 * Determine size of Block Descriptors in order to locate
8279 * the mode page data. ATAPI devices return 0; SCSI devices
8280 * should return MODE_BLK_DESC_LENGTH.
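 *
 * The MODE SENSE buffer parsed below is laid out as:
 *
 *	+--------------------------+ <- header (hdrlen bytes)
 *	| mode header              |
 *	+--------------------------+ <- header + hdrlen
 *	| block descriptor(s)      |    (bd_len bytes; may be 0)
 *	+--------------------------+ <- header + hdrlen + bd_len
 *	| MODEPAGE_CACHING data    |
 *	+--------------------------+
 *
 * so the caching page is located at (header + hdrlen + bd_len).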
8281 */ 8282 if (un->un_f_cfg_is_atapi == TRUE) { 8283 mhp = (struct mode_header_grp2 *)header; 8284 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8285 } else { 8286 bd_len = ((struct mode_header *)header)->bdesc_length; 8287 } 8288 8289 if (bd_len > MODE_BLK_DESC_LENGTH) { 8290 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8291 "sd_cache_control: Mode Sense returned invalid " 8292 "block descriptor length\n"); 8293 kmem_free(header, buflen); 8294 return (EIO); 8295 } 8296 8297 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8298 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8299 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8300 " caching page code mismatch %d\n", 8301 mode_caching_page->mode_page.code); 8302 kmem_free(header, buflen); 8303 return (EIO); 8304 } 8305 8306 /* Check the relevant bits on successful mode sense. */ 8307 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8308 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8309 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8310 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8311 8312 size_t sbuflen; 8313 uchar_t save_pg; 8314 8315 /* 8316 * Construct select buffer length based on the 8317 * length of the sense data returned. 8318 */ 8319 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8320 sizeof (struct mode_page) + 8321 (int)mode_caching_page->mode_page.length; 8322 8323 /* 8324 * Set the caching bits as requested. 8325 */ 8326 if (rcd_flag == SD_CACHE_ENABLE) 8327 mode_caching_page->rcd = 0; 8328 else if (rcd_flag == SD_CACHE_DISABLE) 8329 mode_caching_page->rcd = 1; 8330 8331 if (wce_flag == SD_CACHE_ENABLE) 8332 mode_caching_page->wce = 1; 8333 else if (wce_flag == SD_CACHE_DISABLE) 8334 mode_caching_page->wce = 0; 8335 8336 /* 8337 * Save the page if the mode sense says the 8338 * drive supports it. 8339 */ 8340 save_pg = mode_caching_page->mode_page.ps ? 8341 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8342 8343 /* Clear reserved bits before mode select. */ 8344 mode_caching_page->mode_page.ps = 0; 8345 8346 /* 8347 * Clear out mode header for mode select. 8348 * The rest of the retrieved page will be reused. 8349 */ 8350 bzero(header, hdrlen); 8351 8352 if (un->un_f_cfg_is_atapi == TRUE) { 8353 mhp = (struct mode_header_grp2 *)header; 8354 mhp->bdesc_length_hi = bd_len >> 8; 8355 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8356 } else { 8357 ((struct mode_header *)header)->bdesc_length = bd_len; 8358 } 8359 8360 /* Issue mode select to change the cache settings */ 8361 if (un->un_f_cfg_is_atapi == TRUE) { 8362 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8363 sbuflen, save_pg, SD_PATH_DIRECT); 8364 } else { 8365 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8366 sbuflen, save_pg, SD_PATH_DIRECT); 8367 } 8368 } 8369 8370 kmem_free(header, buflen); 8371 return (rval); 8372 } 8373 8374 8375 /* 8376 * Function: sd_get_write_cache_enabled() 8377 * 8378 * Description: This routine is the driver entry point for determining if 8379 * write caching is enabled. It examines the WCE (write cache 8380 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
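 *
 *		A minimal sketch of the check performed below, assuming
 *		the MODE SENSE buffer has been parsed exactly as in
 *		sd_cache_control():
 *
 *			struct mode_caching *pg = (struct mode_caching *)
 *			    (header + hdrlen + bd_len);
 *			*is_enabled = pg->wce;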
8381 * 8382 * Arguments: un - driver soft state (unit) structure 8383 * is_enabled - pointer to int where write cache enabled state 8384 * is returned (non-zero -> write cache enabled) 8385 * 8386 * 8387 * Return Code: EIO 8388 * code returned by sd_send_scsi_MODE_SENSE 8389 * 8390 * Context: Kernel Thread 8391 * 8392 * NOTE: If ioctl is added to disable write cache, this sequence should 8393 * be followed so that no locking is required for accesses to 8394 * un->un_f_write_cache_enabled: 8395 * do mode select to clear wce 8396 * do synchronize cache to flush cache 8397 * set un->un_f_write_cache_enabled = FALSE 8398 * 8399 * Conversely, an ioctl to enable the write cache should be done 8400 * in this order: 8401 * set un->un_f_write_cache_enabled = TRUE 8402 * do mode select to set wce 8403 */ 8404 8405 static int 8406 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8407 { 8408 struct mode_caching *mode_caching_page; 8409 uchar_t *header; 8410 size_t buflen; 8411 int hdrlen; 8412 int bd_len; 8413 int rval = 0; 8414 8415 ASSERT(un != NULL); 8416 ASSERT(is_enabled != NULL); 8417 8418 /* in case of error, flag as enabled */ 8419 *is_enabled = TRUE; 8420 8421 /* 8422 * Do a test unit ready, otherwise a mode sense may not work if this 8423 * is the first command sent to the device after boot. 8424 */ 8425 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8426 8427 if (un->un_f_cfg_is_atapi == TRUE) { 8428 hdrlen = MODE_HEADER_LENGTH_GRP2; 8429 } else { 8430 hdrlen = MODE_HEADER_LENGTH; 8431 } 8432 8433 /* 8434 * Allocate memory for the retrieved mode page and its headers. Set 8435 * a pointer to the page itself. 8436 */ 8437 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8438 header = kmem_zalloc(buflen, KM_SLEEP); 8439 8440 /* Get the information from the device. */ 8441 if (un->un_f_cfg_is_atapi == TRUE) { 8442 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8443 MODEPAGE_CACHING, SD_PATH_DIRECT); 8444 } else { 8445 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8446 MODEPAGE_CACHING, SD_PATH_DIRECT); 8447 } 8448 if (rval != 0) { 8449 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8450 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8451 kmem_free(header, buflen); 8452 return (rval); 8453 } 8454 8455 /* 8456 * Determine size of Block Descriptors in order to locate 8457 * the mode page data. ATAPI devices return 0, SCSI devices 8458 * should return MODE_BLK_DESC_LENGTH. 
8459 */
8460 if (un->un_f_cfg_is_atapi == TRUE) {
8461 struct mode_header_grp2 *mhp;
8462 mhp = (struct mode_header_grp2 *)header;
8463 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
8464 } else {
8465 bd_len = ((struct mode_header *)header)->bdesc_length;
8466 }
8467
8468 if (bd_len > MODE_BLK_DESC_LENGTH) {
8469 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
8470 "sd_get_write_cache_enabled: Mode Sense returned invalid "
8471 "block descriptor length\n");
8472 kmem_free(header, buflen);
8473 return (EIO);
8474 }
8475
8476 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
8477 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
8478 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense"
8479 " caching page code mismatch %d\n",
8480 mode_caching_page->mode_page.code);
8481 kmem_free(header, buflen);
8482 return (EIO);
8483 }
8484 *is_enabled = mode_caching_page->wce;
8485
8486 kmem_free(header, buflen);
8487 return (0);
8488 }
8489
8490
8491 /*
8492 * Function: sd_make_device
8493 *
8494 * Description: Utility routine to return the Solaris device number from
8495 * the data in the device's dev_info structure.
8496 *
8497 * Return Code: The Solaris device number
8498 *
8499 * Context: Any
8500 */
8501
8502 static dev_t
8503 sd_make_device(dev_info_t *devi)
8504 {
8505 return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
8506 ddi_get_instance(devi) << SDUNIT_SHIFT));
8507 }
8508
8509
8510 /*
8511 * Function: sd_pm_entry
8512 *
8513 * Description: Called at the start of a new command to manage power
8514 * and busy status of a device. This includes determining whether
8515 * the current power state of the device is sufficient for
8516 * performing the command or whether it must be changed.
8517 * The PM framework is notified appropriately.
8518 * Only with a return status of DDI_SUCCESS will the
8519 * component be marked busy to the framework.
8520 *
8521 * All callers of sd_pm_entry must check the return status
8522 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
8523 * of DDI_FAILURE indicates the device failed to power up.
8524 * In this case un_pm_count has been adjusted so the result
8525 * on exit is still powered down, i.e. count is less than 0.
8526 * Calling sd_pm_exit with this count value hits an ASSERT.
8527 *
8528 * Return Code: DDI_SUCCESS or DDI_FAILURE
8529 *
8530 * Context: Kernel thread context.
8531 */
8532
8533 static int
8534 sd_pm_entry(struct sd_lun *un)
8535 {
8536 int return_status = DDI_SUCCESS;
8537
8538 ASSERT(!mutex_owned(SD_MUTEX(un)));
8539 ASSERT(!mutex_owned(&un->un_pm_mutex));
8540
8541 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
8542
8543 if (un->un_f_pm_is_enabled == FALSE) {
8544 SD_TRACE(SD_LOG_IO_PM, un,
8545 "sd_pm_entry: exiting, PM not enabled\n");
8546 return (return_status);
8547 }
8548
8549 /*
8550 * Just increment a counter if PM is enabled. On the transition from
8551 * 0 ==> 1, mark the device as busy. The iodone side will decrement
8552 * the count with each IO and mark the device as idle when the count
8553 * hits 0.
8554 *
8555 * If the count is less than 0 the device is powered down. If a powered
8556 * down device is successfully powered up then the count must be
8557 * incremented to reflect the power up. Note that it'll get incremented
8558 * a second time to become busy.
8559 *
8560 * Because the following has the potential to change the device state
8561 * and must release the un_pm_mutex to do so, only one thread can be
8562 * allowed through at a time.
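 *
 * Worked example of the counting described above (PM enabled): a
 * powered-down device sits at un_pm_count == -1. The first command
 * raises power; on success the count is left at 0 and then
 * incremented to 1 below, marking the component busy. Each further
 * in-flight command increments it, each sd_pm_exit() decrements it,
 * and on the 1 ==> 0 transition the component is marked idle again.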
8563 */ 8564 8565 mutex_enter(&un->un_pm_mutex); 8566 while (un->un_pm_busy == TRUE) { 8567 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 8568 } 8569 un->un_pm_busy = TRUE; 8570 8571 if (un->un_pm_count < 1) { 8572 8573 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 8574 8575 /* 8576 * Indicate we are now busy so the framework won't attempt to 8577 * power down the device. This call will only fail if either 8578 * we passed a bad component number or the device has no 8579 * components. Neither of these should ever happen. 8580 */ 8581 mutex_exit(&un->un_pm_mutex); 8582 return_status = pm_busy_component(SD_DEVINFO(un), 0); 8583 ASSERT(return_status == DDI_SUCCESS); 8584 8585 mutex_enter(&un->un_pm_mutex); 8586 8587 if (un->un_pm_count < 0) { 8588 mutex_exit(&un->un_pm_mutex); 8589 8590 SD_TRACE(SD_LOG_IO_PM, un, 8591 "sd_pm_entry: power up component\n"); 8592 8593 /* 8594 * pm_raise_power will cause sdpower to be called 8595 * which brings the device power level to the 8596 * desired state, ON in this case. If successful, 8597 * un_pm_count and un_power_level will be updated 8598 * appropriately. 8599 */ 8600 return_status = pm_raise_power(SD_DEVINFO(un), 0, 8601 SD_SPINDLE_ON); 8602 8603 mutex_enter(&un->un_pm_mutex); 8604 8605 if (return_status != DDI_SUCCESS) { 8606 /* 8607 * Power up failed. 8608 * Idle the device and adjust the count 8609 * so the result on exit is that we're 8610 * still powered down, ie. count is less than 0. 8611 */ 8612 SD_TRACE(SD_LOG_IO_PM, un, 8613 "sd_pm_entry: power up failed," 8614 " idle the component\n"); 8615 8616 (void) pm_idle_component(SD_DEVINFO(un), 0); 8617 un->un_pm_count--; 8618 } else { 8619 /* 8620 * Device is powered up, verify the 8621 * count is non-negative. 8622 * This is debug only. 8623 */ 8624 ASSERT(un->un_pm_count == 0); 8625 } 8626 } 8627 8628 if (return_status == DDI_SUCCESS) { 8629 /* 8630 * For performance, now that the device has been tagged 8631 * as busy, and it's known to be powered up, update the 8632 * chain types to use jump tables that do not include 8633 * pm. This significantly lowers the overhead and 8634 * therefore improves performance. 8635 */ 8636 8637 mutex_exit(&un->un_pm_mutex); 8638 mutex_enter(SD_MUTEX(un)); 8639 SD_TRACE(SD_LOG_IO_PM, un, 8640 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8641 un->un_uscsi_chain_type); 8642 8643 if (un->un_f_non_devbsize_supported) { 8644 un->un_buf_chain_type = 8645 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8646 } else { 8647 un->un_buf_chain_type = 8648 SD_CHAIN_INFO_DISK_NO_PM; 8649 } 8650 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8651 8652 SD_TRACE(SD_LOG_IO_PM, un, 8653 " changed uscsi_chain_type to %d\n", 8654 un->un_uscsi_chain_type); 8655 mutex_exit(SD_MUTEX(un)); 8656 mutex_enter(&un->un_pm_mutex); 8657 8658 if (un->un_pm_idle_timeid == NULL) { 8659 /* 300 ms. */ 8660 un->un_pm_idle_timeid = 8661 timeout(sd_pm_idletimeout_handler, un, 8662 (drv_usectohz((clock_t)300000))); 8663 /* 8664 * Include an extra call to busy which keeps the 8665 * device busy with-respect-to the PM layer 8666 * until the timer fires, at which time it'll 8667 * get the extra idle call. 8668 */ 8669 (void) pm_busy_component(SD_DEVINFO(un), 0); 8670 } 8671 } 8672 } 8673 un->un_pm_busy = FALSE; 8674 /* Next... 
*/
8675 cv_signal(&un->un_pm_busy_cv);
8676
8677 un->un_pm_count++;
8678
8679 SD_TRACE(SD_LOG_IO_PM, un,
8680 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
8681
8682 mutex_exit(&un->un_pm_mutex);
8683
8684 return (return_status);
8685 }
8686
8687
8688 /*
8689 * Function: sd_pm_exit
8690 *
8691 * Description: Called at the completion of a command to manage busy
8692 * status for the device. If the device becomes idle the
8693 * PM framework is notified.
8694 *
8695 * Context: Kernel thread context
8696 */
8697
8698 static void
8699 sd_pm_exit(struct sd_lun *un)
8700 {
8701 ASSERT(!mutex_owned(SD_MUTEX(un)));
8702 ASSERT(!mutex_owned(&un->un_pm_mutex));
8703
8704 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
8705
8706 /*
8707 * After attach the following flag is only read, so don't
8708 * take the penalty of acquiring a mutex for it.
8709 */
8710 if (un->un_f_pm_is_enabled == TRUE) {
8711
8712 mutex_enter(&un->un_pm_mutex);
8713 un->un_pm_count--;
8714
8715 SD_TRACE(SD_LOG_IO_PM, un,
8716 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
8717
8718 ASSERT(un->un_pm_count >= 0);
8719 if (un->un_pm_count == 0) {
8720 mutex_exit(&un->un_pm_mutex);
8721
8722 SD_TRACE(SD_LOG_IO_PM, un,
8723 "sd_pm_exit: idle component\n");
8724
8725 (void) pm_idle_component(SD_DEVINFO(un), 0);
8726
8727 } else {
8728 mutex_exit(&un->un_pm_mutex);
8729 }
8730 }
8731
8732 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
8733 }
8734
8735
8736 /*
8737 * Function: sdopen
8738 *
8739 * Description: Driver's open(9e) entry point function.
8740 *
8741 * Arguments: dev_p - pointer to device number
8742 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
8743 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
8744 * cred_p - user credential pointer
8745 *
8746 * Return Code: EINVAL
8747 * ENXIO
8748 * EIO
8749 * EROFS
8750 * EBUSY
8751 *
8752 * Context: Kernel thread context
8753 */
8754 /* ARGSUSED */
8755 static int
8756 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
8757 {
8758 struct sd_lun *un;
8759 int nodelay;
8760 int part;
8761 uint64_t partmask;
8762 int instance;
8763 dev_t dev;
8764 int rval = EIO;
8765 diskaddr_t nblks = 0;
8766
8767 /* Validate the open type */
8768 if (otyp >= OTYPCNT) {
8769 return (EINVAL);
8770 }
8771
8772 dev = *dev_p;
8773 instance = SDUNIT(dev);
8774 mutex_enter(&sd_detach_mutex);
8775
8776 /*
8777 * Fail the open if there is no softstate for the instance, or
8778 * if another thread somewhere is trying to detach the instance.
8779 */
8780 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
8781 (un->un_detach_count != 0)) {
8782 mutex_exit(&sd_detach_mutex);
8783 /*
8784 * The probe cache only needs to be cleared when open(9e) fails
8785 * with ENXIO (4238046).
8786 */
8787 /*
8788 * Unconditionally clearing the probe cache is ok with
8789 * separate sd/ssd binaries; the x86 platform can be an
8790 * issue with both parallel
8791 * and fibre in one binary.
8792 */
8793 sd_scsi_clear_probe_cache();
8794 return (ENXIO);
8795 }
8796
8797 /*
8798 * The un_layer_count is to prevent another thread in specfs from
8799 * trying to detach the instance, which can happen when we are
8800 * called from a higher-layer driver instead of thru specfs.
8801 * This will not be needed when DDI provides a layered driver
8802 * interface that allows specfs to know that an instance is in
8803 * use by a layered driver & should not be detached.
8804 *
8805 * Note: the semantics for layered driver opens are exactly one
8806 * close for every open.
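 *
 * (For example, a layered consumer that opens the same partition
 * twice with OTYP_LYR drives un_ocmap.lyropen[part] to 2 and must
 * issue two closes before the open map drains and a detach can
 * succeed.)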
8807 */
8808 if (otyp == OTYP_LYR) {
8809 un->un_layer_count++;
8810 }
8811
8812 /*
8813 * Keep a count of the current # of opens in progress. This is because
8814 * some layered drivers try to call us as a regular open. This can
8815 * cause problems that we cannot prevent; however, by keeping this count
8816 * we can at least keep our open and detach routines from racing against
8817 * each other under such conditions.
8818 */
8819 un->un_opens_in_progress++;
8820 mutex_exit(&sd_detach_mutex);
8821
8822 nodelay = (flag & (FNDELAY | FNONBLOCK));
8823 part = SDPART(dev);
8824 partmask = 1 << part;
8825
8826 /*
8827 * We use a semaphore here in order to serialize
8828 * open and close requests on the device.
8829 */
8830 sema_p(&un->un_semoclose);
8831
8832 mutex_enter(SD_MUTEX(un));
8833
8834 /*
8835 * All device accesses go thru sdstrategy() where we check
8836 * on suspend status, but there could be a scsi_poll command,
8837 * which bypasses sdstrategy(), so we need to check pm
8838 * status.
8839 */
8840
8841 if (!nodelay) {
8842 while ((un->un_state == SD_STATE_SUSPENDED) ||
8843 (un->un_state == SD_STATE_PM_CHANGING)) {
8844 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
8845 }
8846
8847 mutex_exit(SD_MUTEX(un));
8848 if (sd_pm_entry(un) != DDI_SUCCESS) {
8849 rval = EIO;
8850 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
8851 "sdopen: sd_pm_entry failed\n");
8852 goto open_failed_with_pm;
8853 }
8854 mutex_enter(SD_MUTEX(un));
8855 }
8856
8857 /* check for previous exclusive open */
8858 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
8859 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
8860 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
8861 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
8862
8863 if (un->un_exclopen & (partmask)) {
8864 goto excl_open_fail;
8865 }
8866
8867 if (flag & FEXCL) {
8868 int i;
8869 if (un->un_ocmap.lyropen[part]) {
8870 goto excl_open_fail;
8871 }
8872 for (i = 0; i < (OTYPCNT - 1); i++) {
8873 if (un->un_ocmap.regopen[i] & (partmask)) {
8874 goto excl_open_fail;
8875 }
8876 }
8877 }
8878
8879 /*
8880 * Check the write permission if this is a removable media device,
8881 * NDELAY has not been set, and writable permission is requested.
8882 *
8883 * Note: If NDELAY was set and this is write-protected media the WRITE
8884 * attempt will fail with EIO as part of the I/O processing. This is a
8885 * more permissive implementation that allows the open to succeed and
8886 * WRITE attempts to fail when appropriate.
8887 */
8888 if (un->un_f_chk_wp_open) {
8889 if ((flag & FWRITE) && (!nodelay)) {
8890 mutex_exit(SD_MUTEX(un));
8891 /*
8892 * Defer the check for write permission on a writable
8893 * DVD drive until sdstrategy; do not fail the open
8894 * even if FWRITE is set, since the device can be
8895 * writable depending upon the media, and the media
8896 * can change after the call to open().
8897 */
8898 if (un->un_f_dvdram_writable_device == FALSE) {
8899 if (ISCD(un) || sr_check_wp(dev)) {
8900 rval = EROFS;
8901 mutex_enter(SD_MUTEX(un));
8902 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
8903 "write to cd or write protected media\n");
8904 goto open_fail;
8905 }
8906 }
8907 mutex_enter(SD_MUTEX(un));
8908 }
8909 }
8910
8911 /*
8912 * If opening in NDELAY/NONBLOCK mode, just return.
8913 * Check if disk is ready and has a valid geometry later.
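 *
 * (For example, an open(2) with O_NONBLOCK on a drive with no
 * media may succeed here; readiness and geometry are then checked
 * when I/O is actually attempted.)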
8914 */ 8915 if (!nodelay) { 8916 mutex_exit(SD_MUTEX(un)); 8917 rval = sd_ready_and_valid(un); 8918 mutex_enter(SD_MUTEX(un)); 8919 /* 8920 * Fail if device is not ready or if the number of disk 8921 * blocks is zero or negative for non CD devices. 8922 */ 8923 8924 nblks = 0; 8925 8926 if (rval == SD_READY_VALID && (!ISCD(un))) { 8927 /* if cmlb_partinfo fails, nblks remains 0 */ 8928 mutex_exit(SD_MUTEX(un)); 8929 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 8930 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 8931 mutex_enter(SD_MUTEX(un)); 8932 } 8933 8934 if ((rval != SD_READY_VALID) || 8935 (!ISCD(un) && nblks <= 0)) { 8936 rval = un->un_f_has_removable_media ? ENXIO : EIO; 8937 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8938 "device not ready or invalid disk block value\n"); 8939 goto open_fail; 8940 } 8941 #if defined(__i386) || defined(__amd64) 8942 } else { 8943 uchar_t *cp; 8944 /* 8945 * x86 requires special nodelay handling, so that p0 is 8946 * always defined and accessible. 8947 * Invalidate geometry only if device is not already open. 8948 */ 8949 cp = &un->un_ocmap.chkd[0]; 8950 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 8951 if (*cp != (uchar_t)0) { 8952 break; 8953 } 8954 cp++; 8955 } 8956 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 8957 mutex_exit(SD_MUTEX(un)); 8958 cmlb_invalidate(un->un_cmlbhandle, 8959 (void *)SD_PATH_DIRECT); 8960 mutex_enter(SD_MUTEX(un)); 8961 } 8962 8963 #endif 8964 } 8965 8966 if (otyp == OTYP_LYR) { 8967 un->un_ocmap.lyropen[part]++; 8968 } else { 8969 un->un_ocmap.regopen[otyp] |= partmask; 8970 } 8971 8972 /* Set up open and exclusive open flags */ 8973 if (flag & FEXCL) { 8974 un->un_exclopen |= (partmask); 8975 } 8976 8977 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 8978 "open of part %d type %d\n", part, otyp); 8979 8980 mutex_exit(SD_MUTEX(un)); 8981 if (!nodelay) { 8982 sd_pm_exit(un); 8983 } 8984 8985 sema_v(&un->un_semoclose); 8986 8987 mutex_enter(&sd_detach_mutex); 8988 un->un_opens_in_progress--; 8989 mutex_exit(&sd_detach_mutex); 8990 8991 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 8992 return (DDI_SUCCESS); 8993 8994 excl_open_fail: 8995 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 8996 rval = EBUSY; 8997 8998 open_fail: 8999 mutex_exit(SD_MUTEX(un)); 9000 9001 /* 9002 * On a failed open we must exit the pm management. 9003 */ 9004 if (!nodelay) { 9005 sd_pm_exit(un); 9006 } 9007 open_failed_with_pm: 9008 sema_v(&un->un_semoclose); 9009 9010 mutex_enter(&sd_detach_mutex); 9011 un->un_opens_in_progress--; 9012 if (otyp == OTYP_LYR) { 9013 un->un_layer_count--; 9014 } 9015 mutex_exit(&sd_detach_mutex); 9016 9017 return (rval); 9018 } 9019 9020 9021 /* 9022 * Function: sdclose 9023 * 9024 * Description: Driver's close(9e) entry point function. 
9025 * 9026 * Arguments: dev - device number 9027 * flag - file status flag, informational only 9028 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9029 * cred_p - user credential pointer 9030 * 9031 * Return Code: ENXIO 9032 * 9033 * Context: Kernel thread context 9034 */ 9035 /* ARGSUSED */ 9036 static int 9037 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9038 { 9039 struct sd_lun *un; 9040 uchar_t *cp; 9041 int part; 9042 int nodelay; 9043 int rval = 0; 9044 9045 /* Validate the open type */ 9046 if (otyp >= OTYPCNT) { 9047 return (ENXIO); 9048 } 9049 9050 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9051 return (ENXIO); 9052 } 9053 9054 part = SDPART(dev); 9055 nodelay = flag & (FNDELAY | FNONBLOCK); 9056 9057 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9058 "sdclose: close of part %d type %d\n", part, otyp); 9059 9060 /* 9061 * We use a semaphore here in order to serialize 9062 * open and close requests on the device. 9063 */ 9064 sema_p(&un->un_semoclose); 9065 9066 mutex_enter(SD_MUTEX(un)); 9067 9068 /* Don't proceed if power is being changed. */ 9069 while (un->un_state == SD_STATE_PM_CHANGING) { 9070 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9071 } 9072 9073 if (un->un_exclopen & (1 << part)) { 9074 un->un_exclopen &= ~(1 << part); 9075 } 9076 9077 /* Update the open partition map */ 9078 if (otyp == OTYP_LYR) { 9079 un->un_ocmap.lyropen[part] -= 1; 9080 } else { 9081 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9082 } 9083 9084 cp = &un->un_ocmap.chkd[0]; 9085 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9086 if (*cp != NULL) { 9087 break; 9088 } 9089 cp++; 9090 } 9091 9092 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9093 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9094 9095 /* 9096 * We avoid persistance upon the last close, and set 9097 * the throttle back to the maximum. 9098 */ 9099 un->un_throttle = un->un_saved_throttle; 9100 9101 if (un->un_state == SD_STATE_OFFLINE) { 9102 if (un->un_f_is_fibre == FALSE) { 9103 scsi_log(SD_DEVINFO(un), sd_label, 9104 CE_WARN, "offline\n"); 9105 } 9106 mutex_exit(SD_MUTEX(un)); 9107 cmlb_invalidate(un->un_cmlbhandle, 9108 (void *)SD_PATH_DIRECT); 9109 mutex_enter(SD_MUTEX(un)); 9110 9111 } else { 9112 /* 9113 * Flush any outstanding writes in NVRAM cache. 9114 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9115 * cmd, it may not work for non-Pluto devices. 9116 * SYNCHRONIZE CACHE is not required for removables, 9117 * except DVD-RAM drives. 9118 * 9119 * Also note: because SYNCHRONIZE CACHE is currently 9120 * the only command issued here that requires the 9121 * drive be powered up, only do the power up before 9122 * sending the Sync Cache command. If additional 9123 * commands are added which require a powered up 9124 * drive, the following sequence may have to change. 9125 * 9126 * And finally, note that parallel SCSI on SPARC 9127 * only issues a Sync Cache to DVD-RAM, a newly 9128 * supported device. 
*/ 9130 #if defined(__i386) || defined(__amd64) 9131 if (un->un_f_sync_cache_supported || 9132 un->un_f_dvdram_writable_device == TRUE) { 9133 #else 9134 if (un->un_f_dvdram_writable_device == TRUE) { 9135 #endif 9136 mutex_exit(SD_MUTEX(un)); 9137 if (sd_pm_entry(un) == DDI_SUCCESS) { 9138 rval = 9139 sd_send_scsi_SYNCHRONIZE_CACHE(un, 9140 NULL); 9141 /* ignore error if not supported */ 9142 if (rval == ENOTSUP) { 9143 rval = 0; 9144 } else if (rval != 0) { 9145 rval = EIO; 9146 } 9147 sd_pm_exit(un); 9148 } else { 9149 rval = EIO; 9150 } 9151 mutex_enter(SD_MUTEX(un)); 9152 } 9153 9154 /* 9155 * For devices that support DOOR_LOCK, send an ALLOW 9156 * MEDIA REMOVAL command, but don't get upset if it 9157 * fails. We need to raise the power of the drive before 9158 * we can call sd_send_scsi_DOORLOCK(). 9159 */ 9160 if (un->un_f_doorlock_supported) { 9161 mutex_exit(SD_MUTEX(un)); 9162 if (sd_pm_entry(un) == DDI_SUCCESS) { 9163 rval = sd_send_scsi_DOORLOCK(un, 9164 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 9165 9166 sd_pm_exit(un); 9167 if (ISCD(un) && (rval != 0) && 9168 (nodelay != 0)) { 9169 rval = ENXIO; 9170 } 9171 } else { 9172 rval = EIO; 9173 } 9174 mutex_enter(SD_MUTEX(un)); 9175 } 9176 9177 /* 9178 * If a device has removable media, invalidate all 9179 * parameters related to media, such as geometry, 9180 * blocksize, and blockcount. 9181 */ 9182 if (un->un_f_has_removable_media) { 9183 sr_ejected(un); 9184 } 9185 9186 /* 9187 * Destroy the cache (if it exists) which was 9188 * allocated for the write maps, since this is 9189 * the last close for this media. 9190 */ 9191 if (un->un_wm_cache) { 9192 /* 9193 * Check if there are pending commands, 9194 * and if there are, give a warning and 9195 * do not destroy the cache. 9196 */ 9197 if (un->un_ncmds_in_driver > 0) { 9198 scsi_log(SD_DEVINFO(un), 9199 sd_label, CE_WARN, 9200 "Unable to clean up memory " 9201 "because of pending I/O\n"); 9202 } else { 9203 kmem_cache_destroy( 9204 un->un_wm_cache); 9205 un->un_wm_cache = NULL; 9206 } 9207 } 9208 } 9209 } 9210 9211 mutex_exit(SD_MUTEX(un)); 9212 sema_v(&un->un_semoclose); 9213 9214 if (otyp == OTYP_LYR) { 9215 mutex_enter(&sd_detach_mutex); 9216 /* 9217 * The detach routine may run when the layer count 9218 * drops to zero. 9219 */ 9220 un->un_layer_count--; 9221 mutex_exit(&sd_detach_mutex); 9222 } 9223 9224 return (rval); 9225 } 9226 9227 9228 /* 9229 * Function: sd_ready_and_valid 9230 * 9231 * Description: Test if device is ready and has a valid geometry. 9232 * 9233 * Arguments: un - driver soft state (unit) structure 9234 * 9235 * 9236 * Return Code: SD_READY_VALID ready and valid label 9237 * SD_NOT_READY_VALID not ready, no label 9238 * SD_RESERVED_BY_OTHERS reservation conflict 9239 * 9240 * Context: Never called at interrupt context. 9241 */ 9242 9243 static int 9244 sd_ready_and_valid(struct sd_lun *un) 9245 { 9246 struct sd_errstats *stp; 9247 uint64_t capacity; 9248 uint_t lbasize; 9249 int rval = SD_READY_VALID; 9250 char name_str[48]; 9251 int is_valid; 9252 9253 ASSERT(un != NULL); 9254 ASSERT(!mutex_owned(SD_MUTEX(un))); 9255 9256 mutex_enter(SD_MUTEX(un)); 9257 /* 9258 * If a device has removable media, we must check if media is 9259 * ready when checking if this device is ready and valid.
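 *
 * (Editor's note) The readiness probe below is a TEST UNIT READY,
 * a six-byte all-zero CDB (opcode 0x00); a drive with an empty tray
 * typically fails it with CHECK CONDITION and "medium not present"
 * sense data, which shows up here as a nonzero return from
 * sd_send_scsi_TEST_UNIT_READY().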
9260 */ 9261 if (un->un_f_has_removable_media) { 9262 mutex_exit(SD_MUTEX(un)); 9263 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 9264 rval = SD_NOT_READY_VALID; 9265 mutex_enter(SD_MUTEX(un)); 9266 goto done; 9267 } 9268 9269 is_valid = SD_IS_VALID_LABEL(un); 9270 mutex_enter(SD_MUTEX(un)); 9271 if (!is_valid || 9272 (un->un_f_blockcount_is_valid == FALSE) || 9273 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 9274 9275 /* capacity has to be read every open. */ 9276 mutex_exit(SD_MUTEX(un)); 9277 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 9278 &lbasize, SD_PATH_DIRECT) != 0) { 9279 cmlb_invalidate(un->un_cmlbhandle, 9280 (void *)SD_PATH_DIRECT); 9281 mutex_enter(SD_MUTEX(un)); 9282 rval = SD_NOT_READY_VALID; 9283 goto done; 9284 } else { 9285 mutex_enter(SD_MUTEX(un)); 9286 sd_update_block_info(un, lbasize, capacity); 9287 } 9288 } 9289 9290 /* 9291 * Check if the media in the device is writable or not. 9292 */ 9293 if (!is_valid && ISCD(un)) { 9294 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 9295 } 9296 9297 } else { 9298 /* 9299 * Do a test unit ready to clear any unit attention from non-cd 9300 * devices. 9301 */ 9302 mutex_exit(SD_MUTEX(un)); 9303 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9304 mutex_enter(SD_MUTEX(un)); 9305 } 9306 9307 9308 /* 9309 * If this is a non 512 block device, allocate space for 9310 * the wmap cache. This is being done here since every time 9311 * a media is changed this routine will be called and the 9312 * block size is a function of media rather than device. 9313 */ 9314 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9315 if (!(un->un_wm_cache)) { 9316 (void) snprintf(name_str, sizeof (name_str), 9317 "%s%d_cache", 9318 ddi_driver_name(SD_DEVINFO(un)), 9319 ddi_get_instance(SD_DEVINFO(un))); 9320 un->un_wm_cache = kmem_cache_create( 9321 name_str, sizeof (struct sd_w_map), 9322 8, sd_wm_cache_constructor, 9323 sd_wm_cache_destructor, NULL, 9324 (void *)un, NULL, 0); 9325 if (!(un->un_wm_cache)) { 9326 rval = ENOMEM; 9327 goto done; 9328 } 9329 } 9330 } 9331 9332 if (un->un_state == SD_STATE_NORMAL) { 9333 /* 9334 * If the target is not yet ready here (defined by a TUR 9335 * failure), invalidate the geometry and print an 'offline' 9336 * message. This is a legacy message, as the state of the 9337 * target is not actually changed to SD_STATE_OFFLINE. 9338 * 9339 * If the TUR fails for EACCES (Reservation Conflict), 9340 * SD_RESERVED_BY_OTHERS will be returned to indicate 9341 * reservation conflict. If the TUR fails for other 9342 * reasons, SD_NOT_READY_VALID will be returned. 
9343 */ 9344 int err; 9345 9346 mutex_exit(SD_MUTEX(un)); 9347 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9348 mutex_enter(SD_MUTEX(un)); 9349 9350 if (err != 0) { 9351 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9352 "offline or reservation conflict\n"); 9353 mutex_exit(SD_MUTEX(un)); 9354 cmlb_invalidate(un->un_cmlbhandle, 9355 (void *)SD_PATH_DIRECT); 9356 mutex_enter(SD_MUTEX(un)); 9357 if (err == EACCES) { 9358 rval = SD_RESERVED_BY_OTHERS; 9359 } else { 9360 rval = SD_NOT_READY_VALID; 9361 } 9362 goto done; 9363 } 9364 } 9365 9366 if (un->un_f_format_in_progress == FALSE) { 9367 mutex_exit(SD_MUTEX(un)); 9368 if (cmlb_validate(un->un_cmlbhandle, 0, 9369 (void *)SD_PATH_DIRECT) != 0) { 9370 rval = SD_NOT_READY_VALID; 9371 mutex_enter(SD_MUTEX(un)); 9372 goto done; 9373 } 9374 if (un->un_f_pkstats_enabled) { 9375 sd_set_pstats(un); 9376 SD_TRACE(SD_LOG_IO_PARTITION, un, 9377 "sd_ready_and_valid: un:0x%p pstats created and " 9378 "set\n", un); 9379 } 9380 mutex_enter(SD_MUTEX(un)); 9381 } 9382 9383 /* 9384 * If this device supports DOOR_LOCK command, try and send 9385 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9386 * if it fails. For a CD, however, it is an error 9387 */ 9388 if (un->un_f_doorlock_supported) { 9389 mutex_exit(SD_MUTEX(un)); 9390 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9391 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9392 rval = SD_NOT_READY_VALID; 9393 mutex_enter(SD_MUTEX(un)); 9394 goto done; 9395 } 9396 mutex_enter(SD_MUTEX(un)); 9397 } 9398 9399 /* The state has changed, inform the media watch routines */ 9400 un->un_mediastate = DKIO_INSERTED; 9401 cv_broadcast(&un->un_state_cv); 9402 rval = SD_READY_VALID; 9403 9404 done: 9405 9406 /* 9407 * Initialize the capacity kstat value, if no media previously 9408 * (capacity kstat is 0) and a media has been inserted 9409 * (un_blockcount > 0). 9410 */ 9411 if (un->un_errstats != NULL) { 9412 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9413 if ((stp->sd_capacity.value.ui64 == 0) && 9414 (un->un_f_blockcount_is_valid == TRUE)) { 9415 stp->sd_capacity.value.ui64 = 9416 (uint64_t)((uint64_t)un->un_blockcount * 9417 un->un_sys_blocksize); 9418 } 9419 } 9420 9421 mutex_exit(SD_MUTEX(un)); 9422 return (rval); 9423 } 9424 9425 9426 /* 9427 * Function: sdmin 9428 * 9429 * Description: Routine to limit the size of a data transfer. Used in 9430 * conjunction with physio(9F). 9431 * 9432 * Arguments: bp - pointer to the indicated buf(9S) struct. 9433 * 9434 * Context: Kernel thread context. 9435 */ 9436 9437 static void 9438 sdmin(struct buf *bp) 9439 { 9440 struct sd_lun *un; 9441 int instance; 9442 9443 instance = SDUNIT(bp->b_edev); 9444 9445 un = ddi_get_soft_state(sd_state, instance); 9446 ASSERT(un != NULL); 9447 9448 if (bp->b_bcount > un->un_max_xfer_size) { 9449 bp->b_bcount = un->un_max_xfer_size; 9450 } 9451 } 9452 9453 9454 /* 9455 * Function: sdread 9456 * 9457 * Description: Driver's read(9e) entry point function. 9458 * 9459 * Arguments: dev - device number 9460 * uio - structure pointer describing where data is to be stored 9461 * in user's space 9462 * cred_p - user credential pointer 9463 * 9464 * Return Code: ENXIO 9465 * EIO 9466 * EINVAL 9467 * value returned by physio 9468 * 9469 * Context: Kernel thread context. 
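 *
 * (Editor's note) Transfers must be multiples of the system block
 * size and block-aligned: with a 512-byte block size, a 4096-byte
 * read at offset 8192 is accepted, while a 100-byte read, or a read
 * at offset 700, fails with EINVAL before any I/O is issued.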
9470 */ 9471 /* ARGSUSED */ 9472 static int 9473 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 9474 { 9475 struct sd_lun *un = NULL; 9476 int secmask; 9477 int err; 9478 9479 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9480 return (ENXIO); 9481 } 9482 9483 ASSERT(!mutex_owned(SD_MUTEX(un))); 9484 9485 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9486 mutex_enter(SD_MUTEX(un)); 9487 /* 9488 * Because the call to sd_ready_and_valid will issue I/O we 9489 * must wait here if either the device is suspended or 9490 * if it's power level is changing. 9491 */ 9492 while ((un->un_state == SD_STATE_SUSPENDED) || 9493 (un->un_state == SD_STATE_PM_CHANGING)) { 9494 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9495 } 9496 un->un_ncmds_in_driver++; 9497 mutex_exit(SD_MUTEX(un)); 9498 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9499 mutex_enter(SD_MUTEX(un)); 9500 un->un_ncmds_in_driver--; 9501 ASSERT(un->un_ncmds_in_driver >= 0); 9502 mutex_exit(SD_MUTEX(un)); 9503 return (EIO); 9504 } 9505 mutex_enter(SD_MUTEX(un)); 9506 un->un_ncmds_in_driver--; 9507 ASSERT(un->un_ncmds_in_driver >= 0); 9508 mutex_exit(SD_MUTEX(un)); 9509 } 9510 9511 /* 9512 * Read requests are restricted to multiples of the system block size. 9513 */ 9514 secmask = un->un_sys_blocksize - 1; 9515 9516 if (uio->uio_loffset & ((offset_t)(secmask))) { 9517 SD_ERROR(SD_LOG_READ_WRITE, un, 9518 "sdread: file offset not modulo %d\n", 9519 un->un_sys_blocksize); 9520 err = EINVAL; 9521 } else if (uio->uio_iov->iov_len & (secmask)) { 9522 SD_ERROR(SD_LOG_READ_WRITE, un, 9523 "sdread: transfer length not modulo %d\n", 9524 un->un_sys_blocksize); 9525 err = EINVAL; 9526 } else { 9527 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 9528 } 9529 return (err); 9530 } 9531 9532 9533 /* 9534 * Function: sdwrite 9535 * 9536 * Description: Driver's write(9e) entry point function. 9537 * 9538 * Arguments: dev - device number 9539 * uio - structure pointer describing where data is stored in 9540 * user's space 9541 * cred_p - user credential pointer 9542 * 9543 * Return Code: ENXIO 9544 * EIO 9545 * EINVAL 9546 * value returned by physio 9547 * 9548 * Context: Kernel thread context. 9549 */ 9550 /* ARGSUSED */ 9551 static int 9552 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 9553 { 9554 struct sd_lun *un = NULL; 9555 int secmask; 9556 int err; 9557 9558 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9559 return (ENXIO); 9560 } 9561 9562 ASSERT(!mutex_owned(SD_MUTEX(un))); 9563 9564 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9565 mutex_enter(SD_MUTEX(un)); 9566 /* 9567 * Because the call to sd_ready_and_valid will issue I/O we 9568 * must wait here if either the device is suspended or 9569 * if it's power level is changing. 9570 */ 9571 while ((un->un_state == SD_STATE_SUSPENDED) || 9572 (un->un_state == SD_STATE_PM_CHANGING)) { 9573 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9574 } 9575 un->un_ncmds_in_driver++; 9576 mutex_exit(SD_MUTEX(un)); 9577 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9578 mutex_enter(SD_MUTEX(un)); 9579 un->un_ncmds_in_driver--; 9580 ASSERT(un->un_ncmds_in_driver >= 0); 9581 mutex_exit(SD_MUTEX(un)); 9582 return (EIO); 9583 } 9584 mutex_enter(SD_MUTEX(un)); 9585 un->un_ncmds_in_driver--; 9586 ASSERT(un->un_ncmds_in_driver >= 0); 9587 mutex_exit(SD_MUTEX(un)); 9588 } 9589 9590 /* 9591 * Write requests are restricted to multiples of the system block size. 
9592 */ 9593 secmask = un->un_sys_blocksize - 1; 9594 9595 if (uio->uio_loffset & ((offset_t)(secmask))) { 9596 SD_ERROR(SD_LOG_READ_WRITE, un, 9597 "sdwrite: file offset not modulo %d\n", 9598 un->un_sys_blocksize); 9599 err = EINVAL; 9600 } else if (uio->uio_iov->iov_len & (secmask)) { 9601 SD_ERROR(SD_LOG_READ_WRITE, un, 9602 "sdwrite: transfer length not modulo %d\n", 9603 un->un_sys_blocksize); 9604 err = EINVAL; 9605 } else { 9606 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 9607 } 9608 return (err); 9609 } 9610 9611 9612 /* 9613 * Function: sdaread 9614 * 9615 * Description: Driver's aread(9e) entry point function. 9616 * 9617 * Arguments: dev - device number 9618 * aio - structure pointer describing where data is to be stored 9619 * cred_p - user credential pointer 9620 * 9621 * Return Code: ENXIO 9622 * EIO 9623 * EINVAL 9624 * value returned by aphysio 9625 * 9626 * Context: Kernel thread context. 9627 */ 9628 /* ARGSUSED */ 9629 static int 9630 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9631 { 9632 struct sd_lun *un = NULL; 9633 struct uio *uio = aio->aio_uio; 9634 int secmask; 9635 int err; 9636 9637 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9638 return (ENXIO); 9639 } 9640 9641 ASSERT(!mutex_owned(SD_MUTEX(un))); 9642 9643 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9644 mutex_enter(SD_MUTEX(un)); 9645 /* 9646 * Because the call to sd_ready_and_valid will issue I/O we 9647 * must wait here if either the device is suspended or 9648 * if it's power level is changing. 9649 */ 9650 while ((un->un_state == SD_STATE_SUSPENDED) || 9651 (un->un_state == SD_STATE_PM_CHANGING)) { 9652 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9653 } 9654 un->un_ncmds_in_driver++; 9655 mutex_exit(SD_MUTEX(un)); 9656 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9657 mutex_enter(SD_MUTEX(un)); 9658 un->un_ncmds_in_driver--; 9659 ASSERT(un->un_ncmds_in_driver >= 0); 9660 mutex_exit(SD_MUTEX(un)); 9661 return (EIO); 9662 } 9663 mutex_enter(SD_MUTEX(un)); 9664 un->un_ncmds_in_driver--; 9665 ASSERT(un->un_ncmds_in_driver >= 0); 9666 mutex_exit(SD_MUTEX(un)); 9667 } 9668 9669 /* 9670 * Read requests are restricted to multiples of the system block size. 9671 */ 9672 secmask = un->un_sys_blocksize - 1; 9673 9674 if (uio->uio_loffset & ((offset_t)(secmask))) { 9675 SD_ERROR(SD_LOG_READ_WRITE, un, 9676 "sdaread: file offset not modulo %d\n", 9677 un->un_sys_blocksize); 9678 err = EINVAL; 9679 } else if (uio->uio_iov->iov_len & (secmask)) { 9680 SD_ERROR(SD_LOG_READ_WRITE, un, 9681 "sdaread: transfer length not modulo %d\n", 9682 un->un_sys_blocksize); 9683 err = EINVAL; 9684 } else { 9685 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 9686 } 9687 return (err); 9688 } 9689 9690 9691 /* 9692 * Function: sdawrite 9693 * 9694 * Description: Driver's awrite(9e) entry point function. 9695 * 9696 * Arguments: dev - device number 9697 * aio - structure pointer describing where data is stored 9698 * cred_p - user credential pointer 9699 * 9700 * Return Code: ENXIO 9701 * EIO 9702 * EINVAL 9703 * value returned by aphysio 9704 * 9705 * Context: Kernel thread context. 
9706 */ 9707 /* ARGSUSED */ 9708 static int 9709 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 9710 { 9711 struct sd_lun *un = NULL; 9712 struct uio *uio = aio->aio_uio; 9713 int secmask; 9714 int err; 9715 9716 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9717 return (ENXIO); 9718 } 9719 9720 ASSERT(!mutex_owned(SD_MUTEX(un))); 9721 9722 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 9723 mutex_enter(SD_MUTEX(un)); 9724 /* 9725 * Because the call to sd_ready_and_valid will issue I/O we 9726 * must wait here if either the device is suspended or 9727 * if it's power level is changing. 9728 */ 9729 while ((un->un_state == SD_STATE_SUSPENDED) || 9730 (un->un_state == SD_STATE_PM_CHANGING)) { 9731 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9732 } 9733 un->un_ncmds_in_driver++; 9734 mutex_exit(SD_MUTEX(un)); 9735 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 9736 mutex_enter(SD_MUTEX(un)); 9737 un->un_ncmds_in_driver--; 9738 ASSERT(un->un_ncmds_in_driver >= 0); 9739 mutex_exit(SD_MUTEX(un)); 9740 return (EIO); 9741 } 9742 mutex_enter(SD_MUTEX(un)); 9743 un->un_ncmds_in_driver--; 9744 ASSERT(un->un_ncmds_in_driver >= 0); 9745 mutex_exit(SD_MUTEX(un)); 9746 } 9747 9748 /* 9749 * Write requests are restricted to multiples of the system block size. 9750 */ 9751 secmask = un->un_sys_blocksize - 1; 9752 9753 if (uio->uio_loffset & ((offset_t)(secmask))) { 9754 SD_ERROR(SD_LOG_READ_WRITE, un, 9755 "sdawrite: file offset not modulo %d\n", 9756 un->un_sys_blocksize); 9757 err = EINVAL; 9758 } else if (uio->uio_iov->iov_len & (secmask)) { 9759 SD_ERROR(SD_LOG_READ_WRITE, un, 9760 "sdawrite: transfer length not modulo %d\n", 9761 un->un_sys_blocksize); 9762 err = EINVAL; 9763 } else { 9764 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 9765 } 9766 return (err); 9767 } 9768 9769 9770 9771 9772 9773 /* 9774 * Driver IO processing follows the following sequence: 9775 * 9776 * sdioctl(9E) sdstrategy(9E) biodone(9F) 9777 * | | ^ 9778 * v v | 9779 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 9780 * | | | | 9781 * v | | | 9782 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 9783 * | | ^ ^ 9784 * v v | | 9785 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 9786 * | | | | 9787 * +---+ | +------------+ +-------+ 9788 * | | | | 9789 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9790 * | v | | 9791 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 9792 * | | ^ | 9793 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9794 * | v | | 9795 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 9796 * | | ^ | 9797 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 9798 * | v | | 9799 * | sd_checksum_iostart() sd_checksum_iodone() | 9800 * | | ^ | 9801 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 9802 * | v | | 9803 * | sd_pm_iostart() sd_pm_iodone() | 9804 * | | ^ | 9805 * | | | | 9806 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 9807 * | ^ 9808 * v | 9809 * sd_core_iostart() | 9810 * | | 9811 * | +------>(*destroypkt)() 9812 * +-> sd_start_cmds() <-+ | | 9813 * | | | v 9814 * | | | scsi_destroy_pkt(9F) 9815 * | | | 9816 * +->(*initpkt)() +- sdintr() 9817 * | | | | 9818 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 9819 * | +-> scsi_setup_cdb(9F) | 9820 * | | 9821 * +--> scsi_transport(9F) | 9822 * | | 9823 * +----> SCSA ---->+ 9824 * 9825 * 9826 * This code is based upon the following presumtions: 9827 * 9828 * - iostart and iodone functions operate on buf(9S) structures. 
These 9829 * functions perform the necessary operations on the buf(9S) and pass 9830 * them along to the next function in the chain by using the macros 9831 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 9832 * (for iodone side functions). 9833 * 9834 * - The iostart side functions may sleep. The iodone side functions 9835 * are called under interrupt context and may NOT sleep. Therefore 9836 * iodone side functions also may not call iostart side functions. 9837 * (NOTE: iostart side functions should NOT sleep for memory, as 9838 * this could result in deadlock.) 9839 * 9840 * - An iostart side function may call its corresponding iodone side 9841 * function directly (if necessary). 9842 * 9843 * - In the event of an error, an iostart side function can return a buf(9S) 9844 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 9845 * b_error in the usual way, of course). 9846 * 9847 * - The taskq mechanism may be used by the iodone side functions to dispatch 9848 * requests to the iostart side functions. The iostart side functions in 9849 * this case would be called under the context of a taskq thread, so it's 9850 * OK for them to block/sleep/spin in this case. 9851 * 9852 * - iostart side functions may allocate "shadow" buf(9S) structs and 9853 * pass them along to the next function in the chain. The corresponding 9854 * iodone side functions must coalesce the "shadow" bufs and return 9855 * the "original" buf to the next higher layer. 9856 * 9857 * - The b_private field of the buf(9S) struct holds a pointer to 9858 * an sd_xbuf struct, which contains information needed to 9859 * construct the scsi_pkt for the command. 9860 * 9861 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 9862 * layer must acquire & release the SD_MUTEX(un) as needed. 9863 */ 9864 9865 9866 /* 9867 * Create taskq for all targets in the system. This is created at 9868 * _init(9E) and destroyed at _fini(9E). 9869 * 9870 * Note: here we set the minalloc to a reasonably high number to ensure that 9871 * we will have an adequate supply of task entries available at interrupt time. 9872 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 9873 * sd_taskq_create(). Since we do not want to sleep for allocations at 9874 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 9875 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 9876 * requests at any one instant in time. 9877 */ 9878 #define SD_TASKQ_NUMTHREADS 8 9879 #define SD_TASKQ_MINALLOC 256 9880 #define SD_TASKQ_MAXALLOC 256 9881 9882 static taskq_t *sd_tq = NULL; 9883 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 9884 9885 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 9886 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 9887 9888 /* 9889 * The following task queue is being created for the write part of 9890 * read-modify-write of non-512 block size devices. 9891 * Limit the number of threads to 1 for now. This number was chosen 9892 * considering that the queue currently applies only to DVD-RAM and MO 9893 * drives, for which performance is not the main criterion at this stage.
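 *
 * (Editor's sketch, using a hypothetical worker function) An iodone-
 * side routine running at interrupt context would hand the deferred
 * write phase to this queue without sleeping:
 *
 *	if (taskq_dispatch(sd_wmr_tq, sd_example_rmw_write, bp,
 *	    TQ_NOSLEEP) == NULL) {
 *		... prepopulated entries exhausted; fail the command ...
 *	}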
9894 * Note: It needs to be explored whether we can use a single taskq in the future. 9895 */ 9896 #define SD_WMR_TASKQ_NUMTHREADS 1 9897 static taskq_t *sd_wmr_tq = NULL; 9898 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 9899 9900 /* 9901 * Function: sd_taskq_create 9902 * 9903 * Description: Create taskq thread(s) and preallocate task entries 9904 * 9905 * Return Code: None; the created queues are stored in sd_tq and sd_wmr_tq. 9906 * 9907 * Context: Can sleep. Requires blockable context. 9908 * 9909 * Notes: - The taskq() facility currently is NOT part of the DDI. 9910 * (definitely NOT recommended for 3rd-party drivers!) :-) 9911 * - taskq_create() will block for memory; also, it will panic 9912 * if it cannot create the requested number of threads. 9913 * - Currently taskq_create() creates threads that cannot be 9914 * swapped. 9915 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 9916 * supply of taskq entries at interrupt time (i.e., so that we 9917 * do not have to sleep for memory). 9918 */ 9919 9920 static void 9921 sd_taskq_create(void) 9922 { 9923 char taskq_name[TASKQ_NAMELEN]; 9924 9925 ASSERT(sd_tq == NULL); 9926 ASSERT(sd_wmr_tq == NULL); 9927 9928 (void) snprintf(taskq_name, sizeof (taskq_name), 9929 "%s_drv_taskq", sd_label); 9930 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 9931 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9932 TASKQ_PREPOPULATE)); 9933 9934 (void) snprintf(taskq_name, sizeof (taskq_name), 9935 "%s_rmw_taskq", sd_label); 9936 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 9937 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 9938 TASKQ_PREPOPULATE)); 9939 } 9940 9941 9942 /* 9943 * Function: sd_taskq_delete 9944 * 9945 * Description: Complementary cleanup routine for sd_taskq_create(). 9946 * 9947 * Context: Kernel thread context. 9948 */ 9949 9950 static void 9951 sd_taskq_delete(void) 9952 { 9953 ASSERT(sd_tq != NULL); 9954 ASSERT(sd_wmr_tq != NULL); 9955 taskq_destroy(sd_tq); 9956 taskq_destroy(sd_wmr_tq); 9957 sd_tq = NULL; 9958 sd_wmr_tq = NULL; 9959 } 9960 9961 9962 /* 9963 * Function: sdstrategy 9964 * 9965 * Description: Driver's strategy (9E) entry point function. 9966 * 9967 * Arguments: bp - pointer to buf(9S) 9968 * 9969 * Return Code: Always returns zero 9970 * 9971 * Context: Kernel thread context. 9972 */ 9973 9974 static int 9975 sdstrategy(struct buf *bp) 9976 { 9977 struct sd_lun *un; 9978 9979 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 9980 if (un == NULL) { 9981 bioerror(bp, EIO); 9982 bp->b_resid = bp->b_bcount; 9983 biodone(bp); 9984 return (0); 9985 } 9986 /* As was done in the past, fail new cmds if the state is dumping. */ 9987 if (un->un_state == SD_STATE_DUMPING) { 9988 bioerror(bp, ENXIO); 9989 bp->b_resid = bp->b_bcount; 9990 biodone(bp); 9991 return (0); 9992 } 9993 9994 ASSERT(!mutex_owned(SD_MUTEX(un))); 9995 9996 /* 9997 * Commands may sneak in while the mutex was released during 9998 * DDI_SUSPEND, so we should block new commands. However, old 9999 * commands that are still in the driver at this point should 10000 * still be allowed to drain. 10001 */ 10002 mutex_enter(SD_MUTEX(un)); 10003 /* 10004 * Must wait here if either the device is suspended or 10005 * if its power level is changing.
10006 */ 10007 while ((un->un_state == SD_STATE_SUSPENDED) || 10008 (un->un_state == SD_STATE_PM_CHANGING)) { 10009 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10010 } 10011 10012 un->un_ncmds_in_driver++; 10013 10014 /* 10015 * atapi: Since we are running the CD for now in PIO mode we need to 10016 * call bp_mapin here to avoid bp_mapin called interrupt context under 10017 * the HBA's init_pkt routine. 10018 */ 10019 if (un->un_f_cfg_is_atapi == TRUE) { 10020 mutex_exit(SD_MUTEX(un)); 10021 bp_mapin(bp); 10022 mutex_enter(SD_MUTEX(un)); 10023 } 10024 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10025 un->un_ncmds_in_driver); 10026 10027 mutex_exit(SD_MUTEX(un)); 10028 10029 /* 10030 * This will (eventually) allocate the sd_xbuf area and 10031 * call sd_xbuf_strategy(). We just want to return the 10032 * result of ddi_xbuf_qstrategy so that we have an opt- 10033 * imized tail call which saves us a stack frame. 10034 */ 10035 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10036 } 10037 10038 10039 /* 10040 * Function: sd_xbuf_strategy 10041 * 10042 * Description: Function for initiating IO operations via the 10043 * ddi_xbuf_qstrategy() mechanism. 10044 * 10045 * Context: Kernel thread context. 10046 */ 10047 10048 static void 10049 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 10050 { 10051 struct sd_lun *un = arg; 10052 10053 ASSERT(bp != NULL); 10054 ASSERT(xp != NULL); 10055 ASSERT(un != NULL); 10056 ASSERT(!mutex_owned(SD_MUTEX(un))); 10057 10058 /* 10059 * Initialize the fields in the xbuf and save a pointer to the 10060 * xbuf in bp->b_private. 10061 */ 10062 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 10063 10064 /* Send the buf down the iostart chain */ 10065 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 10066 } 10067 10068 10069 /* 10070 * Function: sd_xbuf_init 10071 * 10072 * Description: Prepare the given sd_xbuf struct for use. 10073 * 10074 * Arguments: un - ptr to softstate 10075 * bp - ptr to associated buf(9S) 10076 * xp - ptr to associated sd_xbuf 10077 * chain_type - IO chain type to use: 10078 * SD_CHAIN_NULL 10079 * SD_CHAIN_BUFIO 10080 * SD_CHAIN_USCSI 10081 * SD_CHAIN_DIRECT 10082 * SD_CHAIN_DIRECT_PRIORITY 10083 * pktinfop - ptr to private data struct for scsi_pkt(9S) 10084 * initialization; may be NULL if none. 10085 * 10086 * Context: Kernel thread context 10087 */ 10088 10089 static void 10090 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 10091 uchar_t chain_type, void *pktinfop) 10092 { 10093 int index; 10094 10095 ASSERT(un != NULL); 10096 ASSERT(bp != NULL); 10097 ASSERT(xp != NULL); 10098 10099 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 10100 bp, chain_type); 10101 10102 xp->xb_un = un; 10103 xp->xb_pktp = NULL; 10104 xp->xb_pktinfo = pktinfop; 10105 xp->xb_private = bp->b_private; 10106 xp->xb_blkno = (daddr_t)bp->b_blkno; 10107 10108 /* 10109 * Set up the iostart and iodone chain indexes in the xbuf, based 10110 * upon the specified chain type to use. 10111 */ 10112 switch (chain_type) { 10113 case SD_CHAIN_NULL: 10114 /* 10115 * Fall thru to just use the values for the buf type, even 10116 * tho for the NULL chain these values will never be used. 
10117 */ 10118 /* FALLTHRU */ 10119 case SD_CHAIN_BUFIO: 10120 index = un->un_buf_chain_type; 10121 break; 10122 case SD_CHAIN_USCSI: 10123 index = un->un_uscsi_chain_type; 10124 break; 10125 case SD_CHAIN_DIRECT: 10126 index = un->un_direct_chain_type; 10127 break; 10128 case SD_CHAIN_DIRECT_PRIORITY: 10129 index = un->un_priority_chain_type; 10130 break; 10131 default: 10132 /* We're really broken if we ever get here... */ 10133 panic("sd_xbuf_init: illegal chain type!"); 10134 /*NOTREACHED*/ 10135 } 10136 10137 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 10138 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 10139 10140 /* 10141 * It might be a bit easier to simply bzero the entire xbuf above, 10142 * but it turns out that since we init a fair number of members anyway, 10143 * we save a fair number cycles by doing explicit assignment of zero. 10144 */ 10145 xp->xb_pkt_flags = 0; 10146 xp->xb_dma_resid = 0; 10147 xp->xb_retry_count = 0; 10148 xp->xb_victim_retry_count = 0; 10149 xp->xb_ua_retry_count = 0; 10150 xp->xb_sense_bp = NULL; 10151 xp->xb_sense_status = 0; 10152 xp->xb_sense_state = 0; 10153 xp->xb_sense_resid = 0; 10154 10155 bp->b_private = xp; 10156 bp->b_flags &= ~(B_DONE | B_ERROR); 10157 bp->b_resid = 0; 10158 bp->av_forw = NULL; 10159 bp->av_back = NULL; 10160 bioerror(bp, 0); 10161 10162 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 10163 } 10164 10165 10166 /* 10167 * Function: sd_uscsi_strategy 10168 * 10169 * Description: Wrapper for calling into the USCSI chain via physio(9F) 10170 * 10171 * Arguments: bp - buf struct ptr 10172 * 10173 * Return Code: Always returns 0 10174 * 10175 * Context: Kernel thread context 10176 */ 10177 10178 static int 10179 sd_uscsi_strategy(struct buf *bp) 10180 { 10181 struct sd_lun *un; 10182 struct sd_uscsi_info *uip; 10183 struct sd_xbuf *xp; 10184 uchar_t chain_type; 10185 10186 ASSERT(bp != NULL); 10187 10188 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10189 if (un == NULL) { 10190 bioerror(bp, EIO); 10191 bp->b_resid = bp->b_bcount; 10192 biodone(bp); 10193 return (0); 10194 } 10195 10196 ASSERT(!mutex_owned(SD_MUTEX(un))); 10197 10198 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 10199 10200 mutex_enter(SD_MUTEX(un)); 10201 /* 10202 * atapi: Since we are running the CD for now in PIO mode we need to 10203 * call bp_mapin here to avoid bp_mapin called interrupt context under 10204 * the HBA's init_pkt routine. 
10205 */ 10206 if (un->un_f_cfg_is_atapi == TRUE) { 10207 mutex_exit(SD_MUTEX(un)); 10208 bp_mapin(bp); 10209 mutex_enter(SD_MUTEX(un)); 10210 } 10211 un->un_ncmds_in_driver++; 10212 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 10213 un->un_ncmds_in_driver); 10214 mutex_exit(SD_MUTEX(un)); 10215 10216 /* 10217 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 10218 */ 10219 ASSERT(bp->b_private != NULL); 10220 uip = (struct sd_uscsi_info *)bp->b_private; 10221 10222 switch (uip->ui_flags) { 10223 case SD_PATH_DIRECT: 10224 chain_type = SD_CHAIN_DIRECT; 10225 break; 10226 case SD_PATH_DIRECT_PRIORITY: 10227 chain_type = SD_CHAIN_DIRECT_PRIORITY; 10228 break; 10229 default: 10230 chain_type = SD_CHAIN_USCSI; 10231 break; 10232 } 10233 10234 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 10235 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10236 10237 /* Use the index obtained within xbuf_init */ 10238 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10239 10240 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10241 10242 return (0); 10243 } 10244 10245 /* 10246 * Function: sd_send_scsi_cmd 10247 * 10248 * Description: Runs a USCSI command for user (when called thru sdioctl), 10249 * or for the driver 10250 * 10251 * Arguments: dev - the dev_t for the device 10252 * incmd - ptr to a valid uscsi_cmd struct 10253 * flag - bit flag, indicating open settings, 32/64 bit type 10254 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10255 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10256 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10257 * to use the USCSI "direct" chain and bypass the normal 10258 * command waitq. 10259 * 10260 * Return Code: 0 - successful completion of the given command 10261 * EIO - scsi_uscsi_handle_command() failed 10262 * ENXIO - soft state not found for specified dev 10263 * EINVAL 10264 * EFAULT - copyin/copyout error 10265 * return code of scsi_uscsi_handle_command(): 10266 * EIO 10267 * ENXIO 10268 * EACCES 10269 * 10270 * Context: Waits for command to complete. Can sleep. 
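 *
 * (Editor's sketch, illustrative only) A driver-internal caller
 * could issue a TEST UNIT READY through this path roughly as:
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	ucmd.uscsi_timeout = 60;
 *	err = sd_send_scsi_cmd(dev, &ucmd, FKIOCTL,
 *	    UIO_SYSSPACE, SD_PATH_DIRECT);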
10271 */ 10272 10273 static int 10274 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10275 enum uio_seg dataspace, int path_flag) 10276 { 10277 struct sd_uscsi_info *uip; 10278 struct uscsi_cmd *uscmd; 10279 struct sd_lun *un; 10280 int format = 0; 10281 int rval; 10282 10283 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10284 if (un == NULL) { 10285 return (ENXIO); 10286 } 10287 10288 ASSERT(!mutex_owned(SD_MUTEX(un))); 10289 10290 #ifdef SDDEBUG 10291 switch (dataspace) { 10292 case UIO_USERSPACE: 10293 SD_TRACE(SD_LOG_IO, un, 10294 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10295 break; 10296 case UIO_SYSSPACE: 10297 SD_TRACE(SD_LOG_IO, un, 10298 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10299 break; 10300 default: 10301 SD_TRACE(SD_LOG_IO, un, 10302 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10303 break; 10304 } 10305 #endif 10306 10307 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10308 SD_ADDRESS(un), &uscmd); 10309 if (rval != 0) { 10310 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 10311 "scsi_uscsi_alloc_and_copyin failed\n", un); 10312 return (rval); 10313 } 10314 10315 if ((uscmd->uscsi_cdb != NULL) && 10316 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10317 mutex_enter(SD_MUTEX(un)); 10318 un->un_f_format_in_progress = TRUE; 10319 mutex_exit(SD_MUTEX(un)); 10320 format = 1; 10321 } 10322 10323 /* 10324 * Allocate an sd_uscsi_info struct and fill it with the info 10325 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10326 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10327 * since we allocate the buf here in this function, we do not 10328 * need to preserve the prior contents of b_private. 10329 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10330 */ 10331 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10332 uip->ui_flags = path_flag; 10333 uip->ui_cmdp = uscmd; 10334 10335 /* 10336 * Commands sent with priority are intended for error recovery 10337 * situations, and do not have retries performed. 10338 */ 10339 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10340 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10341 } 10342 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10343 10344 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10345 sd_uscsi_strategy, NULL, uip); 10346 10347 #ifdef SDDEBUG 10348 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10349 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10350 uscmd->uscsi_status, uscmd->uscsi_resid); 10351 if (uscmd->uscsi_bufaddr != NULL) { 10352 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10353 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10354 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10355 if (dataspace == UIO_SYSSPACE) { 10356 SD_DUMP_MEMORY(un, SD_LOG_IO, 10357 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10358 uscmd->uscsi_buflen, SD_LOG_HEX); 10359 } 10360 } 10361 #endif 10362 10363 if (format == 1) { 10364 mutex_enter(SD_MUTEX(un)); 10365 un->un_f_format_in_progress = FALSE; 10366 mutex_exit(SD_MUTEX(un)); 10367 } 10368 10369 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10370 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10371 10372 return (rval); 10373 } 10374 10375 10376 /* 10377 * Function: sd_buf_iodone 10378 * 10379 * Description: Frees the sd_xbuf & returns the buf to its originator. 10380 * 10381 * Context: May be called from interrupt context. 
10382 */ 10383 /* ARGSUSED */ 10384 static void 10385 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10386 { 10387 struct sd_xbuf *xp; 10388 10389 ASSERT(un != NULL); 10390 ASSERT(bp != NULL); 10391 ASSERT(!mutex_owned(SD_MUTEX(un))); 10392 10393 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10394 10395 xp = SD_GET_XBUF(bp); 10396 ASSERT(xp != NULL); 10397 10398 mutex_enter(SD_MUTEX(un)); 10399 10400 /* 10401 * Grab time when the cmd completed. 10402 * This is used for determining if the system has been 10403 * idle long enough to make it idle to the PM framework. 10404 * This is for lowering the overhead, and therefore improving 10405 * performance per I/O operation. 10406 */ 10407 un->un_pm_idle_time = ddi_get_time(); 10408 10409 un->un_ncmds_in_driver--; 10410 ASSERT(un->un_ncmds_in_driver >= 0); 10411 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10412 un->un_ncmds_in_driver); 10413 10414 mutex_exit(SD_MUTEX(un)); 10415 10416 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10417 biodone(bp); /* bp is gone after this */ 10418 10419 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10420 } 10421 10422 10423 /* 10424 * Function: sd_uscsi_iodone 10425 * 10426 * Description: Frees the sd_xbuf & returns the buf to its originator. 10427 * 10428 * Context: May be called from interrupt context. 10429 */ 10430 /* ARGSUSED */ 10431 static void 10432 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10433 { 10434 struct sd_xbuf *xp; 10435 10436 ASSERT(un != NULL); 10437 ASSERT(bp != NULL); 10438 10439 xp = SD_GET_XBUF(bp); 10440 ASSERT(xp != NULL); 10441 ASSERT(!mutex_owned(SD_MUTEX(un))); 10442 10443 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10444 10445 bp->b_private = xp->xb_private; 10446 10447 mutex_enter(SD_MUTEX(un)); 10448 10449 /* 10450 * Grab time when the cmd completed. 10451 * This is used for determining if the system has been 10452 * idle long enough to make it idle to the PM framework. 10453 * This is for lowering the overhead, and therefore improving 10454 * performance per I/O operation. 10455 */ 10456 un->un_pm_idle_time = ddi_get_time(); 10457 10458 un->un_ncmds_in_driver--; 10459 ASSERT(un->un_ncmds_in_driver >= 0); 10460 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10461 un->un_ncmds_in_driver); 10462 10463 mutex_exit(SD_MUTEX(un)); 10464 10465 kmem_free(xp, sizeof (struct sd_xbuf)); 10466 biodone(bp); 10467 10468 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 10469 } 10470 10471 10472 /* 10473 * Function: sd_mapblockaddr_iostart 10474 * 10475 * Description: Verify request lies withing the partition limits for 10476 * the indicated minor device. Issue "overrun" buf if 10477 * request would exceed partition range. Converts 10478 * partition-relative block address to absolute. 10479 * 10480 * Context: Can sleep 10481 * 10482 * Issues: This follows what the old code did, in terms of accessing 10483 * some of the partition info in the unit struct without holding 10484 * the mutext. This is a general issue, if the partition info 10485 * can be altered while IO is in progress... as soon as we send 10486 * a buf, its partitioning can be invalid before it gets to the 10487 * device. Probably the right fix is to move partitioning out 10488 * of the driver entirely. 
10489 */ 10490 10491 static void 10492 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 10493 { 10494 diskaddr_t nblocks; /* #blocks in the given partition */ 10495 daddr_t blocknum; /* Block number specified by the buf */ 10496 size_t requested_nblocks; 10497 size_t available_nblocks; 10498 int partition; 10499 diskaddr_t partition_offset; 10500 struct sd_xbuf *xp; 10501 10502 10503 ASSERT(un != NULL); 10504 ASSERT(bp != NULL); 10505 ASSERT(!mutex_owned(SD_MUTEX(un))); 10506 10507 SD_TRACE(SD_LOG_IO_PARTITION, un, 10508 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 10509 10510 xp = SD_GET_XBUF(bp); 10511 ASSERT(xp != NULL); 10512 10513 /* 10514 * If the geometry is not indicated as valid, attempt to access 10515 * the unit & verify the geometry/label. This can be the case for 10516 * removable-media devices, of if the device was opened in 10517 * NDELAY/NONBLOCK mode. 10518 */ 10519 if (!SD_IS_VALID_LABEL(un) && 10520 (sd_ready_and_valid(un) != SD_READY_VALID)) { 10521 /* 10522 * For removable devices it is possible to start an I/O 10523 * without a media by opening the device in nodelay mode. 10524 * Also for writable CDs there can be many scenarios where 10525 * there is no geometry yet but volume manager is trying to 10526 * issue a read() just because it can see TOC on the CD. So 10527 * do not print a message for removables. 10528 */ 10529 if (!un->un_f_has_removable_media) { 10530 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10531 "i/o to invalid geometry\n"); 10532 } 10533 bioerror(bp, EIO); 10534 bp->b_resid = bp->b_bcount; 10535 SD_BEGIN_IODONE(index, un, bp); 10536 return; 10537 } 10538 10539 partition = SDPART(bp->b_edev); 10540 10541 nblocks = 0; 10542 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10543 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10544 10545 /* 10546 * blocknum is the starting block number of the request. At this 10547 * point it is still relative to the start of the minor device. 10548 */ 10549 blocknum = xp->xb_blkno; 10550 10551 /* 10552 * Legacy: If the starting block number is one past the last block 10553 * in the partition, do not set B_ERROR in the buf. 10554 */ 10555 if (blocknum == nblocks) { 10556 goto error_exit; 10557 } 10558 10559 /* 10560 * Confirm that the first block of the request lies within the 10561 * partition limits. Also the requested number of bytes must be 10562 * a multiple of the system block size. 10563 */ 10564 if ((blocknum < 0) || (blocknum >= nblocks) || 10565 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10566 bp->b_flags |= B_ERROR; 10567 goto error_exit; 10568 } 10569 10570 /* 10571 * If the requsted # blocks exceeds the available # blocks, that 10572 * is an overrun of the partition. 10573 */ 10574 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10575 available_nblocks = (size_t)(nblocks - blocknum); 10576 ASSERT(nblocks >= blocknum); 10577 10578 if (requested_nblocks > available_nblocks) { 10579 /* 10580 * Allocate an "overrun" buf to allow the request to proceed 10581 * for the amount of space available in the partition. The 10582 * amount not transferred will be added into the b_resid 10583 * when the operation is complete. The overrun buf 10584 * replaces the original buf here, and the original buf 10585 * is saved inside the overrun buf, for later use. 
10586 */ 10587 size_t resid = SD_SYSBLOCKS2BYTES(un, 10588 (offset_t)(requested_nblocks - available_nblocks)); 10589 size_t count = bp->b_bcount - resid; 10590 /* 10591 * Note: count is an unsigned entity thus it'll NEVER 10592 * be less than 0 so ASSERT the original values are 10593 * correct. 10594 */ 10595 ASSERT(bp->b_bcount >= resid); 10596 10597 bp = sd_bioclone_alloc(bp, count, blocknum, 10598 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10599 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10600 ASSERT(xp != NULL); 10601 } 10602 10603 /* At this point there should be no residual for this buf. */ 10604 ASSERT(bp->b_resid == 0); 10605 10606 /* Convert the block number to an absolute address. */ 10607 xp->xb_blkno += partition_offset; 10608 10609 SD_NEXT_IOSTART(index, un, bp); 10610 10611 SD_TRACE(SD_LOG_IO_PARTITION, un, 10612 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10613 10614 return; 10615 10616 error_exit: 10617 bp->b_resid = bp->b_bcount; 10618 SD_BEGIN_IODONE(index, un, bp); 10619 SD_TRACE(SD_LOG_IO_PARTITION, un, 10620 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10621 } 10622 10623 10624 /* 10625 * Function: sd_mapblockaddr_iodone 10626 * 10627 * Description: Completion-side processing for partition management. 10628 * 10629 * Context: May be called under interrupt context 10630 */ 10631 10632 static void 10633 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10634 { 10635 /* int partition; */ /* Not used, see below. */ 10636 ASSERT(un != NULL); 10637 ASSERT(bp != NULL); 10638 ASSERT(!mutex_owned(SD_MUTEX(un))); 10639 10640 SD_TRACE(SD_LOG_IO_PARTITION, un, 10641 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10642 10643 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10644 /* 10645 * We have an "overrun" buf to deal with... 10646 */ 10647 struct sd_xbuf *xp; 10648 struct buf *obp; /* ptr to the original buf */ 10649 10650 xp = SD_GET_XBUF(bp); 10651 ASSERT(xp != NULL); 10652 10653 /* Retrieve the pointer to the original buf */ 10654 obp = (struct buf *)xp->xb_private; 10655 ASSERT(obp != NULL); 10656 10657 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10658 bioerror(obp, bp->b_error); 10659 10660 sd_bioclone_free(bp); 10661 10662 /* 10663 * Get back the original buf. 10664 * Note that since the restoration of xb_blkno below 10665 * was removed, the sd_xbuf is not needed. 10666 */ 10667 bp = obp; 10668 /* 10669 * xp = SD_GET_XBUF(bp); 10670 * ASSERT(xp != NULL); 10671 */ 10672 } 10673 10674 /* 10675 * Convert sd->xb_blkno back to a minor-device relative value. 10676 * Note: this has been commented out, as it is not needed in the 10677 * current implementation of the driver (ie, since this function 10678 * is at the top of the layering chains, so the info will be 10679 * discarded) and it is in the "hot" IO path. 10680 * 10681 * partition = getminor(bp->b_edev) & SDPART_MASK; 10682 * xp->xb_blkno -= un->un_offset[partition]; 10683 */ 10684 10685 SD_NEXT_IODONE(index, un, bp); 10686 10687 SD_TRACE(SD_LOG_IO_PARTITION, un, 10688 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10689 } 10690 10691 10692 /* 10693 * Function: sd_mapblocksize_iostart 10694 * 10695 * Description: Convert between system block size (un->un_sys_blocksize) 10696 * and target block size (un->un_tgt_blocksize). 10697 * 10698 * Context: Can sleep to allocate resources. 
10699 * 10700 * Assumptions: A higher layer has already performed any partition validation, 10701 * and converted the xp->xb_blkno to an absolute value relative 10702 * to the start of the device. 10703 * 10704 * It is also assumed that the higher layer has implemented 10705 * an "overrun" mechanism for the case where the request would 10706 * read/write beyond the end of a partition. In this case we 10707 * assume (and ASSERT) that bp->b_resid == 0. 10708 * 10709 * Note: The implementation for this routine assumes the target 10710 * block size remains constant between allocation and transport. 10711 */ 10712 10713 static void 10714 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 10715 { 10716 struct sd_mapblocksize_info *bsp; 10717 struct sd_xbuf *xp; 10718 offset_t first_byte; 10719 daddr_t start_block, end_block; 10720 daddr_t request_bytes; 10721 ushort_t is_aligned = FALSE; 10722 10723 ASSERT(un != NULL); 10724 ASSERT(bp != NULL); 10725 ASSERT(!mutex_owned(SD_MUTEX(un))); 10726 ASSERT(bp->b_resid == 0); 10727 10728 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10729 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 10730 10731 /* 10732 * For a non-writable CD, a write request is an error 10733 */ 10734 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 10735 (un->un_f_mmc_writable_media == FALSE)) { 10736 bioerror(bp, EIO); 10737 bp->b_resid = bp->b_bcount; 10738 SD_BEGIN_IODONE(index, un, bp); 10739 return; 10740 } 10741 10742 /* 10743 * We do not need a shadow buf if the device is using 10744 * un->un_sys_blocksize as its block size or if bcount == 0. 10745 * In this case there is no layer-private data block allocated. 10746 */ 10747 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10748 (bp->b_bcount == 0)) { 10749 goto done; 10750 } 10751 10752 #if defined(__i386) || defined(__amd64) 10753 /* We do not support non-block-aligned transfers for ROD devices */ 10754 ASSERT(!ISROD(un)); 10755 #endif 10756 10757 xp = SD_GET_XBUF(bp); 10758 ASSERT(xp != NULL); 10759 10760 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10761 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 10762 un->un_tgt_blocksize, un->un_sys_blocksize); 10763 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10764 "request start block:0x%x\n", xp->xb_blkno); 10765 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 10766 "request len:0x%x\n", bp->b_bcount); 10767 10768 /* 10769 * Allocate the layer-private data area for the mapblocksize layer. 10770 * Layers are allowed to use the xp_private member of the sd_xbuf 10771 * struct to store the pointer to their layer-private data block, but 10772 * each layer also has the responsibility of restoring the prior 10773 * contents of xb_private before returning the buf/xbuf to the 10774 * higher layer that sent it. 10775 * 10776 * Here we save the prior contents of xp->xb_private into the 10777 * bsp->mbs_oprivate field of our layer-private data area. This value 10778 * is restored by sd_mapblocksize_iodone() just prior to freeing up 10779 * the layer-private area and returning the buf/xbuf to the layer 10780 * that sent it. 10781 * 10782 * Note that here we use kmem_zalloc for the allocation as there are 10783 * parts of the mapblocksize code that expect certain fields to be 10784 * zero unless explicitly set to a required value. 
*/ 10786 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10787 bsp->mbs_oprivate = xp->xb_private; 10788 xp->xb_private = bsp; 10789 10790 /* 10791 * This treats the data on the disk (target) as an array of bytes. 10792 * first_byte is the byte offset, from the beginning of the device, 10793 * to the location of the request. This is converted from a 10794 * un->un_sys_blocksize block address to a byte offset, and then back 10795 * to a block address based upon a un->un_tgt_blocksize block size. 10796 * 10797 * xp->xb_blkno should be absolute upon entry into this function, 10798 * but it is based upon partitions that use the "system" 10799 * block size. It must be adjusted to reflect the block size of 10800 * the target. 10801 * 10802 * Note that end_block is actually the block that follows the last 10803 * block of the request, but that's what is needed for the computation. 10804 */ 10805 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 10806 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 10807 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 10808 un->un_tgt_blocksize; 10809 10810 /* request_bytes is rounded up to a multiple of the target block size */ 10811 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 10812 10813 /* 10814 * See if the starting address of the request and the request 10815 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 10816 * then we do not need to allocate a shadow buf to handle the request. 10817 */ 10818 if (((first_byte % un->un_tgt_blocksize) == 0) && 10819 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 10820 is_aligned = TRUE; 10821 } 10822 10823 if ((bp->b_flags & B_READ) == 0) { 10824 /* 10825 * Lock the range for a write operation. An aligned request is 10826 * considered a simple write; otherwise the request must be a 10827 * read-modify-write. 10828 */ 10829 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 10830 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 10831 } 10832 10833 /* 10834 * Alloc a shadow buf if the request is not aligned. Also, this is 10835 * where the READ command is generated for a read-modify-write. (The 10836 * write phase is deferred until after the read completes.) 10837 */ 10838 if (is_aligned == FALSE) { 10839 10840 struct sd_mapblocksize_info *shadow_bsp; 10841 struct sd_xbuf *shadow_xp; 10842 struct buf *shadow_bp; 10843 10844 /* 10845 * Allocate the shadow buf and its associated xbuf. Note that 10846 * after this call the xb_blkno value in both the original 10847 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 10848 * same: absolute relative to the start of the device, and 10849 * adjusted for the target block size. The b_blkno in the 10850 * shadow buf will also be set to this value. We should never 10851 * change b_blkno in the original bp however. 10852 * 10853 * Note also that the shadow buf will always need to be a 10854 * READ command, regardless of whether the incoming command 10855 * is a READ or a WRITE. 10856 */ 10857 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 10858 xp->xb_blkno, 10859 (int (*)(struct buf *)) sd_mapblocksize_iodone); 10860 10861 shadow_xp = SD_GET_XBUF(shadow_bp); 10862 10863 /* 10864 * Allocate the layer-private data for the shadow buf. 10865 * (No need to preserve xb_private in the shadow xbuf.)
10866 */ 10867 shadow_xp->xb_private = shadow_bsp = 10868 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 10869 10870 /* 10871 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 10872 * to figure out where the start of the user data is (based upon 10873 * the system block size) in the data returned by the READ 10874 * command (which will be based upon the target blocksize). Note 10875 * that this is only really used if the request is unaligned. 10876 */ 10877 bsp->mbs_copy_offset = (ssize_t)(first_byte - 10878 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 10879 ASSERT((bsp->mbs_copy_offset >= 0) && 10880 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 10881 10882 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 10883 10884 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 10885 10886 /* Transfer the wmap (if any) to the shadow buf */ 10887 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 10888 bsp->mbs_wmp = NULL; 10889 10890 /* 10891 * The shadow buf goes on from here in place of the 10892 * original buf. 10893 */ 10894 shadow_bsp->mbs_orig_bp = bp; 10895 bp = shadow_bp; 10896 } 10897 10898 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10899 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 10900 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10901 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 10902 request_bytes); 10903 SD_INFO(SD_LOG_IO_RMMEDIA, un, 10904 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 10905 10906 done: 10907 SD_NEXT_IOSTART(index, un, bp); 10908 10909 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10910 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 10911 } 10912 10913 10914 /* 10915 * Function: sd_mapblocksize_iodone 10916 * 10917 * Description: Completion side processing for block-size mapping. 10918 * 10919 * Context: May be called under interrupt context 10920 */ 10921 10922 static void 10923 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 10924 { 10925 struct sd_mapblocksize_info *bsp; 10926 struct sd_xbuf *xp; 10927 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 10928 struct buf *orig_bp; /* ptr to the original buf */ 10929 offset_t shadow_end; 10930 offset_t request_end; 10931 offset_t shadow_start; 10932 ssize_t copy_offset; 10933 size_t copy_length; 10934 size_t shortfall; 10935 uint_t is_write; /* TRUE if this bp is a WRITE */ 10936 uint_t has_wmap; /* TRUE if this bp has a wmap */ 10937 10938 ASSERT(un != NULL); 10939 ASSERT(bp != NULL); 10940 10941 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 10942 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 10943 10944 /* 10945 * There is no shadow buf or layer-private data if the target is 10946 * using un->un_sys_blocksize as its block size or if bcount == 0. 10947 */ 10948 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 10949 (bp->b_bcount == 0)) { 10950 goto exit; 10951 } 10952 10953 xp = SD_GET_XBUF(bp); 10954 ASSERT(xp != NULL); 10955 10956 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 10957 bsp = xp->xb_private; 10958 10959 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 10960 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 10961 10962 if (is_write) { 10963 /* 10964 * For a WRITE request we must free up the block range that 10965 * we have locked up. This holds regardless of whether this is 10966 * an aligned write request or a read-modify-write request.
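 *
 * Recapping the full read-modify-write sequence for an unaligned
 * write, as implemented by this layer:
 *
 *   1.	sd_mapblocksize_iostart() locks the target block range
 *	(SD_WTYPE_RMW) and issues a READ of the whole, block-aligned
 *	range via the shadow buf.
 *   2.	On READ completion, this routine copies the caller's data
 *	over the returned data at mbs_copy_offset, converts the
 *	shadow buf from a READ to a WRITE, and dispatches it to the
 *	sd_wmr_tq taskq.
 *   3.	When the WRITE completes, this routine runs once more,
 *	unlocks the range, and passes the original buf back up.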
10967 */ 10968 sd_range_unlock(un, bsp->mbs_wmp); 10969 bsp->mbs_wmp = NULL; 10970 } 10971 10972 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 10973 /* 10974 * An aligned read or write command will have no shadow buf; 10975 * there is not much else to do with it. 10976 */ 10977 goto done; 10978 } 10979 10980 orig_bp = bsp->mbs_orig_bp; 10981 ASSERT(orig_bp != NULL); 10982 orig_xp = SD_GET_XBUF(orig_bp); 10983 ASSERT(orig_xp != NULL); 10984 ASSERT(!mutex_owned(SD_MUTEX(un))); 10985 10986 if (!is_write && has_wmap) { 10987 /* 10988 * A READ with a wmap means this is the READ phase of a 10989 * read-modify-write. If an error occurred on the READ then 10990 * we do not proceed with the WRITE phase or copy any data. 10991 * Just release the write maps and return with an error. 10992 */ 10993 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 10994 orig_bp->b_resid = orig_bp->b_bcount; 10995 bioerror(orig_bp, bp->b_error); 10996 sd_range_unlock(un, bsp->mbs_wmp); 10997 goto freebuf_done; 10998 } 10999 } 11000 11001 /* 11002 * Here is where we set up to copy the data from the shadow buf 11003 * into the space associated with the original buf. 11004 * 11005 * To deal with the conversion between block sizes, these 11006 * computations treat the data as an array of bytes, with the 11007 * first byte (byte 0) corresponding to the first byte in the 11008 * first block on the disk. 11009 */ 11010 11011 /* 11012 * shadow_start and shadow_end indicate the location and extent of 11013 * the data returned with the shadow IO request. 11014 */ 11015 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11016 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11017 11018 /* 11019 * copy_offset gives the offset (in bytes) from the start of the first 11020 * block of the READ request to the beginning of the data. We retrieve 11021 * this value from the mbs_copy_offset field of the layer-private data, 11022 * where it was saved by sd_mapblocksize_iostart(). copy_length gives the amount of 11023 * data to be copied (in bytes). 11024 */ 11025 copy_offset = bsp->mbs_copy_offset; 11026 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11027 copy_length = orig_bp->b_bcount; 11028 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11029 11030 /* 11031 * Set up the resid and error fields of orig_bp as appropriate. 11032 */ 11033 if (shadow_end >= request_end) { 11034 /* We got all the requested data; set resid to zero */ 11035 orig_bp->b_resid = 0; 11036 } else { 11037 /* 11038 * We failed to get enough data to fully satisfy the original 11039 * request. Just copy back whatever data we got and set 11040 * up the residual and error code as required. 11041 * 11042 * 'shortfall' is the amount by which the data received with the 11043 * shadow buf has "fallen short" of the requested amount. 11044 */ 11045 shortfall = (size_t)(request_end - shadow_end); 11046 11047 if (shortfall > orig_bp->b_bcount) { 11048 /* 11049 * We did not get enough data to even partially 11050 * fulfill the original request. The residual is 11051 * equal to the amount requested. 11052 */ 11053 orig_bp->b_resid = orig_bp->b_bcount; 11054 } else { 11055 /* 11056 * We did not get all the data that we requested 11057 * from the device, but we will try to return what 11058 * portion we did get.
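 *
 * Continuing the earlier worked example (shadow_start == 0,
 * copy_offset == 1536, orig b_bcount == 1024, so
 * request_end == 2560): if the device returned only the first
 * 2048 bytes of the 4096-byte shadow READ, then shadow_end == 2048
 * and shortfall == 512, and the original buf completes with
 * b_resid == 512 after 512 bytes of data are copied back.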
11059 */ 11060 orig_bp->b_resid = shortfall; 11061 } 11062 ASSERT(copy_length >= orig_bp->b_resid); 11063 copy_length -= orig_bp->b_resid; 11064 } 11065 11066 /* Propagate the error code from the shadow buf to the original buf */ 11067 bioerror(orig_bp, bp->b_error); 11068 11069 if (is_write) { 11070 goto freebuf_done; /* No data copying for a WRITE */ 11071 } 11072 11073 if (has_wmap) { 11074 /* 11075 * This is a READ command from the READ phase of a 11076 * read-modify-write request. We have to copy the data given 11077 * by the user OVER the data returned by the READ command, 11078 * then convert the command from a READ to a WRITE and send 11079 * it back to the target. 11080 */ 11081 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11082 copy_length); 11083 11084 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11085 11086 /* 11087 * Dispatch the WRITE command to the taskq thread, which 11088 * will in turn send the command to the target. When the 11089 * WRITE command completes, we (sd_mapblocksize_iodone()) 11090 * will get called again as part of the iodone chain 11091 * processing for it. Note that we will still be dealing 11092 * with the shadow buf at that point. 11093 */ 11094 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11095 KM_NOSLEEP) != 0) { 11096 /* 11097 * Dispatch was successful so we are done. Return 11098 * without going any higher up the iodone chain. Do 11099 * not free up any layer-private data until after the 11100 * WRITE completes. 11101 */ 11102 return; 11103 } 11104 11105 /* 11106 * Dispatch of the WRITE command failed; set up the error 11107 * condition and send this IO back up the iodone chain. 11108 */ 11109 bioerror(orig_bp, EIO); 11110 orig_bp->b_resid = orig_bp->b_bcount; 11111 11112 } else { 11113 /* 11114 * This is a regular READ request (ie, not a RMW). Copy the 11115 * data from the shadow buf into the original buf. The 11116 * copy_offset compensates for any "misalignment" between the 11117 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11118 * original buf (with its un->un_sys_blocksize blocks). 11119 */ 11120 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11121 copy_length); 11122 } 11123 11124 freebuf_done: 11125 11126 /* 11127 * At this point we still have both the shadow buf AND the original 11128 * buf to deal with, as well as the layer-private data area in each. 11129 * Local variables are as follows: 11130 * 11131 * bp -- points to shadow buf 11132 * xp -- points to xbuf of shadow buf 11133 * bsp -- points to layer-private data area of shadow buf 11134 * orig_bp -- points to original buf 11135 * 11136 * First free the shadow buf and its associated xbuf, then free the 11137 * layer-private data area from the shadow buf. There is no need to 11138 * restore xb_private in the shadow xbuf. 11139 */ 11140 sd_shadow_buf_free(bp); 11141 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11142 11143 /* 11144 * Now update the local variables to point to the original buf, xbuf, 11145 * and layer-private area. 11146 */ 11147 bp = orig_bp; 11148 xp = SD_GET_XBUF(bp); 11149 ASSERT(xp != NULL); 11150 ASSERT(xp == orig_xp); 11151 bsp = xp->xb_private; 11152 ASSERT(bsp != NULL); 11153 11154 done: 11155 /* 11156 * Restore xb_private to whatever it was set to by the next higher 11157 * layer in the chain, then free the layer-private data area. 
11158 */ 11159 xp->xb_private = bsp->mbs_oprivate; 11160 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11161 11162 exit: 11163 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11164 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11165 11166 SD_NEXT_IODONE(index, un, bp); 11167 } 11168 11169 11170 /* 11171 * Function: sd_checksum_iostart 11172 * 11173 * Description: A stub function for a layer that's currently not used. 11174 * For now just a placeholder. 11175 * 11176 * Context: Kernel thread context 11177 */ 11178 11179 static void 11180 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11181 { 11182 ASSERT(un != NULL); 11183 ASSERT(bp != NULL); 11184 ASSERT(!mutex_owned(SD_MUTEX(un))); 11185 SD_NEXT_IOSTART(index, un, bp); 11186 } 11187 11188 11189 /* 11190 * Function: sd_checksum_iodone 11191 * 11192 * Description: A stub function for a layer that's currently not used. 11193 * For now just a placeholder. 11194 * 11195 * Context: May be called under interrupt context 11196 */ 11197 11198 static void 11199 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11200 { 11201 ASSERT(un != NULL); 11202 ASSERT(bp != NULL); 11203 ASSERT(!mutex_owned(SD_MUTEX(un))); 11204 SD_NEXT_IODONE(index, un, bp); 11205 } 11206 11207 11208 /* 11209 * Function: sd_checksum_uscsi_iostart 11210 * 11211 * Description: A stub function for a layer that's currently not used. 11212 * For now just a placeholder. 11213 * 11214 * Context: Kernel thread context 11215 */ 11216 11217 static void 11218 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11219 { 11220 ASSERT(un != NULL); 11221 ASSERT(bp != NULL); 11222 ASSERT(!mutex_owned(SD_MUTEX(un))); 11223 SD_NEXT_IOSTART(index, un, bp); 11224 } 11225 11226 11227 /* 11228 * Function: sd_checksum_uscsi_iodone 11229 * 11230 * Description: A stub function for a layer that's currently not used. 11231 * For now just a placeholder. 11232 * 11233 * Context: May be called under interrupt context 11234 */ 11235 11236 static void 11237 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11238 { 11239 ASSERT(un != NULL); 11240 ASSERT(bp != NULL); 11241 ASSERT(!mutex_owned(SD_MUTEX(un))); 11242 SD_NEXT_IODONE(index, un, bp); 11243 } 11244 11245 11246 /* 11247 * Function: sd_pm_iostart 11248 * 11249 * Description: iostart-side routine for Power management. 11250 * 11251 * Context: Kernel thread context 11252 */ 11253 11254 static void 11255 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11256 { 11257 ASSERT(un != NULL); 11258 ASSERT(bp != NULL); 11259 ASSERT(!mutex_owned(SD_MUTEX(un))); 11260 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11261 11262 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11263 11264 if (sd_pm_entry(un) != DDI_SUCCESS) { 11265 /* 11266 * Set up to return the failed buf back up the 'iodone' 11267 * side of the calling chain. 11268 */ 11269 bioerror(bp, EIO); 11270 bp->b_resid = bp->b_bcount; 11271 11272 SD_BEGIN_IODONE(index, un, bp); 11273 11274 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11275 return; 11276 } 11277 11278 SD_NEXT_IOSTART(index, un, bp); 11279 11280 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11281 } 11282 11283 11284 /* 11285 * Function: sd_pm_iodone 11286 * 11287 * Description: iodone-side routine for power management.
11288 * 11289 * Context: may be called from interrupt context 11290 */ 11291 11292 static void 11293 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11294 { 11295 ASSERT(un != NULL); 11296 ASSERT(bp != NULL); 11297 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11298 11299 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11300 11301 /* 11302 * After attach the following flag is only read, so don't 11303 * take the penalty of acquiring a mutex for it. 11304 */ 11305 if (un->un_f_pm_is_enabled == TRUE) { 11306 sd_pm_exit(un); 11307 } 11308 11309 SD_NEXT_IODONE(index, un, bp); 11310 11311 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11312 } 11313 11314 11315 /* 11316 * Function: sd_core_iostart 11317 * 11318 * Description: Primary driver function for enqueuing buf(9S) structs from 11319 * the system and initiating IO to the target device 11320 * 11321 * Context: Kernel thread context. Can sleep. 11322 * 11323 * Assumptions: - The given xp->xb_blkno is absolute 11324 * (ie, relative to the start of the device). 11325 * - The IO is to be done using the native blocksize of 11326 * the device, as specified in un->un_tgt_blocksize. 11327 */ 11328 /* ARGSUSED */ 11329 static void 11330 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11331 { 11332 struct sd_xbuf *xp; 11333 11334 ASSERT(un != NULL); 11335 ASSERT(bp != NULL); 11336 ASSERT(!mutex_owned(SD_MUTEX(un))); 11337 ASSERT(bp->b_resid == 0); 11338 11339 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11340 11341 xp = SD_GET_XBUF(bp); 11342 ASSERT(xp != NULL); 11343 11344 mutex_enter(SD_MUTEX(un)); 11345 11346 /* 11347 * If we are currently in the failfast state, fail any new IO 11348 * that has B_FAILFAST set, then return. 11349 */ 11350 if ((bp->b_flags & B_FAILFAST) && 11351 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11352 mutex_exit(SD_MUTEX(un)); 11353 bioerror(bp, EIO); 11354 bp->b_resid = bp->b_bcount; 11355 SD_BEGIN_IODONE(index, un, bp); 11356 return; 11357 } 11358 11359 if (SD_IS_DIRECT_PRIORITY(xp)) { 11360 /* 11361 * Priority command -- transport it immediately. 11362 * 11363 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11364 * because all direct priority commands should be associated 11365 * with error recovery actions which we don't want to retry. 11366 */ 11367 sd_start_cmds(un, bp); 11368 } else { 11369 /* 11370 * Normal command -- add it to the wait queue, then start 11371 * transporting commands from the wait queue. 11372 */ 11373 sd_add_buf_to_waitq(un, bp); 11374 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11375 sd_start_cmds(un, NULL); 11376 } 11377 11378 mutex_exit(SD_MUTEX(un)); 11379 11380 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11381 } 11382 11383 11384 /* 11385 * Function: sd_init_cdb_limits 11386 * 11387 * Description: This is to handle scsi_pkt initialization differences 11388 * between the driver platforms. 11389 * 11390 * Legacy behaviors: 11391 * 11392 * If the block number or the sector count exceeds the 11393 * capabilities of a Group 0 command, shift over to a 11394 * Group 1 command. We don't blindly use Group 1 11395 * commands because a) some drives (CDC Wren IVs) get a 11396 * bit confused, and b) there is probably a fair amount 11397 * of speed difference for a target to receive and decode 11398 * a 10 byte command instead of a 6 byte command. 11399 * 11400 * The xfer time difference of 6 vs 10 byte CDBs is 11401 * still significant so this code is still worthwhile. 
11402 * 10 byte CDBs are very inefficient with the fas HBA driver 11403 * and older disks. Each CDB byte took 1 usec with some 11404 * popular disks. 11405 * 11406 * Context: Must be called at attach time 11407 */ 11408 11409 static void 11410 sd_init_cdb_limits(struct sd_lun *un) 11411 { 11412 int hba_cdb_limit; 11413 11414 /* 11415 * Use CDB_GROUP1 commands for most devices except for 11416 * parallel SCSI fixed drives in which case we get better 11417 * performance using CDB_GROUP0 commands (where applicable). 11418 */ 11419 un->un_mincdb = SD_CDB_GROUP1; 11420 #if !defined(__fibre) 11421 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11422 !un->un_f_has_removable_media) { 11423 un->un_mincdb = SD_CDB_GROUP0; 11424 } 11425 #endif 11426 11427 /* 11428 * Try to read the max-cdb-length supported by HBA. 11429 */ 11430 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11431 if (0 >= un->un_max_hba_cdb) { 11432 un->un_max_hba_cdb = CDB_GROUP4; 11433 hba_cdb_limit = SD_CDB_GROUP4; 11434 } else if (0 < un->un_max_hba_cdb && 11435 un->un_max_hba_cdb < CDB_GROUP1) { 11436 hba_cdb_limit = SD_CDB_GROUP0; 11437 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11438 un->un_max_hba_cdb < CDB_GROUP5) { 11439 hba_cdb_limit = SD_CDB_GROUP1; 11440 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11441 un->un_max_hba_cdb < CDB_GROUP4) { 11442 hba_cdb_limit = SD_CDB_GROUP5; 11443 } else { 11444 hba_cdb_limit = SD_CDB_GROUP4; 11445 } 11446 11447 /* 11448 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11449 * commands for fixed disks unless we are building for a 32 bit 11450 * kernel. 11451 */ 11452 #ifdef _LP64 11453 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11454 min(hba_cdb_limit, SD_CDB_GROUP4); 11455 #else 11456 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11457 min(hba_cdb_limit, SD_CDB_GROUP1); 11458 #endif 11459 11460 /* 11461 * x86 systems require the PKT_DMA_PARTIAL flag 11462 */ 11463 #if defined(__x86) 11464 un->un_pkt_flags = PKT_DMA_PARTIAL; 11465 #else 11466 un->un_pkt_flags = 0; 11467 #endif 11468 11469 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11470 ? sizeof (struct scsi_arq_status) : 1); 11471 un->un_cmd_timeout = (ushort_t)sd_io_time; 11472 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 11473 } 11474 11475 11476 /* 11477 * Function: sd_initpkt_for_buf 11478 * 11479 * Description: Allocate and initialize for transport a scsi_pkt struct, 11480 * based upon the info specified in the given buf struct. 11481 * 11482 * Assumes the xb_blkno in the request is absolute (ie, 11483 * relative to the start of the device (NOT partition!). 11484 * Also assumes that the request is using the native block 11485 * size of the device (as returned by the READ CAPACITY 11486 * command). 11487 * 11488 * Return Code: SD_PKT_ALLOC_SUCCESS 11489 * SD_PKT_ALLOC_FAILURE 11490 * SD_PKT_ALLOC_FAILURE_NO_DMA 11491 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11492 * 11493 * Context: Kernel thread and may be called from software interrupt context 11494 * as part of a sdrunout callback. 
This function may not block or 11495 * call routines that block 11496 */ 11497 11498 static int 11499 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11500 { 11501 struct sd_xbuf *xp; 11502 struct scsi_pkt *pktp = NULL; 11503 struct sd_lun *un; 11504 size_t blockcount; 11505 daddr_t startblock; 11506 int rval; 11507 int cmd_flags; 11508 11509 ASSERT(bp != NULL); 11510 ASSERT(pktpp != NULL); 11511 xp = SD_GET_XBUF(bp); 11512 ASSERT(xp != NULL); 11513 un = SD_GET_UN(bp); 11514 ASSERT(un != NULL); 11515 ASSERT(mutex_owned(SD_MUTEX(un))); 11516 ASSERT(bp->b_resid == 0); 11517 11518 SD_TRACE(SD_LOG_IO_CORE, un, 11519 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11520 11521 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11522 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11523 /* 11524 * Already have a scsi_pkt -- just need DMA resources. 11525 * We must recompute the CDB in case the mapping returns 11526 * a nonzero pkt_resid. 11527 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11528 * that is being retried, the unmap/remap of the DMA resources 11529 * will result in the entire transfer starting over again 11530 * from the very first block. 11531 */ 11532 ASSERT(xp->xb_pktp != NULL); 11533 pktp = xp->xb_pktp; 11534 } else { 11535 pktp = NULL; 11536 } 11537 #endif /* __i386 || __amd64 */ 11538 11539 startblock = xp->xb_blkno; /* Absolute block num. */ 11540 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11541 11542 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11543 11544 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11545 11546 #else 11547 11548 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11549 11550 #endif 11551 11552 /* 11553 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11554 * call scsi_init_pkt, and build the CDB. 11555 */ 11556 rval = sd_setup_rw_pkt(un, &pktp, bp, 11557 cmd_flags, sdrunout, (caddr_t)un, 11558 startblock, blockcount); 11559 11560 if (rval == 0) { 11561 /* 11562 * Success. 11563 * 11564 * If partial DMA is being used and required for this transfer, 11565 * set it up here. 11566 */ 11567 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11568 (pktp->pkt_resid != 0)) { 11569 11570 /* 11571 * Save the CDB length and pkt_resid for the 11572 * next xfer 11573 */ 11574 xp->xb_dma_resid = pktp->pkt_resid; 11575 11576 /* rezero resid */ 11577 pktp->pkt_resid = 0; 11578 11579 } else { 11580 xp->xb_dma_resid = 0; 11581 } 11582 11583 pktp->pkt_flags = un->un_tagflags; 11584 pktp->pkt_time = un->un_cmd_timeout; 11585 pktp->pkt_comp = sdintr; 11586 11587 pktp->pkt_private = bp; 11588 *pktpp = pktp; 11589 11590 SD_TRACE(SD_LOG_IO_CORE, un, 11591 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11592 11593 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11594 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11595 #endif 11596 11597 return (SD_PKT_ALLOC_SUCCESS); 11598 11599 } 11600 11601 /* 11602 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11603 * from sd_setup_rw_pkt. 11604 */ 11605 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11606 11607 if (rval == SD_PKT_ALLOC_FAILURE) { 11608 *pktpp = NULL; 11609 /* 11610 * Set the driver state to RWAIT to indicate the driver 11611 * is waiting on resource allocations. The driver will not 11612 * suspend, pm_suspend, or detach while the state is RWAIT. 11613 */ 11614 New_state(un, SD_STATE_RWAIT); 11615 11616 SD_ERROR(SD_LOG_IO_CORE, un, 11617 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 11618 11619 if ((bp->b_flags & B_ERROR) != 0) { 11620 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11621 } 11622 return (SD_PKT_ALLOC_FAILURE); 11623 } else { 11624 /* 11625 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11626 * 11627 * This should never happen. Maybe someone messed with the 11628 * kernel's minphys? 11629 */ 11630 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11631 "Request rejected: too large for CDB: " 11632 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11633 SD_ERROR(SD_LOG_IO_CORE, un, 11634 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11635 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11636 11637 } 11638 } 11639 11640 11641 /* 11642 * Function: sd_destroypkt_for_buf 11643 * 11644 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11645 * 11646 * Context: Kernel thread or interrupt context 11647 */ 11648 11649 static void 11650 sd_destroypkt_for_buf(struct buf *bp) 11651 { 11652 ASSERT(bp != NULL); 11653 ASSERT(SD_GET_UN(bp) != NULL); 11654 11655 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11656 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11657 11658 ASSERT(SD_GET_PKTP(bp) != NULL); 11659 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11660 11661 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11662 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11663 } 11664 11665 /* 11666 * Function: sd_setup_rw_pkt 11667 * 11668 * Description: Determines appropriate CDB group for the requested LBA 11669 * and transfer length, calls scsi_init_pkt, and builds 11670 * the CDB. Do not use for partial DMA transfers except 11671 * for the initial transfer since the CDB size must 11672 * remain constant. 11673 * 11674 * Context: Kernel thread and may be called from software interrupt 11675 * context as part of a sdrunout callback. This function may not 11676 * block or call routines that block 11677 */ 11678 11679 11680 int 11681 sd_setup_rw_pkt(struct sd_lun *un, 11682 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11683 int (*callback)(caddr_t), caddr_t callback_arg, 11684 diskaddr_t lba, uint32_t blockcount) 11685 { 11686 struct scsi_pkt *return_pktp; 11687 union scsi_cdb *cdbp; 11688 struct sd_cdbinfo *cp = NULL; 11689 int i; 11690 11691 /* 11692 * See which size CDB to use, based upon the request. 11693 */ 11694 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11695 11696 /* 11697 * Check lba and block count against sd_cdbtab limits. 11698 * In the partial DMA case, we have to use the same size 11699 * CDB for all the transfers. Check lba + blockcount 11700 * against the max LBA so we know that segment of the 11701 * transfer can use the CDB we select. 11702 */ 11703 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11704 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11705 11706 /* 11707 * The command will fit into the CDB type 11708 * specified by sd_cdbtab[i]. 11709 */ 11710 cp = sd_cdbtab + i; 11711 11712 /* 11713 * Call scsi_init_pkt so we can fill in the 11714 * CDB. 11715 */ 11716 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 11717 bp, cp->sc_grpcode, un->un_status_len, 0, 11718 flags, callback, callback_arg); 11719 11720 if (return_pktp != NULL) { 11721 11722 /* 11723 * Return new value of pkt 11724 */ 11725 *pktpp = return_pktp; 11726 11727 /* 11728 * To be safe, zero the CDB ensuring there is 11729 * no leftover data from a previous command.
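 *
 * For reference, the CDB groups scanned by the selection loop above
 * correspond to the standard SCSI read/write command formats:
 *
 *	CDB_GROUP0	 6-byte CDB	21-bit LBA,  8-bit count
 *	CDB_GROUP1	10-byte CDB	32-bit LBA, 16-bit count
 *	CDB_GROUP5	12-byte CDB	32-bit LBA, 32-bit count
 *	CDB_GROUP4	16-byte CDB	64-bit LBA, 32-bit count
 *
 * So, for example, a small transfer at an lba no larger than
 * 0x1FFFFF can fit a Group 0 CDB, while any lba above 0xFFFFFFFF
 * can only be addressed by a Group 4 (16-byte) CDB.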
11730 */ 11731 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 11732 11733 /* 11734 * Handle partial DMA mapping 11735 */ 11736 if (return_pktp->pkt_resid != 0) { 11737 11738 /* 11739 * Not going to xfer as many blocks as 11740 * originally expected 11741 */ 11742 blockcount -= 11743 SD_BYTES2TGTBLOCKS(un, 11744 return_pktp->pkt_resid); 11745 } 11746 11747 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 11748 11749 /* 11750 * Set command byte based on the CDB 11751 * type we matched. 11752 */ 11753 cdbp->scc_cmd = cp->sc_grpmask | 11754 ((bp->b_flags & B_READ) ? 11755 SCMD_READ : SCMD_WRITE); 11756 11757 SD_FILL_SCSI1_LUN(un, return_pktp); 11758 11759 /* 11760 * Fill in LBA and length 11761 */ 11762 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 11763 (cp->sc_grpcode == CDB_GROUP4) || 11764 (cp->sc_grpcode == CDB_GROUP0) || 11765 (cp->sc_grpcode == CDB_GROUP5)); 11766 11767 if (cp->sc_grpcode == CDB_GROUP1) { 11768 FORMG1ADDR(cdbp, lba); 11769 FORMG1COUNT(cdbp, blockcount); 11770 return (0); 11771 } else if (cp->sc_grpcode == CDB_GROUP4) { 11772 FORMG4LONGADDR(cdbp, lba); 11773 FORMG4COUNT(cdbp, blockcount); 11774 return (0); 11775 } else if (cp->sc_grpcode == CDB_GROUP0) { 11776 FORMG0ADDR(cdbp, lba); 11777 FORMG0COUNT(cdbp, blockcount); 11778 return (0); 11779 } else if (cp->sc_grpcode == CDB_GROUP5) { 11780 FORMG5ADDR(cdbp, lba); 11781 FORMG5COUNT(cdbp, blockcount); 11782 return (0); 11783 } 11784 11785 /* 11786 * It should be impossible to not match one 11787 * of the CDB types above, so we should never 11788 * reach this point. Set the CDB command byte 11789 * to test-unit-ready to avoid writing 11790 * to somewhere we don't intend. 11791 */ 11792 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 11793 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11794 } else { 11795 /* 11796 * Couldn't get scsi_pkt 11797 */ 11798 return (SD_PKT_ALLOC_FAILURE); 11799 } 11800 } 11801 } 11802 11803 /* 11804 * None of the available CDB types were suitable. This really 11805 * should never happen: on a 64 bit system we support 11806 * READ16/WRITE16 which will hold an entire 64 bit disk address 11807 * and on a 32 bit system we will refuse to bind to a device 11808 * larger than 2TB so addresses will never be larger than 32 bits. 11809 */ 11810 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11811 } 11812 11813 #if defined(__i386) || defined(__amd64) 11814 /* 11815 * Function: sd_setup_next_rw_pkt 11816 * 11817 * Description: Setup packet for partial DMA transfers, except for the 11818 * initial transfer. sd_setup_rw_pkt should be used for 11819 * the initial transfer. 11820 * 11821 * Context: Kernel thread and may be called from interrupt context. 11822 */ 11823 11824 int 11825 sd_setup_next_rw_pkt(struct sd_lun *un, 11826 struct scsi_pkt *pktp, struct buf *bp, 11827 diskaddr_t lba, uint32_t blockcount) 11828 { 11829 uchar_t com; 11830 union scsi_cdb *cdbp; 11831 uchar_t cdb_group_id; 11832 11833 ASSERT(pktp != NULL); 11834 ASSERT(pktp->pkt_cdbp != NULL); 11835 11836 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 11837 com = cdbp->scc_cmd; 11838 cdb_group_id = CDB_GROUPID(com); 11839 11840 ASSERT((cdb_group_id == CDB_GROUPID_0) || 11841 (cdb_group_id == CDB_GROUPID_1) || 11842 (cdb_group_id == CDB_GROUPID_4) || 11843 (cdb_group_id == CDB_GROUPID_5)); 11844 11845 /* 11846 * Move pkt to the next portion of the xfer. 11847 * func is NULL_FUNC so we do not have to release 11848 * the disk mutex here. 11849 */ 11850 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 11851 NULL_FUNC, NULL) == pktp) { 11852 /* Success. 
Handle partial DMA */ 11853 if (pktp->pkt_resid != 0) { 11854 blockcount -= 11855 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 11856 } 11857 11858 cdbp->scc_cmd = com; 11859 SD_FILL_SCSI1_LUN(un, pktp); 11860 if (cdb_group_id == CDB_GROUPID_1) { 11861 FORMG1ADDR(cdbp, lba); 11862 FORMG1COUNT(cdbp, blockcount); 11863 return (0); 11864 } else if (cdb_group_id == CDB_GROUPID_4) { 11865 FORMG4LONGADDR(cdbp, lba); 11866 FORMG4COUNT(cdbp, blockcount); 11867 return (0); 11868 } else if (cdb_group_id == CDB_GROUPID_0) { 11869 FORMG0ADDR(cdbp, lba); 11870 FORMG0COUNT(cdbp, blockcount); 11871 return (0); 11872 } else if (cdb_group_id == CDB_GROUPID_5) { 11873 FORMG5ADDR(cdbp, lba); 11874 FORMG5COUNT(cdbp, blockcount); 11875 return (0); 11876 } 11877 11878 /* Unreachable */ 11879 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11880 } 11881 11882 /* 11883 * Error setting up next portion of cmd transfer. 11884 * Something is definitely very wrong and this 11885 * should not happen. 11886 */ 11887 return (SD_PKT_ALLOC_FAILURE); 11888 } 11889 #endif /* defined(__i386) || defined(__amd64) */ 11890 11891 /* 11892 * Function: sd_initpkt_for_uscsi 11893 * 11894 * Description: Allocate and initialize for transport a scsi_pkt struct, 11895 * based upon the info specified in the given uscsi_cmd struct. 11896 * 11897 * Return Code: SD_PKT_ALLOC_SUCCESS 11898 * SD_PKT_ALLOC_FAILURE 11899 * SD_PKT_ALLOC_FAILURE_NO_DMA 11900 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11901 * 11902 * Context: Kernel thread and may be called from software interrupt context 11903 * as part of a sdrunout callback. This function may not block or 11904 * call routines that block 11905 */ 11906 11907 static int 11908 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 11909 { 11910 struct uscsi_cmd *uscmd; 11911 struct sd_xbuf *xp; 11912 struct scsi_pkt *pktp; 11913 struct sd_lun *un; 11914 uint32_t flags = 0; 11915 11916 ASSERT(bp != NULL); 11917 ASSERT(pktpp != NULL); 11918 xp = SD_GET_XBUF(bp); 11919 ASSERT(xp != NULL); 11920 un = SD_GET_UN(bp); 11921 ASSERT(un != NULL); 11922 ASSERT(mutex_owned(SD_MUTEX(un))); 11923 11924 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 11925 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 11926 ASSERT(uscmd != NULL); 11927 11928 SD_TRACE(SD_LOG_IO_CORE, un, 11929 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 11930 11931 /* 11932 * Allocate the scsi_pkt for the command. 11933 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 11934 * during scsi_init_pkt time and will continue to use the 11935 * same path as long as the same scsi_pkt is used without 11936 * intervening scsi_dma_free(). Since a uscsi command does 11937 * not call scsi_dmafree() before retrying a failed command, it 11938 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 11939 * set such that scsi_vhci can use another available path for 11940 * retry. Besides, uscsi command does not allow DMA breakup, 11941 * so there is no need to set PKT_DMA_PARTIAL flag. 11942 */ 11943 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 11944 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 11945 sizeof (struct scsi_arq_status), 0, 11946 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 11947 sdrunout, (caddr_t)un); 11948 11949 if (pktp == NULL) { 11950 *pktpp = NULL; 11951 /* 11952 * Set the driver state to RWAIT to indicate the driver 11953 * is waiting on resource allocations. The driver will not 11954 * suspend, pm_suspend, or detach while the state is RWAIT.
11955 */ 11956 New_state(un, SD_STATE_RWAIT); 11957 11958 SD_ERROR(SD_LOG_IO_CORE, un, 11959 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 11960 11961 if ((bp->b_flags & B_ERROR) != 0) { 11962 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11963 } 11964 return (SD_PKT_ALLOC_FAILURE); 11965 } 11966 11967 /* 11968 * We do not do DMA breakup for USCSI commands, so return failure 11969 * here if all the needed DMA resources were not allocated. 11970 */ 11971 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 11972 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 11973 scsi_destroy_pkt(pktp); 11974 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 11975 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 11976 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 11977 } 11978 11979 /* Init the cdb from the given uscsi struct */ 11980 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 11981 uscmd->uscsi_cdb[0], 0, 0, 0); 11982 11983 SD_FILL_SCSI1_LUN(un, pktp); 11984 11985 /* 11986 * Set up the optional USCSI flags. See the uscsi(7I) man page 11987 * for a listing of the supported flags. 11988 */ 11989 11990 if (uscmd->uscsi_flags & USCSI_SILENT) { 11991 flags |= FLAG_SILENT; 11992 } 11993 11994 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 11995 flags |= FLAG_DIAGNOSE; 11996 } 11997 11998 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 11999 flags |= FLAG_ISOLATE; 12000 } 12001 12002 if (un->un_f_is_fibre == FALSE) { 12003 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12004 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12005 } 12006 } 12007 12008 /* 12009 * Set the pkt flags here so we save time later. 12010 * Note: These flags are NOT in the uscsi man page!!! 12011 */ 12012 if (uscmd->uscsi_flags & USCSI_HEAD) { 12013 flags |= FLAG_HEAD; 12014 } 12015 12016 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12017 flags |= FLAG_NOINTR; 12018 } 12019 12020 /* 12021 * For tagged queueing, things get a bit complicated. 12022 * Check first for head of queue and last for ordered queue. 12023 * If neither head nor order, use the default driver tag flags. 12024 */ 12025 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12026 if (uscmd->uscsi_flags & USCSI_HTAG) { 12027 flags |= FLAG_HTAG; 12028 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12029 flags |= FLAG_OTAG; 12030 } else { 12031 flags |= un->un_tagflags & FLAG_TAGMASK; 12032 } 12033 } 12034 12035 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12036 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12037 } 12038 12039 pktp->pkt_flags = flags; 12040 12041 /* Copy the caller's CDB into the pkt... */ 12042 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12043 12044 if (uscmd->uscsi_timeout == 0) { 12045 pktp->pkt_time = un->un_uscsi_timeout; 12046 } else { 12047 pktp->pkt_time = uscmd->uscsi_timeout; 12048 } 12049 12050 /* need it later to identify USCSI request in sdintr */ 12051 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12052 12053 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12054 12055 pktp->pkt_private = bp; 12056 pktp->pkt_comp = sdintr; 12057 *pktpp = pktp; 12058 12059 SD_TRACE(SD_LOG_IO_CORE, un, 12060 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12061 12062 return (SD_PKT_ALLOC_SUCCESS); 12063 } 12064 12065 12066 /* 12067 * Function: sd_destroypkt_for_uscsi 12068 * 12069 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12070 * IOs. Also saves relevant info into the associated uscsi_cmd 12071 * struct.
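 *
 * For orientation, requests on this path typically originate from a
 * user-level uscsi(7I) ioctl. A minimal sketch of such a caller
 * (illustrative only; error handling omitted, and "fd" is assumed
 * to be an open descriptor on the disk device):
 *
 *	struct uscsi_cmd ucmd;
 *	char cdb[CDB_GROUP0] = { SCMD_TEST_UNIT_READY };
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	ucmd.uscsi_cdb     = cdb;
 *	ucmd.uscsi_cdblen  = CDB_GROUP0;
 *	ucmd.uscsi_flags   = USCSI_SILENT;
 *	ucmd.uscsi_timeout = 30;
 *	(void) ioctl(fd, USCSICMD, &ucmd);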
12072 * 12073 * Context: May be called under interrupt context 12074 */ 12075 12076 static void 12077 sd_destroypkt_for_uscsi(struct buf *bp) 12078 { 12079 struct uscsi_cmd *uscmd; 12080 struct sd_xbuf *xp; 12081 struct scsi_pkt *pktp; 12082 struct sd_lun *un; 12083 12084 ASSERT(bp != NULL); 12085 xp = SD_GET_XBUF(bp); 12086 ASSERT(xp != NULL); 12087 un = SD_GET_UN(bp); 12088 ASSERT(un != NULL); 12089 ASSERT(!mutex_owned(SD_MUTEX(un))); 12090 pktp = SD_GET_PKTP(bp); 12091 ASSERT(pktp != NULL); 12092 12093 SD_TRACE(SD_LOG_IO_CORE, un, 12094 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12095 12096 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12097 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12098 ASSERT(uscmd != NULL); 12099 12100 /* Save the status and the residual into the uscsi_cmd struct */ 12101 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12102 uscmd->uscsi_resid = bp->b_resid; 12103 12104 /* 12105 * If enabled, copy any saved sense data into the area specified 12106 * by the uscsi command. 12107 */ 12108 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12109 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12110 /* 12111 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12112 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12113 */ 12114 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12115 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12116 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 12117 } 12118 12119 /* We are done with the scsi_pkt; free it now */ 12120 ASSERT(SD_GET_PKTP(bp) != NULL); 12121 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12122 12123 SD_TRACE(SD_LOG_IO_CORE, un, 12124 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12125 } 12126 12127 12128 /* 12129 * Function: sd_bioclone_alloc 12130 * 12131 * Description: Allocate a buf(9S) and init it as per the given buf 12132 * and the various arguments. The associated sd_xbuf 12133 * struct is (nearly) duplicated. The struct buf *bp 12134 * argument is saved in new_xp->xb_private. 12135 * 12136 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12137 * datalen - size of data area for the shadow bp 12138 * blkno - starting LBA 12139 * func - function pointer for b_iodone in the shadow buf. (May 12140 * be NULL if none.) 12141 * 12142 * Return Code: Pointer to the allocated buf(9S) struct 12143 * 12144 * Context: Can sleep. 12145 */ 12146 12147 static struct buf * 12148 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12149 daddr_t blkno, int (*func)(struct buf *)) 12150 { 12151 struct sd_lun *un; 12152 struct sd_xbuf *xp; 12153 struct sd_xbuf *new_xp; 12154 struct buf *new_bp; 12155 12156 ASSERT(bp != NULL); 12157 xp = SD_GET_XBUF(bp); 12158 ASSERT(xp != NULL); 12159 un = SD_GET_UN(bp); 12160 ASSERT(un != NULL); 12161 ASSERT(!mutex_owned(SD_MUTEX(un))); 12162 12163 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12164 NULL, KM_SLEEP); 12165 12166 new_bp->b_lblkno = blkno; 12167 12168 /* 12169 * Allocate an xbuf for the shadow bp and copy the contents of the 12170 * original xbuf into it. 12171 */ 12172 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12173 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12174 12175 /* 12176 * The given bp is automatically saved in the xb_private member 12177 * of the new xbuf. Callers are allowed to depend on this.
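 *
 * A b_iodone callback supplied via 'func' can therefore recover the
 * original buf from its clone like this (illustrative sketch;
 * "clone_bp" is a hypothetical name for the returned buf):
 *
 *	struct sd_xbuf *cxp = SD_GET_XBUF(clone_bp);
 *	struct buf *orig_bp = cxp->xb_private;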
12178 */ 12179 new_xp->xb_private = bp; 12180 12181 new_bp->b_private = new_xp; 12182 12183 return (new_bp); 12184 } 12185 12186 /* 12187 * Function: sd_shadow_buf_alloc 12188 * 12189 * Description: Allocate a buf(9S) and init it as per the given buf 12190 * and the various arguments. The associated sd_xbuf 12191 * struct is (nearly) duplicated. The struct buf *bp 12192 * argument is saved in new_xp->xb_private. 12193 * 12194 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12195 * datalen - size of data area for the shadow bp 12196 * bflags - B_READ or B_WRITE (pseudo flag) 12197 * blkno - starting LBA 12198 * func - function pointer for b_iodone in the shadow buf. (May 12199 * be NULL if none.) 12200 * 12201 * Return Code: Pointer to the allocated buf(9S) struct 12202 * 12203 * Context: Can sleep. 12204 */ 12205 12206 static struct buf * 12207 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12208 daddr_t blkno, int (*func)(struct buf *)) 12209 { 12210 struct sd_lun *un; 12211 struct sd_xbuf *xp; 12212 struct sd_xbuf *new_xp; 12213 struct buf *new_bp; 12214 12215 ASSERT(bp != NULL); 12216 xp = SD_GET_XBUF(bp); 12217 ASSERT(xp != NULL); 12218 un = SD_GET_UN(bp); 12219 ASSERT(un != NULL); 12220 ASSERT(!mutex_owned(SD_MUTEX(un))); 12221 12222 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12223 bp_mapin(bp); 12224 } 12225 12226 bflags &= (B_READ | B_WRITE); 12227 #if defined(__i386) || defined(__amd64) 12228 new_bp = getrbuf(KM_SLEEP); 12229 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12230 new_bp->b_bcount = datalen; 12231 new_bp->b_flags = bflags | 12232 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12233 #else 12234 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12235 datalen, bflags, SLEEP_FUNC, NULL); 12236 #endif 12237 new_bp->av_forw = NULL; 12238 new_bp->av_back = NULL; 12239 new_bp->b_dev = bp->b_dev; 12240 new_bp->b_blkno = blkno; 12241 new_bp->b_iodone = func; 12242 new_bp->b_edev = bp->b_edev; 12243 new_bp->b_resid = 0; 12244 12245 /* We need to preserve the B_FAILFAST flag */ 12246 if (bp->b_flags & B_FAILFAST) { 12247 new_bp->b_flags |= B_FAILFAST; 12248 } 12249 12250 /* 12251 * Allocate an xbuf for the shadow bp and copy the contents of the 12252 * original xbuf into it. 12253 */ 12254 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12255 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12256 12257 /* Need later to copy data between the shadow buf & original buf! */ 12258 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12259 12260 /* 12261 * The given bp is automatically saved in the xb_private member 12262 * of the new xbuf. Callers are allowed to depend on this. 12263 */ 12264 new_xp->xb_private = bp; 12265 12266 new_bp->b_private = new_xp; 12267 12268 return (new_bp); 12269 } 12270 12271 /* 12272 * Function: sd_bioclone_free 12273 * 12274 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12275 * in the larger than partition operation. 12276 * 12277 * Context: May be called under interrupt context 12278 */ 12279 12280 static void 12281 sd_bioclone_free(struct buf *bp) 12282 { 12283 struct sd_xbuf *xp; 12284 12285 ASSERT(bp != NULL); 12286 xp = SD_GET_XBUF(bp); 12287 ASSERT(xp != NULL); 12288 12289 /* 12290 * Call bp_mapout() before freeing the buf, in case a lower 12291 * layer or HBA had done a bp_mapin(). We must do this here 12292 * as we are the "originator" of the shadow buf.
12293 */ 12294 bp_mapout(bp); 12295 12296 /* 12297 * Null out b_iodone before freeing the bp, to ensure that the driver 12298 * never gets confused by a stale value in this field. (Just a little 12299 * extra defensiveness here.) 12300 */ 12301 bp->b_iodone = NULL; 12302 12303 freerbuf(bp); 12304 12305 kmem_free(xp, sizeof (struct sd_xbuf)); 12306 } 12307 12308 /* 12309 * Function: sd_shadow_buf_free 12310 * 12311 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12312 * 12313 * Context: May be called under interrupt context 12314 */ 12315 12316 static void 12317 sd_shadow_buf_free(struct buf *bp) 12318 { 12319 struct sd_xbuf *xp; 12320 12321 ASSERT(bp != NULL); 12322 xp = SD_GET_XBUF(bp); 12323 ASSERT(xp != NULL); 12324 12325 #if defined(__sparc) 12326 /* 12327 * Call bp_mapout() before freeing the buf, in case a lower 12328 * layer or HBA had done a bp_mapin(). We must do this here 12329 * as we are the "originator" of the shadow buf. 12330 */ 12331 bp_mapout(bp); 12332 #endif 12333 12334 /* 12335 * Null out b_iodone before freeing the bp, to ensure that the driver 12336 * never gets confused by a stale value in this field. (Just a little 12337 * extra defensiveness here.) 12338 */ 12339 bp->b_iodone = NULL; 12340 12341 #if defined(__i386) || defined(__amd64) 12342 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12343 freerbuf(bp); 12344 #else 12345 scsi_free_consistent_buf(bp); 12346 #endif 12347 12348 kmem_free(xp, sizeof (struct sd_xbuf)); 12349 } 12350 12351 12352 /* 12353 * Function: sd_print_transport_rejected_message 12354 * 12355 * Description: This implements the ludicrously complex rules for printing 12356 * a "transport rejected" message. This is to address the 12357 * specific problem of having a flood of this error message 12358 * produced when a failover occurs. 12359 * 12360 * Context: Any. 12361 */ 12362 12363 static void 12364 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12365 int code) 12366 { 12367 ASSERT(un != NULL); 12368 ASSERT(mutex_owned(SD_MUTEX(un))); 12369 ASSERT(xp != NULL); 12370 12371 /* 12372 * Print the "transport rejected" message under the following 12373 * conditions: 12374 * 12375 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12376 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12377 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12378 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12379 * scsi_transport(9F) (which indicates that the target might have 12380 * gone off-line). This uses the un->un_tran_fatal_count 12381 * count, which is incremented whenever a TRAN_FATAL_ERROR is 12382 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12383 * from scsi_transport(). 12384 * 12385 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12386 * the preceding cases in order for the message to be printed.
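 *
 * Equivalently, the whole test below reduces to a single predicate:
 *
 *	print = ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
 *	    ((sd_level_mask & SD_LOGMASK_DIAG) ||
 *	    (code != TRAN_FATAL_ERROR) ||
 *	    (un->un_tran_fatal_count == 1));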
12387 */ 12388 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12389 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12390 (code != TRAN_FATAL_ERROR) || 12391 (un->un_tran_fatal_count == 1)) { 12392 switch (code) { 12393 case TRAN_BADPKT: 12394 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12395 "transport rejected bad packet\n"); 12396 break; 12397 case TRAN_FATAL_ERROR: 12398 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12399 "transport rejected fatal error\n"); 12400 break; 12401 default: 12402 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12403 "transport rejected (%d)\n", code); 12404 break; 12405 } 12406 } 12407 } 12408 } 12409 12410 12411 /* 12412 * Function: sd_add_buf_to_waitq 12413 * 12414 * Description: Add the given buf(9S) struct to the wait queue for the 12415 * instance. If sorting is enabled, then the buf is added 12416 * to the queue via an elevator sort algorithm (a la 12417 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12418 * If sorting is not enabled, then the buf is just added 12419 * to the end of the wait queue. 12420 * 12421 * Return Code: void 12422 * 12423 * Context: Does not sleep/block, therefore technically can be called 12424 * from any context. However if sorting is enabled then the 12425 * execution time is indeterminate, and may take long if 12426 * the wait queue grows large. 12427 */ 12428 12429 static void 12430 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12431 { 12432 struct buf *ap; 12433 12434 ASSERT(bp != NULL); 12435 ASSERT(un != NULL); 12436 ASSERT(mutex_owned(SD_MUTEX(un))); 12437 12438 /* If the queue is empty, add the buf as the only entry & return. */ 12439 if (un->un_waitq_headp == NULL) { 12440 ASSERT(un->un_waitq_tailp == NULL); 12441 un->un_waitq_headp = un->un_waitq_tailp = bp; 12442 bp->av_forw = NULL; 12443 return; 12444 } 12445 12446 ASSERT(un->un_waitq_tailp != NULL); 12447 12448 /* 12449 * If sorting is disabled, just add the buf to the tail end of 12450 * the wait queue and return. 12451 */ 12452 if (un->un_f_disksort_disabled) { 12453 un->un_waitq_tailp->av_forw = bp; 12454 un->un_waitq_tailp = bp; 12455 bp->av_forw = NULL; 12456 return; 12457 } 12458 12459 /* 12460 * Sort thru the list of requests currently on the wait queue 12461 * and add the new buf request at the appropriate position. 12462 * 12463 * The un->un_waitq_headp is an activity chain pointer on which 12464 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12465 * first queue holds those requests which are positioned after 12466 * the current SD_GET_BLKNO() (in the first request); the second holds 12467 * requests which came in after their SD_GET_BLKNO() number was passed. 12468 * Thus we implement a one way scan, retracting after reaching 12469 * the end of the drive to the first request on the second 12470 * queue, at which time it becomes the first queue. 12471 * A one-way scan is natural because of the way UNIX read-ahead 12472 * blocks are allocated. 12473 * 12474 * If we lie after the first request, then we must locate the 12475 * second request list and add ourselves to it. 12476 */ 12477 ap = un->un_waitq_headp; 12478 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12479 while (ap->av_forw != NULL) { 12480 /* 12481 * Look for an "inversion" in the (normally 12482 * ascending) block numbers. This indicates 12483 * the start of the second request list. 12484 */ 12485 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12486 /* 12487 * Search the second request list for the 12488 * first request at a larger block number. 
12489 * We go before that; however if there is 12490 * no such request, we go at the end. 12491 */ 12492 do { 12493 if (SD_GET_BLKNO(bp) < 12494 SD_GET_BLKNO(ap->av_forw)) { 12495 goto insert; 12496 } 12497 ap = ap->av_forw; 12498 } while (ap->av_forw != NULL); 12499 goto insert; /* after last */ 12500 } 12501 ap = ap->av_forw; 12502 } 12503 12504 /* 12505 * No inversions... we will go after the last, and 12506 * be the first request in the second request list. 12507 */ 12508 goto insert; 12509 } 12510 12511 /* 12512 * Request is at/after the current request... 12513 * sort in the first request list. 12514 */ 12515 while (ap->av_forw != NULL) { 12516 /* 12517 * We want to go after the current request (1) if 12518 * there is an inversion after it (i.e. it is the end 12519 * of the first request list), or (2) if the next 12520 * request is a larger block no. than our request. 12521 */ 12522 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12523 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12524 goto insert; 12525 } 12526 ap = ap->av_forw; 12527 } 12528 12529 /* 12530 * Neither a second list nor a larger request, therefore 12531 * we go at the end of the first list (which is the same 12532 * as the end of the whole shebang). 12533 */ 12534 insert: 12535 bp->av_forw = ap->av_forw; 12536 ap->av_forw = bp; 12537 12538 /* 12539 * If we inserted onto the tail end of the waitq, make sure the 12540 * tail pointer is updated. 12541 */ 12542 if (ap == un->un_waitq_tailp) { 12543 un->un_waitq_tailp = bp; 12544 } 12545 } 12546 12547 12548 /* 12549 * Function: sd_start_cmds 12550 * 12551 * Description: Remove and transport cmds from the driver queues. 12552 * 12553 * Arguments: un - pointer to the unit (soft state) struct for the target. 12554 * 12555 * immed_bp - ptr to a buf to be transported immediately. Only 12556 * the immed_bp is transported; bufs on the waitq are not 12557 * processed and the un_retry_bp is not checked. If immed_bp is 12558 * NULL, then normal queue processing is performed. 12559 * 12560 * Context: May be called from kernel thread context, interrupt context, 12561 * or runout callback context. This function may not block or 12562 * call routines that block. 12563 */ 12564 12565 static void 12566 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12567 { 12568 struct sd_xbuf *xp; 12569 struct buf *bp; 12570 void (*statp)(kstat_io_t *); 12571 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12572 void (*saved_statp)(kstat_io_t *); 12573 #endif 12574 int rval; 12575 12576 ASSERT(un != NULL); 12577 ASSERT(mutex_owned(SD_MUTEX(un))); 12578 ASSERT(un->un_ncmds_in_transport >= 0); 12579 ASSERT(un->un_throttle >= 0); 12580 12581 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12582 12583 do { 12584 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12585 saved_statp = NULL; 12586 #endif 12587 12588 /* 12589 * If we are syncing or dumping, fail the command to 12590 * avoid recursively calling back into scsi_transport(). 12591 * The dump I/O itself uses a separate code path so this 12592 * only prevents non-dump I/O from being sent while dumping. 12593 * File system sync takes place before dumping begins. 12594 * During panic, filesystem I/O is allowed provided 12595 * un_in_callback is <= 1. This is to prevent recursion 12596 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12597 * sd_start_cmds and so on. See panic.c for more information 12598 * about the states the system can be in during panic.
12599 */ 12600 if ((un->un_state == SD_STATE_DUMPING) || 12601 (ddi_in_panic() && (un->un_in_callback > 1))) { 12602 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12603 "sd_start_cmds: panicking\n"); 12604 goto exit; 12605 } 12606 12607 if ((bp = immed_bp) != NULL) { 12608 /* 12609 * We have a bp that must be transported immediately. 12610 * It's OK to transport the immed_bp here without doing 12611 * the throttle limit check because the immed_bp is 12612 * always used in a retry/recovery case. This means 12613 * that we know we are not at the throttle limit by 12614 * virtue of the fact that to get here we must have 12615 * already gotten a command back via sdintr(). This also 12616 * relies on (1) the command on un_retry_bp preventing 12617 * further commands from the waitq from being issued; 12618 * and (2) the code in sd_retry_command checking the 12619 * throttle limit before issuing a delayed or immediate 12620 * retry. This holds even if the throttle limit is 12621 * currently ratcheted down from its maximum value. 12622 */ 12623 statp = kstat_runq_enter; 12624 if (bp == un->un_retry_bp) { 12625 ASSERT((un->un_retry_statp == NULL) || 12626 (un->un_retry_statp == kstat_waitq_enter) || 12627 (un->un_retry_statp == 12628 kstat_runq_back_to_waitq)); 12629 /* 12630 * If the waitq kstat was incremented when 12631 * sd_set_retry_bp() queued this bp for a retry, 12632 * then we must set up statp so that the waitq 12633 * count will get decremented correctly below. 12634 * Also we must clear un->un_retry_statp to 12635 * ensure that we do not act on a stale value 12636 * in this field. 12637 */ 12638 if ((un->un_retry_statp == kstat_waitq_enter) || 12639 (un->un_retry_statp == 12640 kstat_runq_back_to_waitq)) { 12641 statp = kstat_waitq_to_runq; 12642 } 12643 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12644 saved_statp = un->un_retry_statp; 12645 #endif 12646 un->un_retry_statp = NULL; 12647 12648 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12649 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12650 "un_throttle:%d un_ncmds_in_transport:%d\n", 12651 un, un->un_retry_bp, un->un_throttle, 12652 un->un_ncmds_in_transport); 12653 } else { 12654 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12655 "processing priority bp:0x%p\n", bp); 12656 } 12657 12658 } else if ((bp = un->un_waitq_headp) != NULL) { 12659 /* 12660 * A command on the waitq is ready to go, but do not 12661 * send it if: 12662 * 12663 * (1) the throttle limit has been reached, or 12664 * (2) a retry is pending, or 12665 * (3) a START_STOP_UNIT callback pending, or 12666 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12667 * command is pending. 12668 * 12669 * For all of these conditions, IO processing will 12670 * restart after the condition is cleared. 12671 */ 12672 if (un->un_ncmds_in_transport >= un->un_throttle) { 12673 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12674 "sd_start_cmds: exiting, " 12675 "throttle limit reached!\n"); 12676 goto exit; 12677 } 12678 if (un->un_retry_bp != NULL) { 12679 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12680 "sd_start_cmds: exiting, retry pending!\n"); 12681 goto exit; 12682 } 12683 if (un->un_startstop_timeid != NULL) { 12684 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12685 "sd_start_cmds: exiting, " 12686 "START_STOP pending!\n"); 12687 goto exit; 12688 } 12689 if (un->un_direct_priority_timeid != NULL) { 12690 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12691 "sd_start_cmds: exiting, " 12692 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow new
		 * commands to proceed when they shouldn't; the device
		 * may be going off.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 * 4) The I/O associated with a DMA error is removed
			 *    from the queue and failed with EIO. Processing of
			 *    the transport queues is also halted to be
			 *    restarted via runout or the next I/O.
			 * 5) The I/O associated with a CDB size or packet
			 *    size error is removed from the queue and failed
			 *    with EIO. Processing of the transport queues is
			 *    continued.
			 *
			 * Note: there is no interface for canceling a runout
			 * callback. To prevent the driver from detaching or
			 * suspending while a runout is pending, the driver
			 * state is set to SD_STATE_RWAIT.
			 *
			 * Note: using the scsi_init_pkt callback facility can
			 * result in an I/O request persisting at the head of
			 * the list which cannot be satisfied even after
			 * multiple retries. In the future the driver may
			 * implement some kind of maximum runout count before
			 * failing an I/O.
			 *
			 * Note: the use of funcp below may seem superfluous,
			 * but it helps warlock figure out the correct
			 * initpkt function calls (see [s]sd.wlcmd).
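			 *
			 * For reference, the contract above implies that each
			 * routine reachable through sd_initpkt_map has roughly
			 * the following shape (a hypothetical sketch only; the
			 * cdb_len, stat_len, priv_len, and pkt_flags names are
			 * placeholders, not actual parameters used here):
			 *
			 *	static int
			 *	sd_initpkt_sketch(struct buf *bp,
			 *	    struct scsi_pkt **pktpp)
			 *	{
			 *		struct scsi_pkt *pktp =
			 *		    scsi_init_pkt(SD_ADDRESS(un), NULL,
			 *		    bp, cdb_len, stat_len, priv_len,
			 *		    pkt_flags, sdrunout, (caddr_t)un);
			 *		if (pktp == NULL)
			 *			return (SD_PKT_ALLOC_FAILURE);
			 *		*pktpp = pktp;
			 *		return (SD_PKT_ALLOC_SUCCESS);
			 *	}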
12782 */ 12783 struct scsi_pkt *pktp; 12784 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 12785 12786 ASSERT(bp != un->un_rqs_bp); 12787 12788 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 12789 switch ((*funcp)(bp, &pktp)) { 12790 case SD_PKT_ALLOC_SUCCESS: 12791 xp->xb_pktp = pktp; 12792 SD_TRACE(SD_LOG_IO_CORE, un, 12793 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 12794 pktp); 12795 goto got_pkt; 12796 12797 case SD_PKT_ALLOC_FAILURE: 12798 /* 12799 * Temporary (hopefully) resource depletion. 12800 * Since retries and RQS commands always have a 12801 * scsi_pkt allocated, these cases should never 12802 * get here. So the only cases this needs to 12803 * handle is a bp from the waitq (which we put 12804 * back onto the waitq for sdrunout), or a bp 12805 * sent as an immed_bp (which we just fail). 12806 */ 12807 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12808 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 12809 12810 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12811 12812 if (bp == immed_bp) { 12813 /* 12814 * If SD_XB_DMA_FREED is clear, then 12815 * this is a failure to allocate a 12816 * scsi_pkt, and we must fail the 12817 * command. 12818 */ 12819 if ((xp->xb_pkt_flags & 12820 SD_XB_DMA_FREED) == 0) { 12821 break; 12822 } 12823 12824 /* 12825 * If this immediate command is NOT our 12826 * un_retry_bp, then we must fail it. 12827 */ 12828 if (bp != un->un_retry_bp) { 12829 break; 12830 } 12831 12832 /* 12833 * We get here if this cmd is our 12834 * un_retry_bp that was DMAFREED, but 12835 * scsi_init_pkt() failed to reallocate 12836 * DMA resources when we attempted to 12837 * retry it. This can happen when an 12838 * mpxio failover is in progress, but 12839 * we don't want to just fail the 12840 * command in this case. 12841 * 12842 * Use timeout(9F) to restart it after 12843 * a 100ms delay. We don't want to 12844 * let sdrunout() restart it, because 12845 * sdrunout() is just supposed to start 12846 * commands that are sitting on the 12847 * wait queue. The un_retry_bp stays 12848 * set until the command completes, but 12849 * sdrunout can be called many times 12850 * before that happens. Since sdrunout 12851 * cannot tell if the un_retry_bp is 12852 * already in the transport, it could 12853 * end up calling scsi_transport() for 12854 * the un_retry_bp multiple times. 12855 * 12856 * Also: don't schedule the callback 12857 * if some other callback is already 12858 * pending. 12859 */ 12860 if (un->un_retry_statp == NULL) { 12861 /* 12862 * restore the kstat pointer to 12863 * keep kstat counts coherent 12864 * when we do retry the command. 12865 */ 12866 un->un_retry_statp = 12867 saved_statp; 12868 } 12869 12870 if ((un->un_startstop_timeid == NULL) && 12871 (un->un_retry_timeid == NULL) && 12872 (un->un_direct_priority_timeid == 12873 NULL)) { 12874 12875 un->un_retry_timeid = 12876 timeout( 12877 sd_start_retry_command, 12878 un, SD_RESTART_TIMEOUT); 12879 } 12880 goto exit; 12881 } 12882 12883 #else 12884 if (bp == immed_bp) { 12885 break; /* Just fail the command */ 12886 } 12887 #endif 12888 12889 /* Add the buf back to the head of the waitq */ 12890 bp->av_forw = un->un_waitq_headp; 12891 un->un_waitq_headp = bp; 12892 if (un->un_waitq_tailp == NULL) { 12893 un->un_waitq_tailp = bp; 12894 } 12895 goto exit; 12896 12897 case SD_PKT_ALLOC_FAILURE_NO_DMA: 12898 /* 12899 * HBA DMA resource failure. Fail the command 12900 * and continue processing of the queues. 
12901 */ 12902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12903 "sd_start_cmds: " 12904 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 12905 break; 12906 12907 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 12908 /* 12909 * Note:x86: Partial DMA mapping not supported 12910 * for USCSI commands, and all the needed DMA 12911 * resources were not allocated. 12912 */ 12913 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12914 "sd_start_cmds: " 12915 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 12916 break; 12917 12918 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 12919 /* 12920 * Note:x86: Request cannot fit into CDB based 12921 * on lba and len. 12922 */ 12923 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12924 "sd_start_cmds: " 12925 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 12926 break; 12927 12928 default: 12929 /* Should NEVER get here! */ 12930 panic("scsi_initpkt error"); 12931 /*NOTREACHED*/ 12932 } 12933 12934 /* 12935 * Fatal error in allocating a scsi_pkt for this buf. 12936 * Update kstats & return the buf with an error code. 12937 * We must use sd_return_failed_command_no_restart() to 12938 * avoid a recursive call back into sd_start_cmds(). 12939 * However this also means that we must keep processing 12940 * the waitq here in order to avoid stalling. 12941 */ 12942 if (statp == kstat_waitq_to_runq) { 12943 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 12944 } 12945 sd_return_failed_command_no_restart(un, bp, EIO); 12946 if (bp == immed_bp) { 12947 /* immed_bp is gone by now, so clear this */ 12948 immed_bp = NULL; 12949 } 12950 continue; 12951 } 12952 got_pkt: 12953 if (bp == immed_bp) { 12954 /* goto the head of the class.... */ 12955 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 12956 } 12957 12958 un->un_ncmds_in_transport++; 12959 SD_UPDATE_KSTATS(un, statp, bp); 12960 12961 /* 12962 * Call scsi_transport() to send the command to the target. 12963 * According to SCSA architecture, we must drop the mutex here 12964 * before calling scsi_transport() in order to avoid deadlock. 12965 * Note that the scsi_pkt's completion routine can be executed 12966 * (from interrupt context) even before the call to 12967 * scsi_transport() returns. 12968 */ 12969 SD_TRACE(SD_LOG_IO_CORE, un, 12970 "sd_start_cmds: calling scsi_transport()\n"); 12971 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 12972 12973 mutex_exit(SD_MUTEX(un)); 12974 rval = scsi_transport(xp->xb_pktp); 12975 mutex_enter(SD_MUTEX(un)); 12976 12977 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12978 "sd_start_cmds: scsi_transport() returned %d\n", rval); 12979 12980 switch (rval) { 12981 case TRAN_ACCEPT: 12982 /* Clear this with every pkt accepted by the HBA */ 12983 un->un_tran_fatal_count = 0; 12984 break; /* Success; try the next cmd (if any) */ 12985 12986 case TRAN_BUSY: 12987 un->un_ncmds_in_transport--; 12988 ASSERT(un->un_ncmds_in_transport >= 0); 12989 12990 /* 12991 * Don't retry request sense, the sense data 12992 * is lost when another request is sent. 12993 * Free up the rqs buf and retry 12994 * the original failed cmd. Update kstat. 12995 */ 12996 if (bp == un->un_rqs_bp) { 12997 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 12998 bp = sd_mark_rqs_idle(un, xp); 12999 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13000 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13001 kstat_waitq_enter); 13002 goto exit; 13003 } 13004 13005 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13006 /* 13007 * Free the DMA resources for the scsi_pkt. 
This will 13008 * allow mpxio to select another path the next time 13009 * we call scsi_transport() with this scsi_pkt. 13010 * See sdintr() for the rationalization behind this. 13011 */ 13012 if ((un->un_f_is_fibre == TRUE) && 13013 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13014 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13015 scsi_dmafree(xp->xb_pktp); 13016 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13017 } 13018 #endif 13019 13020 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13021 /* 13022 * Commands that are SD_PATH_DIRECT_PRIORITY 13023 * are for error recovery situations. These do 13024 * not use the normal command waitq, so if they 13025 * get a TRAN_BUSY we cannot put them back onto 13026 * the waitq for later retry. One possible 13027 * problem is that there could already be some 13028 * other command on un_retry_bp that is waiting 13029 * for this one to complete, so we would be 13030 * deadlocked if we put this command back onto 13031 * the waitq for later retry (since un_retry_bp 13032 * must complete before the driver gets back to 13033 * commands on the waitq). 13034 * 13035 * To avoid deadlock we must schedule a callback 13036 * that will restart this command after a set 13037 * interval. This should keep retrying for as 13038 * long as the underlying transport keeps 13039 * returning TRAN_BUSY (just like for other 13040 * commands). Use the same timeout interval as 13041 * for the ordinary TRAN_BUSY retry. 13042 */ 13043 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13044 "sd_start_cmds: scsi_transport() returned " 13045 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13046 13047 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13048 un->un_direct_priority_timeid = 13049 timeout(sd_start_direct_priority_command, 13050 bp, SD_BSY_TIMEOUT / 500); 13051 13052 goto exit; 13053 } 13054 13055 /* 13056 * For TRAN_BUSY, we want to reduce the throttle value, 13057 * unless we are retrying a command. 13058 */ 13059 if (bp != un->un_retry_bp) { 13060 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13061 } 13062 13063 /* 13064 * Set up the bp to be tried again 10 ms later. 13065 * Note:x86: Is there a timeout value in the sd_lun 13066 * for this condition? 13067 */ 13068 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13069 kstat_runq_back_to_waitq); 13070 goto exit; 13071 13072 case TRAN_FATAL_ERROR: 13073 un->un_tran_fatal_count++; 13074 /* FALLTHRU */ 13075 13076 case TRAN_BADPKT: 13077 default: 13078 un->un_ncmds_in_transport--; 13079 ASSERT(un->un_ncmds_in_transport >= 0); 13080 13081 /* 13082 * If this is our REQUEST SENSE command with a 13083 * transport error, we must get back the pointers 13084 * to the original buf, and mark the REQUEST 13085 * SENSE command as "available". 13086 */ 13087 if (bp == un->un_rqs_bp) { 13088 bp = sd_mark_rqs_idle(un, xp); 13089 xp = SD_GET_XBUF(bp); 13090 } else { 13091 /* 13092 * Legacy behavior: do not update transport 13093 * error count for request sense commands. 13094 */ 13095 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13096 } 13097 13098 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13099 sd_print_transport_rejected_message(un, xp, rval); 13100 13101 /* 13102 * We must use sd_return_failed_command_no_restart() to 13103 * avoid a recursive call back into sd_start_cmds(). 13104 * However this also means that we must keep processing 13105 * the waitq here in order to avoid stalling. 
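			 *
			 * The recursion being avoided looks like this
			 * (sketch):
			 *
			 *	sd_start_cmds()
			 *	  -> sd_return_failed_command()
			 *	    -> sd_return_command()
			 *	      -> sd_start_cmds()	(recursion)
			 *
			 * The _no_restart() variant returns the buf without
			 * that final restart call, which is why this loop
			 * must continue draining the waitq itself.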
13106 */ 13107 sd_return_failed_command_no_restart(un, bp, EIO); 13108 13109 /* 13110 * Notify any threads waiting in sd_ddi_suspend() that 13111 * a command completion has occurred. 13112 */ 13113 if (un->un_state == SD_STATE_SUSPENDED) { 13114 cv_broadcast(&un->un_disk_busy_cv); 13115 } 13116 13117 if (bp == immed_bp) { 13118 /* immed_bp is gone by now, so clear this */ 13119 immed_bp = NULL; 13120 } 13121 break; 13122 } 13123 13124 } while (immed_bp == NULL); 13125 13126 exit: 13127 ASSERT(mutex_owned(SD_MUTEX(un))); 13128 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13129 } 13130 13131 13132 /* 13133 * Function: sd_return_command 13134 * 13135 * Description: Returns a command to its originator (with or without an 13136 * error). Also starts commands waiting to be transported 13137 * to the target. 13138 * 13139 * Context: May be called from interrupt, kernel, or timeout context 13140 */ 13141 13142 static void 13143 sd_return_command(struct sd_lun *un, struct buf *bp) 13144 { 13145 struct sd_xbuf *xp; 13146 #if defined(__i386) || defined(__amd64) 13147 struct scsi_pkt *pktp; 13148 #endif 13149 13150 ASSERT(bp != NULL); 13151 ASSERT(un != NULL); 13152 ASSERT(mutex_owned(SD_MUTEX(un))); 13153 ASSERT(bp != un->un_rqs_bp); 13154 xp = SD_GET_XBUF(bp); 13155 ASSERT(xp != NULL); 13156 13157 #if defined(__i386) || defined(__amd64) 13158 pktp = SD_GET_PKTP(bp); 13159 #endif 13160 13161 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13162 13163 #if defined(__i386) || defined(__amd64) 13164 /* 13165 * Note:x86: check for the "sdrestart failed" case. 13166 */ 13167 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13168 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13169 (xp->xb_pktp->pkt_resid == 0)) { 13170 13171 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13172 /* 13173 * Successfully set up next portion of cmd 13174 * transfer, try sending it 13175 */ 13176 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13177 NULL, NULL, 0, (clock_t)0, NULL); 13178 sd_start_cmds(un, NULL); 13179 return; /* Note:x86: need a return here? */ 13180 } 13181 } 13182 #endif 13183 13184 /* 13185 * If this is the failfast bp, clear it from un_failfast_bp. This 13186 * can happen if upon being re-tried the failfast bp either 13187 * succeeded or encountered another error (possibly even a different 13188 * error than the one that precipitated the failfast state, but in 13189 * that case it would have had to exhaust retries as well). Regardless, 13190 * this should not occur whenever the instance is in the active 13191 * failfast state. 13192 */ 13193 if (bp == un->un_failfast_bp) { 13194 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13195 un->un_failfast_bp = NULL; 13196 } 13197 13198 /* 13199 * Clear the failfast state upon successful completion of ANY cmd. 13200 */ 13201 if (bp->b_error == 0) { 13202 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13203 } 13204 13205 /* 13206 * This is used if the command was retried one or more times. Show that 13207 * we are done with it, and allow processing of the waitq to resume. 
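	 *
	 * Sketch of the un_retry_bp lifecycle (the surrounding code
	 * paths, not new behavior):
	 *
	 *	sd_set_retry_bp()	sets un_retry_bp; sd_start_cmds()
	 *				holds off the waitq while it is
	 *				non-NULL
	 *	here (or in the		un_retry_bp = NULL; normal waitq
	 *	_no_restart variant)	processing may resume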
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
	int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *	bp - Pointer to the buf for the command to be retried.
 *
 *	retry_check_flag - Flag to see which (if any) of the retry
 *	    counts should be decremented/checked. If the indicated
 *	    retry count is exhausted, then the command will not be
 *	    retried; it will be failed instead. This should use a
 *	    value equal to one of the following:
 *
 *		SD_RETRIES_NOCHECK
 *		SD_RETRIES_STANDARD
 *		SD_RETRIES_VICTIM
 *
 *	    Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *	    if the check should be made to see if FLAG_ISOLATE is set
 *	    in the pkt. If FLAG_ISOLATE is set, then the command is
 *	    not retried, it is simply failed.
 *
 *	user_funcp - Ptr to function to call before dispatching the
 *	    command. May be NULL if no action needs to be performed.
 *	    (Primarily intended for printing messages.)
 *
 *	user_arg - Optional argument to be passed along to
 *	    the user_funcp call.
 *
 *	failure_code - errno return code to set in the bp if the
 *	    command is going to be failed.
 *
 *	retry_delay - Retry delay interval in (clock_t) units. May
 *	    be zero which indicates that the command should be retried
 *	    immediately (ie, without an intervening delay).
 *
 *	statp - Ptr to kstat function to be updated if the command
 *	    is queued for a delayed retry. May be NULL if no kstat
 *	    update is desired.
 *
 * Context: May be called from interrupt context.
 */
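
/*
 * For reference, a representative invocation from the TRAN_BUSY handling
 * in sd_start_cmds() above (an example of the argument conventions just
 * described, not a new call):
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
 *	    EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);
 *
 * i.e. check the standard retry count, no message callback, fail with
 * EIO if retries are exhausted, and re-issue after a short delay.
 */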

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
	code), void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto the head of the
	 * wait queue since we don't want to start more commands.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
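				 *
				 * To summarize the informal state machine
				 * implemented here (a sketch, not new
				 * behavior):
				 *
				 *   INACTIVE  -> "pending": first failfast
				 *		error; un_failfast_bp = bp
				 *   "pending" -> ACTIVE: the same bp hits a
				 *		second failfast error; the
				 *		queues are flushed
				 *   any state -> INACTIVE: a non-failfast
				 *		retry or any successful
				 *		completion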
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. 
" 13615 "Check the target.\n"); 13616 goto fail_command; 13617 } 13618 xp->xb_ua_retry_count++; 13619 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13620 "sd_retry_command: retry count:%d\n", 13621 xp->xb_ua_retry_count); 13622 break; 13623 13624 case SD_RETRIES_BUSY: 13625 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13626 /* Retries exhausted, fail the command */ 13627 SD_TRACE(SD_LOG_IO_CORE, un, 13628 "sd_retry_command: retries exhausted!\n"); 13629 goto fail_command; 13630 } 13631 xp->xb_retry_count++; 13632 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13633 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13634 break; 13635 13636 case SD_RETRIES_NOCHECK: 13637 default: 13638 /* No retry count to check. Just proceed with the retry */ 13639 break; 13640 } 13641 13642 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13643 13644 /* 13645 * If we were given a zero timeout, we must attempt to retry the 13646 * command immediately (ie, without a delay). 13647 */ 13648 if (retry_delay == 0) { 13649 /* 13650 * Check some limiting conditions to see if we can actually 13651 * do the immediate retry. If we cannot, then we must 13652 * fall back to queueing up a delayed retry. 13653 */ 13654 if (un->un_ncmds_in_transport >= un->un_throttle) { 13655 /* 13656 * We are at the throttle limit for the target, 13657 * fall back to delayed retry. 13658 */ 13659 retry_delay = SD_BSY_TIMEOUT; 13660 statp = kstat_waitq_enter; 13661 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13662 "sd_retry_command: immed. retry hit " 13663 "throttle!\n"); 13664 } else { 13665 /* 13666 * We're clear to proceed with the immediate retry. 13667 * First call the user-provided function (if any) 13668 */ 13669 if (user_funcp != NULL) { 13670 (*user_funcp)(un, bp, user_arg, 13671 SD_IMMEDIATE_RETRY_ISSUED); 13672 #ifdef __lock_lint 13673 sd_print_incomplete_msg(un, bp, user_arg, 13674 SD_IMMEDIATE_RETRY_ISSUED); 13675 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13676 SD_IMMEDIATE_RETRY_ISSUED); 13677 sd_print_sense_failed_msg(un, bp, user_arg, 13678 SD_IMMEDIATE_RETRY_ISSUED); 13679 #endif 13680 } 13681 13682 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13683 "sd_retry_command: issuing immediate retry\n"); 13684 13685 /* 13686 * Call sd_start_cmds() to transport the command to 13687 * the target. 13688 */ 13689 sd_start_cmds(un, bp); 13690 13691 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13692 "sd_retry_command exit\n"); 13693 return; 13694 } 13695 } 13696 13697 /* 13698 * Set up to retry the command after a delay. 13699 * First call the user-provided function (if any) 13700 */ 13701 if (user_funcp != NULL) { 13702 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 13703 } 13704 13705 sd_set_retry_bp(un, bp, retry_delay, statp); 13706 13707 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13708 return; 13709 13710 fail_command: 13711 13712 if (user_funcp != NULL) { 13713 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 13714 } 13715 13716 fail_command_no_log: 13717 13718 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13719 "sd_retry_command: returning failed command\n"); 13720 13721 sd_return_failed_command(un, bp, failure_code); 13722 13723 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 13724 } 13725 13726 13727 /* 13728 * Function: sd_set_retry_bp 13729 * 13730 * Description: Set up the given bp for retry. 
13731 * 13732 * Arguments: un - ptr to associated softstate 13733 * bp - ptr to buf(9S) for the command 13734 * retry_delay - time interval before issuing retry (may be 0) 13735 * statp - optional pointer to kstat function 13736 * 13737 * Context: May be called under interrupt context 13738 */ 13739 13740 static void 13741 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 13742 void (*statp)(kstat_io_t *)) 13743 { 13744 ASSERT(un != NULL); 13745 ASSERT(mutex_owned(SD_MUTEX(un))); 13746 ASSERT(bp != NULL); 13747 13748 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13749 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 13750 13751 /* 13752 * Indicate that the command is being retried. This will not allow any 13753 * other commands on the wait queue to be transported to the target 13754 * until this command has been completed (success or failure). The 13755 * "retry command" is not transported to the target until the given 13756 * time delay expires, unless the user specified a 0 retry_delay. 13757 * 13758 * Note: the timeout(9F) callback routine is what actually calls 13759 * sd_start_cmds() to transport the command, with the exception of a 13760 * zero retry_delay. The only current implementor of a zero retry delay 13761 * is the case where a START_STOP_UNIT is sent to spin-up a device. 13762 */ 13763 if (un->un_retry_bp == NULL) { 13764 ASSERT(un->un_retry_statp == NULL); 13765 un->un_retry_bp = bp; 13766 13767 /* 13768 * If the user has not specified a delay the command should 13769 * be queued and no timeout should be scheduled. 13770 */ 13771 if (retry_delay == 0) { 13772 /* 13773 * Save the kstat pointer that will be used in the 13774 * call to SD_UPDATE_KSTATS() below, so that 13775 * sd_start_cmds() can correctly decrement the waitq 13776 * count when it is time to transport this command. 13777 */ 13778 un->un_retry_statp = statp; 13779 goto done; 13780 } 13781 } 13782 13783 if (un->un_retry_bp == bp) { 13784 /* 13785 * Save the kstat pointer that will be used in the call to 13786 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 13787 * correctly decrement the waitq count when it is time to 13788 * transport this command. 13789 */ 13790 un->un_retry_statp = statp; 13791 13792 /* 13793 * Schedule a timeout if: 13794 * 1) The user has specified a delay. 13795 * 2) There is not a START_STOP_UNIT callback pending. 13796 * 13797 * If no delay has been specified, then it is up to the caller 13798 * to ensure that IO processing continues without stalling. 13799 * Effectively, this means that the caller will issue the 13800 * required call to sd_start_cmds(). The START_STOP_UNIT 13801 * callback does this after the START STOP UNIT command has 13802 * completed. In either of these cases we should not schedule 13803 * a timeout callback here. Also don't schedule the timeout if 13804 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 13805 */ 13806 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 13807 (un->un_direct_priority_timeid == NULL)) { 13808 un->un_retry_timeid = 13809 timeout(sd_start_retry_command, un, retry_delay); 13810 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13811 "sd_set_retry_bp: setting timeout: un: 0x%p" 13812 " bp:0x%p un_retry_timeid:0x%p\n", 13813 un, bp, un->un_retry_timeid); 13814 } 13815 } else { 13816 /* 13817 * We only get in here if there is already another command 13818 * waiting to be retried. 
In this case, we just put the 13819 * given command onto the wait queue, so it can be transported 13820 * after the current retry command has completed. 13821 * 13822 * Also we have to make sure that if the command at the head 13823 * of the wait queue is the un_failfast_bp, that we do not 13824 * put ahead of it any other commands that are to be retried. 13825 */ 13826 if ((un->un_failfast_bp != NULL) && 13827 (un->un_failfast_bp == un->un_waitq_headp)) { 13828 /* 13829 * Enqueue this command AFTER the first command on 13830 * the wait queue (which is also un_failfast_bp). 13831 */ 13832 bp->av_forw = un->un_waitq_headp->av_forw; 13833 un->un_waitq_headp->av_forw = bp; 13834 if (un->un_waitq_headp == un->un_waitq_tailp) { 13835 un->un_waitq_tailp = bp; 13836 } 13837 } else { 13838 /* Enqueue this command at the head of the waitq. */ 13839 bp->av_forw = un->un_waitq_headp; 13840 un->un_waitq_headp = bp; 13841 if (un->un_waitq_tailp == NULL) { 13842 un->un_waitq_tailp = bp; 13843 } 13844 } 13845 13846 if (statp == NULL) { 13847 statp = kstat_waitq_enter; 13848 } 13849 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13850 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 13851 } 13852 13853 done: 13854 if (statp != NULL) { 13855 SD_UPDATE_KSTATS(un, statp, bp); 13856 } 13857 13858 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13859 "sd_set_retry_bp: exit un:0x%p\n", un); 13860 } 13861 13862 13863 /* 13864 * Function: sd_start_retry_command 13865 * 13866 * Description: Start the command that has been waiting on the target's 13867 * retry queue. Called from timeout(9F) context after the 13868 * retry delay interval has expired. 13869 * 13870 * Arguments: arg - pointer to associated softstate for the device. 13871 * 13872 * Context: timeout(9F) thread context. May not sleep. 13873 */ 13874 13875 static void 13876 sd_start_retry_command(void *arg) 13877 { 13878 struct sd_lun *un = arg; 13879 13880 ASSERT(un != NULL); 13881 ASSERT(!mutex_owned(SD_MUTEX(un))); 13882 13883 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13884 "sd_start_retry_command: entry\n"); 13885 13886 mutex_enter(SD_MUTEX(un)); 13887 13888 un->un_retry_timeid = NULL; 13889 13890 if (un->un_retry_bp != NULL) { 13891 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13892 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 13893 un, un->un_retry_bp); 13894 sd_start_cmds(un, un->un_retry_bp); 13895 } 13896 13897 mutex_exit(SD_MUTEX(un)); 13898 13899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13900 "sd_start_retry_command: exit\n"); 13901 } 13902 13903 13904 /* 13905 * Function: sd_start_direct_priority_command 13906 * 13907 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 13908 * received TRAN_BUSY when we called scsi_transport() to send it 13909 * to the underlying HBA. This function is called from timeout(9F) 13910 * context after the delay interval has expired. 13911 * 13912 * Arguments: arg - pointer to associated buf(9S) to be restarted. 13913 * 13914 * Context: timeout(9F) thread context. May not sleep. 
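 *
 * The timeout(9F) pairing idiom used for this callback (and for
 * un_retry_timeid and un_startstop_timeid) is, in sketch form:
 *
 *	un->un_direct_priority_timeid =
 *	    timeout(sd_start_direct_priority_command, bp,
 *	    SD_BSY_TIMEOUT / 500);
 *	...
 *	(callback)	mutex_enter(SD_MUTEX(un));
 *			un->un_direct_priority_timeid = NULL;
 *
 * A non-NULL id therefore means a restart is pending, which is
 * exactly the test sd_start_cmds() applies before issuing new
 * waitq commands.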
13915 */ 13916 13917 static void 13918 sd_start_direct_priority_command(void *arg) 13919 { 13920 struct buf *priority_bp = arg; 13921 struct sd_lun *un; 13922 13923 ASSERT(priority_bp != NULL); 13924 un = SD_GET_UN(priority_bp); 13925 ASSERT(un != NULL); 13926 ASSERT(!mutex_owned(SD_MUTEX(un))); 13927 13928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13929 "sd_start_direct_priority_command: entry\n"); 13930 13931 mutex_enter(SD_MUTEX(un)); 13932 un->un_direct_priority_timeid = NULL; 13933 sd_start_cmds(un, priority_bp); 13934 mutex_exit(SD_MUTEX(un)); 13935 13936 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13937 "sd_start_direct_priority_command: exit\n"); 13938 } 13939 13940 13941 /* 13942 * Function: sd_send_request_sense_command 13943 * 13944 * Description: Sends a REQUEST SENSE command to the target 13945 * 13946 * Context: May be called from interrupt context. 13947 */ 13948 13949 static void 13950 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 13951 struct scsi_pkt *pktp) 13952 { 13953 ASSERT(bp != NULL); 13954 ASSERT(un != NULL); 13955 ASSERT(mutex_owned(SD_MUTEX(un))); 13956 13957 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 13958 "entry: buf:0x%p\n", bp); 13959 13960 /* 13961 * If we are syncing or dumping, then fail the command to avoid a 13962 * recursive callback into scsi_transport(). Also fail the command 13963 * if we are suspended (legacy behavior). 13964 */ 13965 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 13966 (un->un_state == SD_STATE_DUMPING)) { 13967 sd_return_failed_command(un, bp, EIO); 13968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13969 "sd_send_request_sense_command: syncing/dumping, exit\n"); 13970 return; 13971 } 13972 13973 /* 13974 * Retry the failed command and don't issue the request sense if: 13975 * 1) the sense buf is busy 13976 * 2) we have 1 or more outstanding commands on the target 13977 * (the sense data will be cleared or invalidated any way) 13978 * 13979 * Note: There could be an issue with not checking a retry limit here, 13980 * the problem is determining which retry limit to check. 13981 */ 13982 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 13983 /* Don't retry if the command is flagged as non-retryable */ 13984 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 13985 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13986 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 13987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13988 "sd_send_request_sense_command: " 13989 "at full throttle, retrying exit\n"); 13990 } else { 13991 sd_return_failed_command(un, bp, EIO); 13992 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13993 "sd_send_request_sense_command: " 13994 "at full throttle, non-retryable exit\n"); 13995 } 13996 return; 13997 } 13998 13999 sd_mark_rqs_busy(un, bp); 14000 sd_start_cmds(un, un->un_rqs_bp); 14001 14002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14003 "sd_send_request_sense_command: exit\n"); 14004 } 14005 14006 14007 /* 14008 * Function: sd_mark_rqs_busy 14009 * 14010 * Description: Indicate that the request sense bp for this instance is 14011 * in use. 
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

	sense_xp->xb_retry_count = 0;
	sense_xp->xb_victim_retry_count = 0;
	sense_xp->xb_ua_retry_count = 0;
	sense_xp->xb_dma_resid = 0;

	/* Clean up the fields for auto-request sense */
	sense_xp->xb_sense_status = 0;
	sense_xp->xb_sense_state = 0;
	sense_xp->xb_sense_resid = 0;
	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *		to prevent reuse of the rqs struct before the caller can
 *		complete its processing.
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
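	 *
	 * For reference, the resulting six-byte group 0 CDB should look
	 * like this (assuming SENSE_LENGTH fits in a single byte; for
	 * group 0 CDBs scsi_setup_cdb() encodes the allocation length
	 * in byte 4):
	 *
	 *	{ SCMD_REQUEST_SENSE, 0x00, 0x00, 0x00, SENSE_LENGTH, 0x00 }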
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp = sdintr;
	un->un_rqs_pktp->pkt_time = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any initpkt or destroypkt routines, as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true. The best
	 * approach is to issue the scsi_ifgetcap() first, then try the
	 * scsi_ifsetcap().
	 *
	 * The third case is an HBA (adp) that always returns enabled on
	 * scsi_ifgetcap even when ARQ is not enabled; the best approach
	 * there is to issue a scsi_ifsetcap() followed by a scsi_ifgetcap().
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug; remove this code when
		 * the bug is fixed.
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
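			 *
			 * In sketch form, the probe-then-enable sequence
			 * is:
			 *
			 *	if (scsi_ifgetcap(SD_ADDRESS(un),
			 *	    "auto-rqsense", 1) == 0)
			 *		(void) scsi_ifsetcap(SD_ADDRESS(un),
			 *		    "auto-rqsense", 1, 1);
			 *
			 * where the final argument of 1 scopes the call to
			 * this target rather than to the HBA as a whole
			 * (see scsi_ifsetcap(9F)).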
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of these operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed,
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf));
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
14292 * 14293 * Arguments: un - ptr to the sd_lun softstate struct 14294 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14295 * 14296 * Context: May be called from interrupt context 14297 */ 14298 14299 static void 14300 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14301 { 14302 ASSERT(un != NULL); 14303 ASSERT(mutex_owned(SD_MUTEX(un))); 14304 ASSERT(un->un_ncmds_in_transport >= 0); 14305 14306 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14307 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14308 un, un->un_throttle, un->un_ncmds_in_transport); 14309 14310 if (un->un_throttle > 1) { 14311 if (un->un_f_use_adaptive_throttle == TRUE) { 14312 switch (throttle_type) { 14313 case SD_THROTTLE_TRAN_BUSY: 14314 if (un->un_busy_throttle == 0) { 14315 un->un_busy_throttle = un->un_throttle; 14316 } 14317 break; 14318 case SD_THROTTLE_QFULL: 14319 un->un_busy_throttle = 0; 14320 break; 14321 default: 14322 ASSERT(FALSE); 14323 } 14324 14325 if (un->un_ncmds_in_transport > 0) { 14326 un->un_throttle = un->un_ncmds_in_transport; 14327 } 14328 14329 } else { 14330 if (un->un_ncmds_in_transport == 0) { 14331 un->un_throttle = 1; 14332 } else { 14333 un->un_throttle = un->un_ncmds_in_transport; 14334 } 14335 } 14336 } 14337 14338 /* Reschedule the timeout if none is currently active */ 14339 if (un->un_reset_throttle_timeid == NULL) { 14340 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14341 un, SD_THROTTLE_RESET_INTERVAL); 14342 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14343 "sd_reduce_throttle: timeout scheduled!\n"); 14344 } 14345 14346 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14347 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14348 } 14349 14350 14351 14352 /* 14353 * Function: sd_restore_throttle 14354 * 14355 * Description: Callback function for timeout(9F). Resets the current 14356 * value of un->un_throttle to its default. 14357 * 14358 * Arguments: arg - pointer to associated softstate for the device. 14359 * 14360 * Context: May be called from interrupt context 14361 */ 14362 14363 static void 14364 sd_restore_throttle(void *arg) 14365 { 14366 struct sd_lun *un = arg; 14367 14368 ASSERT(un != NULL); 14369 ASSERT(!mutex_owned(SD_MUTEX(un))); 14370 14371 mutex_enter(SD_MUTEX(un)); 14372 14373 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14374 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14375 14376 un->un_reset_throttle_timeid = NULL; 14377 14378 if (un->un_f_use_adaptive_throttle == TRUE) { 14379 /* 14380 * If un_busy_throttle is nonzero, then it contains the 14381 * value that un_throttle was when we got a TRAN_BUSY back 14382 * from scsi_transport(). We want to revert back to this 14383 * value. 14384 * 14385 * In the QFULL case, the throttle limit will incrementally 14386 * increase until it reaches max throttle. 14387 */ 14388 if (un->un_busy_throttle > 0) { 14389 un->un_throttle = un->un_busy_throttle; 14390 un->un_busy_throttle = 0; 14391 } else { 14392 /* 14393 * increase throttle by 10% open gate slowly, schedule 14394 * another restore if saved throttle has not been 14395 * reached 14396 */ 14397 short throttle; 14398 if (sd_qfull_throttle_enable) { 14399 throttle = un->un_throttle + 14400 max((un->un_throttle / 10), 1); 14401 un->un_throttle = 14402 (throttle < un->un_saved_throttle) ? 
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *	      soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block.
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	 request persisting at the head of the list which cannot be
 *	 satisfied even after multiple retries. In the future the driver
 *	 may implement some kind of maximum runout count before failing
 *	 an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
14495 * 14496 * Context: Interrupt context 14497 */ 14498 14499 static void 14500 sdintr(struct scsi_pkt *pktp) 14501 { 14502 struct buf *bp; 14503 struct sd_xbuf *xp; 14504 struct sd_lun *un; 14505 14506 ASSERT(pktp != NULL); 14507 bp = (struct buf *)pktp->pkt_private; 14508 ASSERT(bp != NULL); 14509 xp = SD_GET_XBUF(bp); 14510 ASSERT(xp != NULL); 14511 ASSERT(xp->xb_pktp != NULL); 14512 un = SD_GET_UN(bp); 14513 ASSERT(un != NULL); 14514 ASSERT(!mutex_owned(SD_MUTEX(un))); 14515 14516 #ifdef SD_FAULT_INJECTION 14517 14518 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14519 /* SD FaultInjection */ 14520 sd_faultinjection(pktp); 14521 14522 #endif /* SD_FAULT_INJECTION */ 14523 14524 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14525 " xp:0x%p, un:0x%p\n", bp, xp, un); 14526 14527 mutex_enter(SD_MUTEX(un)); 14528 14529 /* Reduce the count of the #commands currently in transport */ 14530 un->un_ncmds_in_transport--; 14531 ASSERT(un->un_ncmds_in_transport >= 0); 14532 14533 /* Increment counter to indicate that the callback routine is active */ 14534 un->un_in_callback++; 14535 14536 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14537 14538 #ifdef SDDEBUG 14539 if (bp == un->un_retry_bp) { 14540 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14541 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14542 un, un->un_retry_bp, un->un_ncmds_in_transport); 14543 } 14544 #endif 14545 14546 /* 14547 * If pkt_reason is CMD_DEV_GONE, just fail the command 14548 */ 14549 if (pktp->pkt_reason == CMD_DEV_GONE) { 14550 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14551 "Device is gone\n"); 14552 sd_return_failed_command(un, bp, EIO); 14553 goto exit; 14554 } 14555 14556 /* 14557 * First see if the pkt has auto-request sense data with it.... 14558 * Look at the packet state first so we don't take a performance 14559 * hit looking at the arq enabled flag unless absolutely necessary. 14560 */ 14561 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14562 (un->un_f_arq_enabled == TRUE)) { 14563 /* 14564 * The HBA did an auto request sense for this command so check 14565 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14566 * driver command that should not be retried. 14567 */ 14568 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14569 /* 14570 * Save the relevant sense info into the xp for the 14571 * original cmd. 14572 */ 14573 struct scsi_arq_status *asp; 14574 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14575 xp->xb_sense_status = 14576 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14577 xp->xb_sense_state = asp->sts_rqpkt_state; 14578 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14579 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14580 min(sizeof (struct scsi_extended_sense), 14581 SENSE_LENGTH)); 14582 14583 /* fail the command */ 14584 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14585 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14586 sd_return_failed_command(un, bp, EIO); 14587 goto exit; 14588 } 14589 14590 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14591 /* 14592 * We want to either retry or fail this command, so free 14593 * the DMA resources here. If we retry the command then 14594 * the DMA resources will be reallocated in sd_start_cmds(). 14595 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14596 * causes the *entire* transfer to start over again from the 14597 * beginning of the request, even for PARTIAL chunks that 14598 * have already transferred successfully. 
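 *
 * (A hypothetical example of the cost: a 1 MB transfer bound in four
 * 256 KB PKT_DMA_PARTIAL windows starts over from byte 0 on retry,
 * even if the first three windows had already completed; the sizes
 * here are made up for illustration.)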
14599 */ 14600 if ((un->un_f_is_fibre == TRUE) && 14601 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14602 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14603 scsi_dmafree(pktp); 14604 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14605 } 14606 #endif 14607 14608 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14609 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14610 14611 sd_handle_auto_request_sense(un, bp, xp, pktp); 14612 goto exit; 14613 } 14614 14615 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14616 if (pktp->pkt_flags & FLAG_SENSING) { 14617 /* This pktp is from the unit's REQUEST_SENSE command */ 14618 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14619 "sdintr: sd_handle_request_sense\n"); 14620 sd_handle_request_sense(un, bp, xp, pktp); 14621 goto exit; 14622 } 14623 14624 /* 14625 * Check to see if the command successfully completed as requested; 14626 * this is the most common case (and also the hot performance path). 14627 * 14628 * Requirements for successful completion are: 14629 * pkt_reason is CMD_CMPLT and packet status is status good. 14630 * In addition: 14631 * - A residual of zero indicates successful completion no matter what 14632 * the command is. 14633 * - If the residual is not zero and the command is not a read or 14634 * write, then it's still defined as successful completion. In other 14635 * words, if the command is a read or write the residual must be 14636 * zero for successful completion. 14637 * - If the residual is not zero and the command is a read or 14638 * write, and it's a USCSICMD, then it's still defined as 14639 * successful completion. 14640 */ 14641 if ((pktp->pkt_reason == CMD_CMPLT) && 14642 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14643 14644 /* 14645 * Since this command is returned with a good status, we 14646 * can reset the count for Sonoma failover. 14647 */ 14648 un->un_sonoma_failure_count = 0; 14649 14650 /* 14651 * Return all USCSI commands on good status 14652 */ 14653 if (pktp->pkt_resid == 0) { 14654 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14655 "sdintr: returning command for resid == 0\n"); 14656 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 14657 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 14658 SD_UPDATE_B_RESID(bp, pktp); 14659 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14660 "sdintr: returning command for resid != 0\n"); 14661 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14662 SD_UPDATE_B_RESID(bp, pktp); 14663 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14664 "sdintr: returning uscsi command\n"); 14665 } else { 14666 goto not_successful; 14667 } 14668 sd_return_command(un, bp); 14669 14670 /* 14671 * Decrement counter to indicate that the callback routine 14672 * is done. 14673 */ 14674 un->un_in_callback--; 14675 ASSERT(un->un_in_callback >= 0); 14676 mutex_exit(SD_MUTEX(un)); 14677 14678 return; 14679 } 14680 14681 not_successful: 14682 14683 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14684 /* 14685 * The following is based upon knowledge of the underlying transport 14686 * and its use of DMA resources. This code should be removed when 14687 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 14688 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 14689 * and sd_start_cmds(). 14690 * 14691 * Free any DMA resources associated with this command if there 14692 * is a chance it could be retried or enqueued for later retry. 
14693 * If we keep the DMA binding then mpxio cannot reissue the 14694 * command on another path whenever a path failure occurs. 14695 * 14696 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 14697 * causes the *entire* transfer to start over again from the 14698 * beginning of the request, even for PARTIAL chunks that 14699 * have already transferred successfully. 14700 * 14701 * This is only done for non-uscsi commands (and also skipped for the 14702 * driver's internal RQS command). Also just do this for Fibre Channel 14703 * devices as these are the only ones that support mpxio. 14704 */ 14705 if ((un->un_f_is_fibre == TRUE) && 14706 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14707 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14708 scsi_dmafree(pktp); 14709 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14710 } 14711 #endif 14712 14713 /* 14714 * The command did not successfully complete as requested so check 14715 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14716 * driver command that should not be retried so just return. If 14717 * FLAG_DIAGNOSE is not set the error will be processed below. 14718 */ 14719 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14720 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14721 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 14722 /* 14723 * Issue a request sense if a check condition caused the error 14724 * (we handle the auto request sense case above), otherwise 14725 * just fail the command. 14726 */ 14727 if ((pktp->pkt_reason == CMD_CMPLT) && 14728 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 14729 sd_send_request_sense_command(un, bp, pktp); 14730 } else { 14731 sd_return_failed_command(un, bp, EIO); 14732 } 14733 goto exit; 14734 } 14735 14736 /* 14737 * The command did not successfully complete as requested so process 14738 * the error, retry, and/or attempt recovery. 
14739 */ 14740 switch (pktp->pkt_reason) { 14741 case CMD_CMPLT: 14742 switch (SD_GET_PKT_STATUS(pktp)) { 14743 case STATUS_GOOD: 14744 /* 14745 * The command completed successfully with a non-zero 14746 * residual 14747 */ 14748 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14749 "sdintr: STATUS_GOOD \n"); 14750 sd_pkt_status_good(un, bp, xp, pktp); 14751 break; 14752 14753 case STATUS_CHECK: 14754 case STATUS_TERMINATED: 14755 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14756 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 14757 sd_pkt_status_check_condition(un, bp, xp, pktp); 14758 break; 14759 14760 case STATUS_BUSY: 14761 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14762 "sdintr: STATUS_BUSY\n"); 14763 sd_pkt_status_busy(un, bp, xp, pktp); 14764 break; 14765 14766 case STATUS_RESERVATION_CONFLICT: 14767 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14768 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 14769 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 14770 break; 14771 14772 case STATUS_QFULL: 14773 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14774 "sdintr: STATUS_QFULL\n"); 14775 sd_pkt_status_qfull(un, bp, xp, pktp); 14776 break; 14777 14778 case STATUS_MET: 14779 case STATUS_INTERMEDIATE: 14780 case STATUS_SCSI2: 14781 case STATUS_INTERMEDIATE_MET: 14782 case STATUS_ACA_ACTIVE: 14783 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14784 "Unexpected SCSI status received: 0x%x\n", 14785 SD_GET_PKT_STATUS(pktp)); 14786 sd_return_failed_command(un, bp, EIO); 14787 break; 14788 14789 default: 14790 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 14791 "Invalid SCSI status received: 0x%x\n", 14792 SD_GET_PKT_STATUS(pktp)); 14793 sd_return_failed_command(un, bp, EIO); 14794 break; 14795 14796 } 14797 break; 14798 14799 case CMD_INCOMPLETE: 14800 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14801 "sdintr: CMD_INCOMPLETE\n"); 14802 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 14803 break; 14804 case CMD_TRAN_ERR: 14805 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14806 "sdintr: CMD_TRAN_ERR\n"); 14807 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 14808 break; 14809 case CMD_RESET: 14810 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14811 "sdintr: CMD_RESET \n"); 14812 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 14813 break; 14814 case CMD_ABORTED: 14815 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14816 "sdintr: CMD_ABORTED \n"); 14817 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 14818 break; 14819 case CMD_TIMEOUT: 14820 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14821 "sdintr: CMD_TIMEOUT\n"); 14822 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 14823 break; 14824 case CMD_UNX_BUS_FREE: 14825 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14826 "sdintr: CMD_UNX_BUS_FREE \n"); 14827 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 14828 break; 14829 case CMD_TAG_REJECT: 14830 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14831 "sdintr: CMD_TAG_REJECT\n"); 14832 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 14833 break; 14834 default: 14835 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14836 "sdintr: default\n"); 14837 sd_pkt_reason_default(un, bp, xp, pktp); 14838 break; 14839 } 14840 14841 exit: 14842 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 14843 14844 /* Decrement counter to indicate that the callback routine is done. */ 14845 un->un_in_callback--; 14846 ASSERT(un->un_in_callback >= 0); 14847 14848 /* 14849 * At this point, the pkt has been dispatched, i.e., it is either 14850 * being retried or has been returned to its caller and should 14851 * not be referenced. 14852 */ 14853 14854 mutex_exit(SD_MUTEX(un)); 14855 } 14856 14857
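/*
 * Illustrative sketch (hypothetical, not part of the driver build): the
 * sd_print_*_msg() routines that follow all share the message-callback
 * signature expected by sd_retry_command(). A minimal callback of that
 * shape might look like this; the "example" name is a placeholder.
 */
#if 0
/* ARGSUSED */
static void
sd_print_example_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	/* Log only when the command is being given up on */
	if (code == SD_NO_RETRY_ISSUED) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "example: giving up on command\n");
	}
}
#endif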
14858 /* 14859 * Function: sd_print_incomplete_msg 14860 * 14861 * Description: Prints the error message for a CMD_INCOMPLETE error. 14862 * 14863 * Arguments: un - ptr to associated softstate for the device. 14864 * bp - ptr to the buf(9S) for the command. 14865 * arg - message string ptr 14866 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 14867 * or SD_NO_RETRY_ISSUED. 14868 * 14869 * Context: May be called under interrupt context 14870 */ 14871 14872 static void 14873 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 14874 { 14875 struct scsi_pkt *pktp; 14876 char *msgp; 14877 char *cmdp = arg; 14878 14879 ASSERT(un != NULL); 14880 ASSERT(mutex_owned(SD_MUTEX(un))); 14881 ASSERT(bp != NULL); 14882 ASSERT(arg != NULL); 14883 pktp = SD_GET_PKTP(bp); 14884 ASSERT(pktp != NULL); 14885 14886 switch (code) { 14887 case SD_DELAYED_RETRY_ISSUED: 14888 case SD_IMMEDIATE_RETRY_ISSUED: 14889 msgp = "retrying"; 14890 break; 14891 case SD_NO_RETRY_ISSUED: 14892 default: 14893 msgp = "giving up"; 14894 break; 14895 } 14896 14897 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 14898 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14899 "incomplete %s- %s\n", cmdp, msgp); 14900 } 14901 } 14902 14903 14904 14905 /* 14906 * Function: sd_pkt_status_good 14907 * 14908 * Description: Processing for a STATUS_GOOD code in pkt_status. 14909 * 14910 * Context: May be called under interrupt context 14911 */ 14912 14913 static void 14914 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 14915 struct sd_xbuf *xp, struct scsi_pkt *pktp) 14916 { 14917 char *cmdp; 14918 14919 ASSERT(un != NULL); 14920 ASSERT(mutex_owned(SD_MUTEX(un))); 14921 ASSERT(bp != NULL); 14922 ASSERT(xp != NULL); 14923 ASSERT(pktp != NULL); 14924 ASSERT(pktp->pkt_reason == CMD_CMPLT); 14925 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 14926 ASSERT(pktp->pkt_resid != 0); 14927 14928 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 14929 14930 SD_UPDATE_ERRSTATS(un, sd_harderrs); 14931 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 14932 case SCMD_READ: 14933 cmdp = "read"; 14934 break; 14935 case SCMD_WRITE: 14936 cmdp = "write"; 14937 break; 14938 default: 14939 SD_UPDATE_B_RESID(bp, pktp); 14940 sd_return_command(un, bp); 14941 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 14942 return; 14943 } 14944 14945 /* 14946 * See if we can retry the read/write, preferably immediately. 14947 * If retries are exhausted, then sd_retry_command() will update 14948 * the b_resid count. 14949 */ 14950 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 14951 cmdp, EIO, (clock_t)0, NULL); 14952 14953 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 14954 } 14955 14956 14957 14958 14959 14960 /* 14961 * Function: sd_handle_request_sense 14962 * 14963 * Description: Processing for non-auto Request Sense command.
14964 * 14965 * Arguments: un - ptr to associated softstate 14966 * sense_bp - ptr to buf(9S) for the RQS command 14967 * sense_xp - ptr to the sd_xbuf for the RQS command 14968 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 14969 * 14970 * Context: May be called under interrupt context 14971 */ 14972 14973 static void 14974 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 14975 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 14976 { 14977 struct buf *cmd_bp; /* buf for the original command */ 14978 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 14979 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 14980 14981 ASSERT(un != NULL); 14982 ASSERT(mutex_owned(SD_MUTEX(un))); 14983 ASSERT(sense_bp != NULL); 14984 ASSERT(sense_xp != NULL); 14985 ASSERT(sense_pktp != NULL); 14986 14987 /* 14988 * Note the sense_bp, sense_xp, and sense_pktp here are for the 14989 * RQS command and not the original command. 14990 */ 14991 ASSERT(sense_pktp == un->un_rqs_pktp); 14992 ASSERT(sense_bp == un->un_rqs_bp); 14993 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 14994 (FLAG_SENSING | FLAG_HEAD)); 14995 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 14996 FLAG_SENSING) == FLAG_SENSING); 14997 14998 /* These are the bp, xp, and pktp for the original command */ 14999 cmd_bp = sense_xp->xb_sense_bp; 15000 cmd_xp = SD_GET_XBUF(cmd_bp); 15001 cmd_pktp = SD_GET_PKTP(cmd_bp); 15002 15003 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15004 /* 15005 * The REQUEST SENSE command failed. Release the REQUEST 15006 * SENSE command for re-use, get back the bp for the original 15007 * command, and attempt to re-try the original command if 15008 * FLAG_DIAGNOSE is not set in the original packet. 15009 */ 15010 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15011 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15012 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15013 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15014 NULL, NULL, EIO, (clock_t)0, NULL); 15015 return; 15016 } 15017 } 15018 15019 /* 15020 * Save the relevant sense info into the xp for the original cmd. 15021 * 15022 * Note: if the request sense failed the state info will be zero 15023 * as set in sd_mark_rqs_busy() 15024 */ 15025 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15026 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15027 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15028 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 15029 15030 /* 15031 * Free up the RQS command.... 15032 * NOTE: 15033 * Must do this BEFORE calling sd_validate_sense_data! 15034 * sd_validate_sense_data may return the original command in 15035 * which case the pkt will be freed and the flags can no 15036 * longer be touched. 15037 * SD_MUTEX is held through this process until the command 15038 * is dispatched based upon the sense data, so there are 15039 * no race conditions. 15040 */ 15041 (void) sd_mark_rqs_idle(un, sense_xp); 15042 15043 /* 15044 * For a retryable command see if we have valid sense data, if so then 15045 * turn it over to sd_decode_sense() to figure out the right course of 15046 * action. Just fail a non-retryable command. 
15047 */ 15048 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15049 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 15050 SD_SENSE_DATA_IS_VALID) { 15051 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15052 } 15053 } else { 15054 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15055 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15056 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15057 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15058 sd_return_failed_command(un, cmd_bp, EIO); 15059 } 15060 } 15061 15062 15063 15064 15065 /* 15066 * Function: sd_handle_auto_request_sense 15067 * 15068 * Description: Processing for auto-request sense information. 15069 * 15070 * Arguments: un - ptr to associated softstate 15071 * bp - ptr to buf(9S) for the command 15072 * xp - ptr to the sd_xbuf for the command 15073 * pktp - ptr to the scsi_pkt(9S) for the command 15074 * 15075 * Context: May be called under interrupt context 15076 */ 15077 15078 static void 15079 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15080 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15081 { 15082 struct scsi_arq_status *asp; 15083 15084 ASSERT(un != NULL); 15085 ASSERT(mutex_owned(SD_MUTEX(un))); 15086 ASSERT(bp != NULL); 15087 ASSERT(xp != NULL); 15088 ASSERT(pktp != NULL); 15089 ASSERT(pktp != un->un_rqs_pktp); 15090 ASSERT(bp != un->un_rqs_bp); 15091 15092 /* 15093 * For auto-request sense, we get a scsi_arq_status back from 15094 * the HBA, with the sense data in the sts_sensedata member. 15095 * The pkt_scbp of the packet points to this scsi_arq_status. 15096 */ 15097 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15098 15099 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15100 /* 15101 * The auto REQUEST SENSE failed; see if we can re-try 15102 * the original command. 15103 */ 15104 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15105 "auto request sense failed (reason=%s)\n", 15106 scsi_rname(asp->sts_rqpkt_reason)); 15107 15108 sd_reset_target(un, pktp); 15109 15110 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15111 NULL, NULL, EIO, (clock_t)0, NULL); 15112 return; 15113 } 15114 15115 /* Save the relevant sense info into the xp for the original cmd. */ 15116 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15117 xp->xb_sense_state = asp->sts_rqpkt_state; 15118 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15119 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15120 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 15121 15122 /* 15123 * See if we have valid sense data, if so then turn it over to 15124 * sd_decode_sense() to figure out the right course of action. 15125 */ 15126 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 15127 sd_decode_sense(un, bp, xp, pktp); 15128 } 15129 } 15130 15131 15132 /* 15133 * Function: sd_print_sense_failed_msg 15134 * 15135 * Description: Print log message when RQS has failed. 
15136 * 15137 * Arguments: un - ptr to associated softstate 15138 * bp - ptr to buf(9S) for the command 15139 * arg - generic message string ptr 15140 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15141 * or SD_NO_RETRY_ISSUED 15142 * 15143 * Context: May be called from interrupt context 15144 */ 15145 15146 static void 15147 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15148 int code) 15149 { 15150 char *msgp = arg; 15151 15152 ASSERT(un != NULL); 15153 ASSERT(mutex_owned(SD_MUTEX(un))); 15154 ASSERT(bp != NULL); 15155 15156 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15157 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15158 } 15159 } 15160 15161 15162 /* 15163 * Function: sd_validate_sense_data 15164 * 15165 * Description: Check the given sense data for validity. 15166 * If the sense data is not valid, the command will 15167 * be either failed or retried! 15168 * 15169 * Return Code: SD_SENSE_DATA_IS_INVALID 15170 * SD_SENSE_DATA_IS_VALID 15171 * 15172 * Context: May be called from interrupt context 15173 */ 15174 15175 static int 15176 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 15177 { 15178 struct scsi_extended_sense *esp; 15179 struct scsi_pkt *pktp; 15180 size_t actual_len; 15181 char *msgp = NULL; 15182 15183 ASSERT(un != NULL); 15184 ASSERT(mutex_owned(SD_MUTEX(un))); 15185 ASSERT(bp != NULL); 15186 ASSERT(bp != un->un_rqs_bp); 15187 ASSERT(xp != NULL); 15188 15189 pktp = SD_GET_PKTP(bp); 15190 ASSERT(pktp != NULL); 15191 15192 /* 15193 * Check the status of the RQS command (auto or manual). 15194 */ 15195 switch (xp->xb_sense_status & STATUS_MASK) { 15196 case STATUS_GOOD: 15197 break; 15198 15199 case STATUS_RESERVATION_CONFLICT: 15200 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15201 return (SD_SENSE_DATA_IS_INVALID); 15202 15203 case STATUS_BUSY: 15204 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15205 "Busy Status on REQUEST SENSE\n"); 15206 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15207 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15208 return (SD_SENSE_DATA_IS_INVALID); 15209 15210 case STATUS_QFULL: 15211 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15212 "QFULL Status on REQUEST SENSE\n"); 15213 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15214 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15215 return (SD_SENSE_DATA_IS_INVALID); 15216 15217 case STATUS_CHECK: 15218 case STATUS_TERMINATED: 15219 msgp = "Check Condition on REQUEST SENSE\n"; 15220 goto sense_failed; 15221 15222 default: 15223 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15224 goto sense_failed; 15225 } 15226 15227 /* 15228 * See if we got the minimum required amount of sense data. 15229 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15230 * or less. 
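 *
 * (Worked example, assuming a hypothetical SENSE_LENGTH of 20 bytes:
 * an xb_sense_resid of 2 yields an actual_len of 18, which passes the
 * SUN_MIN_SENSE_LENGTH test below, while a resid of 20 yields 0 and
 * the request is treated as having returned no sense data.)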
15231 */ 15232 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 15233 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15234 (actual_len == 0)) { 15235 msgp = "Request Sense couldn't get sense data\n"; 15236 goto sense_failed; 15237 } 15238 15239 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15240 msgp = "Not enough sense information\n"; 15241 goto sense_failed; 15242 } 15243 15244 /* 15245 * We require the extended sense data 15246 */ 15247 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15248 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15249 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15250 static char tmp[8]; 15251 static char buf[148]; 15252 char *p = (char *)(xp->xb_sense_data); 15253 int i; 15254 15255 mutex_enter(&sd_sense_mutex); 15256 (void) strcpy(buf, "undecodable sense information:"); 15257 for (i = 0; i < actual_len; i++) { 15258 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15259 (void) strcpy(&buf[strlen(buf)], tmp); 15260 } 15261 i = strlen(buf); 15262 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15263 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15264 mutex_exit(&sd_sense_mutex); 15265 } 15266 /* Note: Legacy behavior, fail the command with no retry */ 15267 sd_return_failed_command(un, bp, EIO); 15268 return (SD_SENSE_DATA_IS_INVALID); 15269 } 15270 15271 /* 15272 * Check that es_code is valid (es_class concatenated with es_code 15273 * makes up the "response code" field); es_class will always be 7, so 15274 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15275 * format. 15276 */ 15277 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15278 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15279 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15280 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15281 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15282 goto sense_failed; 15283 } 15284 15285 return (SD_SENSE_DATA_IS_VALID); 15286 15287 sense_failed: 15288 /* 15289 * If the request sense failed (for whatever reason), attempt 15290 * to retry the original command. 15291 */ 15292 #if defined(__i386) || defined(__amd64) 15293 /* 15294 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15295 * sddef.h for the SPARC platform, while x86 uses one binary 15296 * for both SCSI and FC. 15297 * The SD_RETRY_DELAY value needs to be adjusted here 15298 * whenever SD_RETRY_DELAY changes in sddef.h. 15299 */ 15300 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15301 sd_print_sense_failed_msg, msgp, EIO, 15302 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15303 #else 15304 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15305 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15306 #endif 15307 15308 return (SD_SENSE_DATA_IS_INVALID); 15309 } 15310 15311 15312 15313 /* 15314 * Function: sd_decode_sense 15315 * 15316 * Description: Take recovery action(s) when SCSI Sense Data is received. 15317 * 15318 * Context: Interrupt context.
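 *
 *		(Dispatch sketch: the switch below routes each sense key
 *		to a dedicated sd_sense_key_*() handler; some keys share
 *		a handler, e.g. KEY_MEDIUM_ERROR and KEY_HARDWARE_ERROR,
 *		and keys with no specific recovery, e.g. KEY_VENDOR_UNIQUE
 *		or KEY_COPY_ABORTED, are handled by sd_sense_key_default().)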
15319 */ 15320 15321 static void 15322 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15323 struct scsi_pkt *pktp) 15324 { 15325 uint8_t sense_key; 15326 15327 ASSERT(un != NULL); 15328 ASSERT(mutex_owned(SD_MUTEX(un))); 15329 ASSERT(bp != NULL); 15330 ASSERT(bp != un->un_rqs_bp); 15331 ASSERT(xp != NULL); 15332 ASSERT(pktp != NULL); 15333 15334 sense_key = scsi_sense_key(xp->xb_sense_data); 15335 15336 switch (sense_key) { 15337 case KEY_NO_SENSE: 15338 sd_sense_key_no_sense(un, bp, xp, pktp); 15339 break; 15340 case KEY_RECOVERABLE_ERROR: 15341 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15342 bp, xp, pktp); 15343 break; 15344 case KEY_NOT_READY: 15345 sd_sense_key_not_ready(un, xp->xb_sense_data, 15346 bp, xp, pktp); 15347 break; 15348 case KEY_MEDIUM_ERROR: 15349 case KEY_HARDWARE_ERROR: 15350 sd_sense_key_medium_or_hardware_error(un, 15351 xp->xb_sense_data, bp, xp, pktp); 15352 break; 15353 case KEY_ILLEGAL_REQUEST: 15354 sd_sense_key_illegal_request(un, bp, xp, pktp); 15355 break; 15356 case KEY_UNIT_ATTENTION: 15357 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15358 bp, xp, pktp); 15359 break; 15360 case KEY_WRITE_PROTECT: 15361 case KEY_VOLUME_OVERFLOW: 15362 case KEY_MISCOMPARE: 15363 sd_sense_key_fail_command(un, bp, xp, pktp); 15364 break; 15365 case KEY_BLANK_CHECK: 15366 sd_sense_key_blank_check(un, bp, xp, pktp); 15367 break; 15368 case KEY_ABORTED_COMMAND: 15369 sd_sense_key_aborted_command(un, bp, xp, pktp); 15370 break; 15371 case KEY_VENDOR_UNIQUE: 15372 case KEY_COPY_ABORTED: 15373 case KEY_EQUAL: 15374 case KEY_RESERVED: 15375 default: 15376 sd_sense_key_default(un, xp->xb_sense_data, 15377 bp, xp, pktp); 15378 break; 15379 } 15380 } 15381 15382
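/*
 * Illustrative sketch (hypothetical, user-level, not part of the driver
 * build): the chunking arithmetic used by sd_dump_memory() below, with
 * printf standing in for scsi_log and a fixed hex format. The buffer
 * size mirrors SD_DUMP_MEMORY_BUF_SIZE; everything else is made up.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define	EXAMPLE_BUF_SIZE	256

static void
example_dump(const char *title, const unsigned char *data, int len)
{
	char buf[EXAMPLE_BUF_SIZE];
	int entry_len = (int)strlen(" 0x00");	/* width of one " 0x%02x" */
	int avail = (EXAMPLE_BUF_SIZE - (int)strlen(title) - 3) / entry_len;
	int i, j = 0;

	/* Emit as many full lines as needed to cover the data block */
	while (j < len) {
		char *p = buf + sprintf(buf, "%s:", title);
		for (i = 0; i < avail && j < len; i++, j++)
			p += sprintf(p, " 0x%02x", data[j]);
		(void) printf("%s\n", buf);
	}
}
#endif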
15383 /* 15384 * Function: sd_dump_memory 15385 * 15386 * Description: Debug logging routine to print the contents of a user provided 15387 * buffer. The output of the buffer is broken up into 256 byte 15388 * segments due to a size constraint of the scsi_log 15389 * implementation. 15390 * 15391 * Arguments: un - ptr to softstate 15392 * comp - component mask 15393 * title - "title" string to precede data when printed 15394 * data - ptr to data block to be printed 15395 * len - size of data block to be printed 15396 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15397 * 15398 * Context: May be called from interrupt context 15399 */ 15400 15401 #define SD_DUMP_MEMORY_BUF_SIZE 256 15402 15403 static char *sd_dump_format_string[] = { 15404 " 0x%02x", 15405 " %c" 15406 }; 15407 15408 static void 15409 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15410 int len, int fmt) 15411 { 15412 int i, j; 15413 int avail_count; 15414 int start_offset; 15415 int end_offset; 15416 size_t entry_len; 15417 char *bufp; 15418 char *local_buf; 15419 char *format_string; 15420 15421 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15422 15423 /* 15424 * In the debug version of the driver, this function is called from a 15425 * number of places which are NOPs in the release driver. 15426 * The debug driver therefore has additional methods of filtering 15427 * debug output. 15428 */ 15429 #ifdef SDDEBUG 15430 /* 15431 * In the debug version of the driver we can reduce the amount of debug 15432 * messages by setting sd_error_level to something other than 15433 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15434 * sd_component_mask. 15435 */ 15436 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15437 (sd_error_level != SCSI_ERR_ALL)) { 15438 return; 15439 } 15440 if (((sd_component_mask & comp) == 0) || 15441 (sd_error_level != SCSI_ERR_ALL)) { 15442 return; 15443 } 15444 #else 15445 if (sd_error_level != SCSI_ERR_ALL) { 15446 return; 15447 } 15448 #endif 15449 15450 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15451 bufp = local_buf; 15452 /* 15453 * Available length is the length of local_buf[], minus the 15454 * length of the title string, minus one for the ":", minus 15455 * one for the newline, minus one for the NULL terminator. 15456 * This gives the #bytes available for holding the printed 15457 * values from the given data buffer. 15458 */ 15459 if (fmt == SD_LOG_HEX) { 15460 format_string = sd_dump_format_string[0]; 15461 } else /* SD_LOG_CHAR */ { 15462 format_string = sd_dump_format_string[1]; 15463 } 15464 /* 15465 * Available count is the number of elements from the given 15466 * data buffer that we can fit into the available length. 15467 * This is based upon the size of the format string used. 15468 * Make one entry and find its size. 15469 */ 15470 (void) sprintf(bufp, format_string, data[0]); 15471 entry_len = strlen(bufp); 15472 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15473 15474 j = 0; 15475 while (j < len) { 15476 bufp = local_buf; 15477 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15478 start_offset = j; 15479 15480 end_offset = start_offset + avail_count; 15481 15482 (void) sprintf(bufp, "%s:", title); 15483 bufp += strlen(bufp); 15484 for (i = start_offset; ((i < end_offset) && (j < len)); 15485 i++, j++) { 15486 (void) sprintf(bufp, format_string, data[i]); 15487 bufp += entry_len; 15488 } 15489 (void) sprintf(bufp, "\n"); 15490 15491 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15492 } 15493 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15494 } 15495 15496 /* 15497 * Function: sd_print_sense_msg 15498 * 15499 * Description: Log a message based upon the given sense data.
15500 * 15501 * Arguments: un - ptr to associated softstate 15502 * bp - ptr to buf(9S) for the command 15503 * arg - ptr to the associated sd_sense_info struct 15504 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15505 * or SD_NO_RETRY_ISSUED 15506 * 15507 * Context: May be called from interrupt context 15508 */ 15509 15510 static void 15511 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15512 { 15513 struct sd_xbuf *xp; 15514 struct scsi_pkt *pktp; 15515 uint8_t *sensep; 15516 daddr_t request_blkno; 15517 diskaddr_t err_blkno; 15518 int severity; 15519 int pfa_flag; 15520 extern struct scsi_key_strings scsi_cmds[]; 15521 15522 ASSERT(un != NULL); 15523 ASSERT(mutex_owned(SD_MUTEX(un))); 15524 ASSERT(bp != NULL); 15525 xp = SD_GET_XBUF(bp); 15526 ASSERT(xp != NULL); 15527 pktp = SD_GET_PKTP(bp); 15528 ASSERT(pktp != NULL); 15529 ASSERT(arg != NULL); 15530 15531 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15532 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15533 15534 if ((code == SD_DELAYED_RETRY_ISSUED) || 15535 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15536 severity = SCSI_ERR_RETRYABLE; 15537 } 15538 15539 /* Use absolute block number for the request block number */ 15540 request_blkno = xp->xb_blkno; 15541 15542 /* 15543 * Now try to get the error block number from the sense data 15544 */ 15545 sensep = xp->xb_sense_data; 15546 15547 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15548 (uint64_t *)&err_blkno)) { 15549 /* 15550 * We retrieved the error block number from the information 15551 * portion of the sense data. 15552 * 15553 * For USCSI commands we are better off using the error 15554 * block no. as the requested block no. (This is the best 15555 * we can estimate.) 15556 */ 15557 if ((SD_IS_BUFIO(xp) == FALSE) && 15558 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15559 request_blkno = err_blkno; 15560 } 15561 } else { 15562 /* 15563 * Without the es_valid bit set (for fixed format) or an 15564 * information descriptor (for descriptor format) we cannot 15565 * be certain of the error blkno, so just use the 15566 * request_blkno. 15567 */ 15568 err_blkno = (diskaddr_t)request_blkno; 15569 } 15570 15571 /* 15572 * The following will log the buffer contents for the release driver 15573 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15574 * level is set to verbose.
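 *
 * (For illustration, the resulting log entries look roughly like
 *	Failed CDB: 0x28 0x00 0x00 0x12 0x34 0x56 0x00 0x00 0x08 0x00
 *	Sense Data: 0x70 0x00 0x03 0x00 ...
 * with the actual bytes depending on the command and device; the
 * values shown here are made up.)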
15575 */ 15576 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15577 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15578 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15579 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15580 15581 if (pfa_flag == FALSE) { 15582 /* This is normally only set for USCSI */ 15583 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15584 return; 15585 } 15586 15587 if ((SD_IS_BUFIO(xp) == TRUE) && 15588 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15589 (severity < sd_error_level))) { 15590 return; 15591 } 15592 } 15593 15594 /* 15595 * Check for Sonoma Failover and keep a count of how many failed I/O's 15596 */ 15597 if ((SD_IS_LSI(un)) && 15598 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15599 (scsi_sense_asc(sensep) == 0x94) && 15600 (scsi_sense_ascq(sensep) == 0x01)) { 15601 un->un_sonoma_failure_count++; 15602 if (un->un_sonoma_failure_count > 1) { 15603 return; 15604 } 15605 } 15606 15607 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15608 request_blkno, err_blkno, scsi_cmds, 15609 (struct scsi_extended_sense *)sensep, 15610 un->un_additional_codes, NULL); 15611 } 15612 15613 /* 15614 * Function: sd_sense_key_no_sense 15615 * 15616 * Description: Recovery action when sense data was not received. 15617 * 15618 * Context: May be called from interrupt context 15619 */ 15620 15621 static void 15622 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 15623 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15624 { 15625 struct sd_sense_info si; 15626 15627 ASSERT(un != NULL); 15628 ASSERT(mutex_owned(SD_MUTEX(un))); 15629 ASSERT(bp != NULL); 15630 ASSERT(xp != NULL); 15631 ASSERT(pktp != NULL); 15632 15633 si.ssi_severity = SCSI_ERR_FATAL; 15634 si.ssi_pfa_flag = FALSE; 15635 15636 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15637 15638 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15639 &si, EIO, (clock_t)0, NULL); 15640 } 15641 15642 15643 /* 15644 * Function: sd_sense_key_recoverable_error 15645 * 15646 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 15647 * 15648 * Context: May be called from interrupt context 15649 */ 15650 15651 static void 15652 sd_sense_key_recoverable_error(struct sd_lun *un, 15653 uint8_t *sense_datap, 15654 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15655 { 15656 struct sd_sense_info si; 15657 uint8_t asc = scsi_sense_asc(sense_datap); 15658 15659 ASSERT(un != NULL); 15660 ASSERT(mutex_owned(SD_MUTEX(un))); 15661 ASSERT(bp != NULL); 15662 ASSERT(xp != NULL); 15663 ASSERT(pktp != NULL); 15664 15665 /* 15666 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 15667 */ 15668 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 15669 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 15670 si.ssi_severity = SCSI_ERR_INFO; 15671 si.ssi_pfa_flag = TRUE; 15672 } else { 15673 SD_UPDATE_ERRSTATS(un, sd_softerrs); 15674 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 15675 si.ssi_severity = SCSI_ERR_RECOVERED; 15676 si.ssi_pfa_flag = FALSE; 15677 } 15678 15679 if (pktp->pkt_resid == 0) { 15680 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15681 sd_return_command(un, bp); 15682 return; 15683 } 15684 15685 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 15686 &si, EIO, (clock_t)0, NULL); 15687 } 15688 15689 15690 15691 15692 /* 15693 * Function: sd_sense_key_not_ready 15694 * 15695 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
15696 * 15697 * Context: May be called from interrupt context 15698 */ 15699 15700 static void 15701 sd_sense_key_not_ready(struct sd_lun *un, 15702 uint8_t *sense_datap, 15703 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15704 { 15705 struct sd_sense_info si; 15706 uint8_t asc = scsi_sense_asc(sense_datap); 15707 uint8_t ascq = scsi_sense_ascq(sense_datap); 15708 15709 ASSERT(un != NULL); 15710 ASSERT(mutex_owned(SD_MUTEX(un))); 15711 ASSERT(bp != NULL); 15712 ASSERT(xp != NULL); 15713 ASSERT(pktp != NULL); 15714 15715 si.ssi_severity = SCSI_ERR_FATAL; 15716 si.ssi_pfa_flag = FALSE; 15717 15718 /* 15719 * Update error stats after first NOT READY error. Disks may have 15720 * been powered down and may need to be restarted. For CDROMs, 15721 * report NOT READY errors only if media is present. 15722 */ 15723 if ((ISCD(un) && (asc == 0x3A)) || 15724 (xp->xb_retry_count > 0)) { 15725 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15726 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 15727 } 15728 15729 /* 15730 * Just fail if the "not ready" retry limit has been reached. 15731 */ 15732 if (xp->xb_retry_count >= un->un_notready_retry_count) { 15733 /* Special check for error message printing for removables. */ 15734 if (un->un_f_has_removable_media && (asc == 0x04) && 15735 (ascq >= 0x04)) { 15736 si.ssi_severity = SCSI_ERR_ALL; 15737 } 15738 goto fail_command; 15739 } 15740 15741 /* 15742 * Check the ASC and ASCQ in the sense data as needed, to determine 15743 * what to do. 15744 */ 15745 switch (asc) { 15746 case 0x04: /* LOGICAL UNIT NOT READY */ 15747 /* 15748 * Disk drives that don't spin up result in a very long delay 15749 * in format without warning messages. We will log a message 15750 * if the error level is set to verbose. 15751 */ 15752 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15753 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15754 "logical unit not ready, resetting disk\n"); 15755 } 15756 15757 /* 15758 * There are different requirements for CDROMs and disks for 15759 * the number of retries. If a CD-ROM is giving this, it is 15760 * probably reading TOC and is in the process of getting 15761 * ready, so we should keep on trying for a long time to make 15762 * sure that all types of media are taken into account (for 15763 * some media the drive takes a long time to read TOC). For 15764 * disks we do not want to retry this too many times as this 15765 * can cause a long hang in format when the drive refuses to 15766 * spin up (a very common failure). 15767 */ 15768 switch (ascq) { 15769 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 15770 /* 15771 * Disk drives frequently refuse to spin up which 15772 * results in a very long hang in format without 15773 * warning messages. 15774 * 15775 * Note: This code preserves the legacy behavior of 15776 * comparing xb_retry_count against zero for fibre 15777 * channel targets instead of comparing against the 15778 * un_reset_retry_count value. The reason for this 15779 * discrepancy has been so utterly lost beneath the 15780 * Sands of Time that even Indiana Jones could not 15781 * find it.
15782 */ 15783 if (un->un_f_is_fibre == TRUE) { 15784 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15785 (xp->xb_retry_count > 0)) && 15786 (un->un_startstop_timeid == NULL)) { 15787 scsi_log(SD_DEVINFO(un), sd_label, 15788 CE_WARN, "logical unit not ready, " 15789 "resetting disk\n"); 15790 sd_reset_target(un, pktp); 15791 } 15792 } else { 15793 if (((sd_level_mask & SD_LOGMASK_DIAG) || 15794 (xp->xb_retry_count > 15795 un->un_reset_retry_count)) && 15796 (un->un_startstop_timeid == NULL)) { 15797 scsi_log(SD_DEVINFO(un), sd_label, 15798 CE_WARN, "logical unit not ready, " 15799 "resetting disk\n"); 15800 sd_reset_target(un, pktp); 15801 } 15802 } 15803 break; 15804 15805 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 15806 /* 15807 * If the target is in the process of becoming 15808 * ready, just proceed with the retry. This can 15809 * happen with CD-ROMs that take a long time to 15810 * read TOC after a power cycle or reset. 15811 */ 15812 goto do_retry; 15813 15814 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 15815 break; 15816 15817 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 15818 /* 15819 * Retries cannot help here so just fail right away. 15820 */ 15821 goto fail_command; 15822 15823 case 0x88: 15824 /* 15825 * Vendor-unique code for T3/T4: it indicates a 15826 * path problem in a multipathed config, but as far as 15827 * the target driver is concerned it equates to a fatal 15828 * error, so we should just fail the command right away 15829 * (without printing anything to the console). If this 15830 * is not a T3/T4, fall thru to the default recovery 15831 * action. 15832 * T3/T4 is FC only, so there is no need to check is_fibre. 15833 */ 15834 if (SD_IS_T3(un) || SD_IS_T4(un)) { 15835 sd_return_failed_command(un, bp, EIO); 15836 return; 15837 } 15838 /* FALLTHRU */ 15839 15840 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 15841 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 15842 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 15843 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 15844 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 15845 default: /* Possible future codes in SCSI spec? */ 15846 /* 15847 * For removable-media devices, do not retry if 15848 * ASCQ > 2 as these result mostly from USCSI commands 15849 * on MMC devices issued to check status of an 15850 * operation initiated in immediate mode. Also for 15851 * ASCQ >= 4 do not print console messages as these 15852 * mainly represent a user-initiated operation 15853 * instead of a system failure. 15854 */ 15855 if (un->un_f_has_removable_media) { 15856 si.ssi_severity = SCSI_ERR_ALL; 15857 goto fail_command; 15858 } 15859 break; 15860 } 15861 15862 /* 15863 * As part of our recovery attempt for the NOT READY 15864 * condition, we issue a START STOP UNIT command. However 15865 * we want to wait for a short delay before attempting this 15866 * as there may still be more commands coming back from the 15867 * target with the check condition. To do this we use 15868 * timeout(9F) to call sd_start_stop_unit_callback() after 15869 * the delay interval expires. (sd_start_stop_unit_callback() 15870 * dispatches sd_start_stop_unit_task(), which will issue 15871 * the actual START STOP UNIT command. The delay interval 15872 * is one-half of the delay that we will use to retry the 15873 * command that generated the NOT READY condition.)
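 *
 *		(Sketch of the sequence, with D standing for the retry
 *		delay of the failed command:
 *			t = 0	NOT READY check condition arrives here
 *			t = D/2	timeout fires; sd_start_stop_unit_callback()
 *				dispatches sd_start_stop_unit_task() via
 *				taskq, which issues the START STOP UNIT
 *		the original command is then retried once the START STOP
 *		UNIT completes.)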
15874 * 15875 * Note that we could just dispatch sd_start_stop_unit_task() 15876 * from here and allow it to sleep for the delay interval, 15877 * but then we would be tying up the taskq thread 15878 * unnecessarily for the duration of the delay. 15879 * 15880 * Do not issue the START STOP UNIT if the current command 15881 * is already a START STOP UNIT. 15882 */ 15883 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 15884 break; 15885 } 15886 15887 /* 15888 * Do not schedule the timeout if one is already pending. 15889 */ 15890 if (un->un_startstop_timeid != NULL) { 15891 SD_INFO(SD_LOG_ERROR, un, 15892 "sd_sense_key_not_ready: restart already issued to" 15893 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 15894 ddi_get_instance(SD_DEVINFO(un))); 15895 break; 15896 } 15897 15898 /* 15899 * Schedule the START STOP UNIT command, then queue the command 15900 * for a retry. 15901 * 15902 * Note: A timeout is not scheduled for this retry because we 15903 * want the retry to be serial with the START_STOP_UNIT. The 15904 * retry will be started when the START_STOP_UNIT is completed 15905 * in sd_start_stop_unit_task. 15906 */ 15907 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 15908 un, SD_BSY_TIMEOUT / 2); 15909 xp->xb_retry_count++; 15910 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 15911 return; 15912 15913 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 15914 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15915 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15916 "unit does not respond to selection\n"); 15917 } 15918 break; 15919 15920 case 0x3A: /* MEDIUM NOT PRESENT */ 15921 if (sd_error_level >= SCSI_ERR_FATAL) { 15922 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15923 "Caddy not inserted in drive\n"); 15924 } 15925 15926 sr_ejected(un); 15927 un->un_mediastate = DKIO_EJECTED; 15928 /* The state has changed, inform the media watch routines */ 15929 cv_broadcast(&un->un_state_cv); 15930 /* Just fail if no media is present in the drive. */ 15931 goto fail_command; 15932 15933 default: 15934 if (sd_error_level < SCSI_ERR_RETRYABLE) { 15935 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15936 "Unit not Ready. Additional sense code 0x%x\n", 15937 asc); 15938 } 15939 break; 15940 } 15941 15942 do_retry: 15943 15944 /* 15945 * Retry the command, as some targets may report NOT READY for 15946 * several seconds after being reset. 15947 */ 15948 xp->xb_retry_count++; 15949 si.ssi_severity = SCSI_ERR_RETRYABLE; 15950 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 15951 &si, EIO, SD_BSY_TIMEOUT, NULL); 15952 15953 return; 15954 15955 fail_command: 15956 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 15957 sd_return_failed_command(un, bp, EIO); 15958 } 15959 15960 15961 15962 /* 15963 * Function: sd_sense_key_medium_or_hardware_error 15964 * 15965 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 15966 * sense key.
15967 * 15968 * Context: May be called from interrupt context 15969 */ 15970 15971 static void 15972 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 15973 uint8_t *sense_datap, 15974 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 15975 { 15976 struct sd_sense_info si; 15977 uint8_t sense_key = scsi_sense_key(sense_datap); 15978 uint8_t asc = scsi_sense_asc(sense_datap); 15979 15980 ASSERT(un != NULL); 15981 ASSERT(mutex_owned(SD_MUTEX(un))); 15982 ASSERT(bp != NULL); 15983 ASSERT(xp != NULL); 15984 ASSERT(pktp != NULL); 15985 15986 si.ssi_severity = SCSI_ERR_FATAL; 15987 si.ssi_pfa_flag = FALSE; 15988 15989 if (sense_key == KEY_MEDIUM_ERROR) { 15990 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 15991 } 15992 15993 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15994 15995 if ((un->un_reset_retry_count != 0) && 15996 (xp->xb_retry_count == un->un_reset_retry_count)) { 15997 mutex_exit(SD_MUTEX(un)); 15998 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 15999 if (un->un_f_allow_bus_device_reset == TRUE) { 16000 16001 boolean_t try_resetting_target = B_TRUE; 16002 16003 /* 16004 * We need to be able to handle specific ASCs when we are 16005 * handling a KEY_HARDWARE_ERROR. In particular 16006 * taking the default action of resetting the target may 16007 * not be the appropriate way to attempt recovery. 16008 * Resetting a target because of a single LUN failure 16009 * victimizes all LUNs on that target. 16010 * 16011 * This is true for LSI arrays: if an LSI 16012 * array controller returns an ASC of 0x84 (LUN Dead), we 16013 * should trust it. 16014 */ 16015 16016 if (sense_key == KEY_HARDWARE_ERROR) { 16017 switch (asc) { 16018 case 0x84: 16019 if (SD_IS_LSI(un)) { 16020 try_resetting_target = B_FALSE; 16021 } 16022 break; 16023 default: 16024 break; 16025 } 16026 } 16027 16028 if (try_resetting_target == B_TRUE) { 16029 int reset_retval = 0; 16030 if (un->un_f_lun_reset_enabled == TRUE) { 16031 SD_TRACE(SD_LOG_IO_CORE, un, 16032 "sd_sense_key_medium_or_hardware_" 16033 "error: issuing RESET_LUN\n"); 16034 reset_retval = 16035 scsi_reset(SD_ADDRESS(un), 16036 RESET_LUN); 16037 } 16038 if (reset_retval == 0) { 16039 SD_TRACE(SD_LOG_IO_CORE, un, 16040 "sd_sense_key_medium_or_hardware_" 16041 "error: issuing RESET_TARGET\n"); 16042 (void) scsi_reset(SD_ADDRESS(un), 16043 RESET_TARGET); 16044 } 16045 } 16046 } 16047 mutex_enter(SD_MUTEX(un)); 16048 } 16049 16050 /* 16051 * This really ought to be a fatal error, but we will retry anyway 16052 * as some drives report this as a spurious error. 16053 */ 16054 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16055 &si, EIO, (clock_t)0, NULL); 16056 } 16057 16058 16059 16060 /* 16061 * Function: sd_sense_key_illegal_request 16062 * 16063 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
16064 * 16065 * Context: May be called from interrupt context 16066 */ 16067 16068 static void 16069 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16070 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16071 { 16072 struct sd_sense_info si; 16073 16074 ASSERT(un != NULL); 16075 ASSERT(mutex_owned(SD_MUTEX(un))); 16076 ASSERT(bp != NULL); 16077 ASSERT(xp != NULL); 16078 ASSERT(pktp != NULL); 16079 16080 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16081 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16082 16083 si.ssi_severity = SCSI_ERR_INFO; 16084 si.ssi_pfa_flag = FALSE; 16085 16086 /* Pointless to retry if the target thinks it's an illegal request */ 16087 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16088 sd_return_failed_command(un, bp, EIO); 16089 } 16090 16091 16092 16093 16094 /* 16095 * Function: sd_sense_key_unit_attention 16096 * 16097 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16098 * 16099 * Context: May be called from interrupt context 16100 */ 16101 16102 static void 16103 sd_sense_key_unit_attention(struct sd_lun *un, 16104 uint8_t *sense_datap, 16105 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16106 { 16107 /* 16108 * For UNIT ATTENTION we allow retries for one minute. Devices 16109 * like Sonoma can return UNIT ATTENTION for close to a minute 16110 * under certain conditions. 16111 */ 16112 int retry_check_flag = SD_RETRIES_UA; 16113 boolean_t kstat_updated = B_FALSE; 16114 struct sd_sense_info si; 16115 uint8_t asc = scsi_sense_asc(sense_datap); 16116 16117 ASSERT(un != NULL); 16118 ASSERT(mutex_owned(SD_MUTEX(un))); 16119 ASSERT(bp != NULL); 16120 ASSERT(xp != NULL); 16121 ASSERT(pktp != NULL); 16122 16123 si.ssi_severity = SCSI_ERR_INFO; 16124 si.ssi_pfa_flag = FALSE; 16125 16126 16127 switch (asc) { 16128 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16129 if (sd_report_pfa != 0) { 16130 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16131 si.ssi_pfa_flag = TRUE; 16132 retry_check_flag = SD_RETRIES_STANDARD; 16133 goto do_retry; 16134 } 16135 16136 break; 16137 16138 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16139 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16140 un->un_resvd_status |= 16141 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16142 } 16143 #ifdef _LP64 16144 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16145 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16146 un, KM_NOSLEEP) == 0) { 16147 /* 16148 * If we can't dispatch the task we'll just 16149 * live without descriptor sense. We can 16150 * try again on the next "unit attention". 16151 */ 16152 SD_ERROR(SD_LOG_ERROR, un, 16153 "sd_sense_key_unit_attention: " 16154 "Could not dispatch " 16155 "sd_reenable_dsense_task\n"); 16156 } 16157 } 16158 #endif /* _LP64 */ 16159 /* FALLTHRU */ 16160 16161 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16162 if (!un->un_f_has_removable_media) { 16163 break; 16164 } 16165 16166 /* 16167 * When we get a unit attention from a removable-media device, 16168 * it may be in a state that will take a long time to recover 16169 * (e.g., from a reset). Since we are executing in interrupt 16170 * context here, we cannot wait around for the device to come 16171 * back. So hand this command off to sd_media_change_task() 16172 * for deferred processing under taskq thread context. (Note 16173 * that the command still may be failed if a problem is 16174 * encountered at a later time.)
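 *
 * (taskq_dispatch() is called with KM_NOSLEEP below because this code
 * runs in interrupt context and must not block waiting for memory; a
 * zero return means the request could not be queued, in which case the
 * command is failed on the spot.)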
16175 */ 16176 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16177 KM_NOSLEEP) == 0) { 16178 /* 16179 * Cannot dispatch the request so fail the command. 16180 */ 16181 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16182 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16183 si.ssi_severity = SCSI_ERR_FATAL; 16184 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16185 sd_return_failed_command(un, bp, EIO); 16186 } 16187 16188 /* 16189 * If we failed to dispatch sd_media_change_task(), we already 16190 * updated the kstats above. If the dispatch succeeded, the 16191 * kstats should be updated later if the command encounters an 16192 * error, so we set the kstat_updated flag here. 16193 */ 16194 kstat_updated = B_TRUE; 16195 16196 /* 16197 * Either the command has been successfully dispatched to a 16198 * task Q for retrying, or the dispatch failed. In either case 16199 * do NOT retry again by calling sd_retry_command. This sets up 16200 * two retries of the same command and when one completes and 16201 * frees the resources the other will access freed memory, 16202 * a bad thing. 16203 */ 16204 return; 16205 16206 default: 16207 break; 16208 } 16209 16210 /* 16211 * Update kstat if we haven't done that. 16212 */ 16213 if (!kstat_updated) { 16214 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16215 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16216 } 16217 16218 do_retry: 16219 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16220 EIO, SD_UA_RETRY_DELAY, NULL); 16221 } 16222 16223 16224 16225 /* 16226 * Function: sd_sense_key_fail_command 16227 * 16228 * Description: Used to fail a command when we don't like the sense key that 16229 * was returned. 16230 * 16231 * Context: May be called from interrupt context 16232 */ 16233 16234 static void 16235 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16236 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16237 { 16238 struct sd_sense_info si; 16239 16240 ASSERT(un != NULL); 16241 ASSERT(mutex_owned(SD_MUTEX(un))); 16242 ASSERT(bp != NULL); 16243 ASSERT(xp != NULL); 16244 ASSERT(pktp != NULL); 16245 16246 si.ssi_severity = SCSI_ERR_FATAL; 16247 si.ssi_pfa_flag = FALSE; 16248 16249 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16250 sd_return_failed_command(un, bp, EIO); 16251 } 16252 16253 16254 16255 /* 16256 * Function: sd_sense_key_blank_check 16257 * 16258 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16259 * Has no monetary connotation. 16260 * 16261 * Context: May be called from interrupt context 16262 */ 16263 16264 static void 16265 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16266 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16267 { 16268 struct sd_sense_info si; 16269 16270 ASSERT(un != NULL); 16271 ASSERT(mutex_owned(SD_MUTEX(un))); 16272 ASSERT(bp != NULL); 16273 ASSERT(xp != NULL); 16274 ASSERT(pktp != NULL); 16275 16276 /* 16277 * Blank check is not fatal for removable devices, therefore 16278 * it does not require a console message. 16279 */ 16280 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16281 SCSI_ERR_FATAL; 16282 si.ssi_pfa_flag = FALSE; 16283 16284 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16285 sd_return_failed_command(un, bp, EIO); 16286 } 16287 16288 16289 16290 16291 /* 16292 * Function: sd_sense_key_aborted_command 16293 * 16294 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16295 * 16296 * Context: May be called from interrupt context 16297 */ 16298 16299 static void 16300 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16301 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16302 { 16303 struct sd_sense_info si; 16304 16305 ASSERT(un != NULL); 16306 ASSERT(mutex_owned(SD_MUTEX(un))); 16307 ASSERT(bp != NULL); 16308 ASSERT(xp != NULL); 16309 ASSERT(pktp != NULL); 16310 16311 si.ssi_severity = SCSI_ERR_FATAL; 16312 si.ssi_pfa_flag = FALSE; 16313 16314 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16315 16316 /* 16317 * This really ought to be a fatal error, but we will retry anyway 16318 * as some drives report this as a spurious error. 16319 */ 16320 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16321 &si, EIO, (clock_t)0, NULL); 16322 } 16323 16324 16325 16326 /* 16327 * Function: sd_sense_key_default 16328 * 16329 * Description: Default recovery action for several SCSI sense keys (basically 16330 * attempts a retry). 16331 * 16332 * Context: May be called from interrupt context 16333 */ 16334 16335 static void 16336 sd_sense_key_default(struct sd_lun *un, 16337 uint8_t *sense_datap, 16338 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16339 { 16340 struct sd_sense_info si; 16341 uint8_t sense_key = scsi_sense_key(sense_datap); 16342 16343 ASSERT(un != NULL); 16344 ASSERT(mutex_owned(SD_MUTEX(un))); 16345 ASSERT(bp != NULL); 16346 ASSERT(xp != NULL); 16347 ASSERT(pktp != NULL); 16348 16349 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16350 16351 /* 16352 * Undecoded sense key. Attempt retries and hope that will fix 16353 * the problem. Otherwise, we're dead. 16354 */ 16355 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16356 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16357 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16358 } 16359 16360 si.ssi_severity = SCSI_ERR_FATAL; 16361 si.ssi_pfa_flag = FALSE; 16362 16363 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16364 &si, EIO, (clock_t)0, NULL); 16365 } 16366 16367 16368 16369 /* 16370 * Function: sd_print_retry_msg 16371 * 16372 * Description: Print a message indicating the retry action being taken. 16373 * 16374 * Arguments: un - ptr to associated softstate 16375 * bp - ptr to buf(9S) for the command 16376 * arg - not used. 16377 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16378 * or SD_NO_RETRY_ISSUED 16379 * 16380 * Context: May be called from interrupt context 16381 */ 16382 /* ARGSUSED */ 16383 static void 16384 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16385 { 16386 struct sd_xbuf *xp; 16387 struct scsi_pkt *pktp; 16388 char *reasonp; 16389 char *msgp; 16390 16391 ASSERT(un != NULL); 16392 ASSERT(mutex_owned(SD_MUTEX(un))); 16393 ASSERT(bp != NULL); 16394 pktp = SD_GET_PKTP(bp); 16395 ASSERT(pktp != NULL); 16396 xp = SD_GET_XBUF(bp); 16397 ASSERT(xp != NULL); 16398 16399 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16400 mutex_enter(&un->un_pm_mutex); 16401 if ((un->un_state == SD_STATE_SUSPENDED) || 16402 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16403 (pktp->pkt_flags & FLAG_SILENT)) { 16404 mutex_exit(&un->un_pm_mutex); 16405 goto update_pkt_reason; 16406 } 16407 mutex_exit(&un->un_pm_mutex); 16408 16409 /* 16410 * Suppress messages if they are all the same pkt_reason; with 16411 * TQ, many (up to 256) are returned with the same pkt_reason. 16412 * If we are in panic, then suppress the retry messages. 
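 *
 * Expressed as a single predicate (a sketch equivalent to the
 * retry cases of the switch below), a retry message is printed
 * only when:
 *
 *	!ddi_in_panic() &&
 *	(un->un_state != SD_STATE_OFFLINE) &&
 *	((pktp->pkt_reason != un->un_last_pkt_reason) ||
 *	    (sd_error_level == SCSI_ERR_ALL))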
16413 */ 16414 switch (flag) { 16415 case SD_NO_RETRY_ISSUED: 16416 msgp = "giving up"; 16417 break; 16418 case SD_IMMEDIATE_RETRY_ISSUED: 16419 case SD_DELAYED_RETRY_ISSUED: 16420 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16421 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16422 (sd_error_level != SCSI_ERR_ALL))) { 16423 return; 16424 } 16425 msgp = "retrying command"; 16426 break; 16427 default: 16428 goto update_pkt_reason; 16429 } 16430 16431 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16432 scsi_rname(pktp->pkt_reason)); 16433 16434 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16435 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16436 16437 update_pkt_reason: 16438 /* 16439 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16440 * This is to prevent multiple console messages for the same failure 16441 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16442 * when the command is retried successfully because there still may be 16443 * more commands coming back with the same value of pktp->pkt_reason. 16444 */ 16445 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16446 un->un_last_pkt_reason = pktp->pkt_reason; 16447 } 16448 } 16449 16450 16451 /* 16452 * Function: sd_print_cmd_incomplete_msg 16453 * 16454 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16455 * 16456 * Arguments: un - ptr to associated softstate 16457 * bp - ptr to buf(9S) for the command 16458 * arg - passed to sd_print_retry_msg() 16459 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16460 * or SD_NO_RETRY_ISSUED 16461 * 16462 * Context: May be called from interrupt context 16463 */ 16464 16465 static void 16466 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16467 int code) 16468 { 16469 dev_info_t *dip; 16470 16471 ASSERT(un != NULL); 16472 ASSERT(mutex_owned(SD_MUTEX(un))); 16473 ASSERT(bp != NULL); 16474 16475 switch (code) { 16476 case SD_NO_RETRY_ISSUED: 16477 /* Command was failed. Someone turned off this target? */ 16478 if (un->un_state != SD_STATE_OFFLINE) { 16479 /* 16480 * Suppress message if we are detaching and 16481 * device has been disconnected 16482 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16483 * private interface and not part of the DDI 16484 */ 16485 dip = un->un_sd->sd_dev; 16486 if (!(DEVI_IS_DETACHING(dip) && 16487 DEVI_IS_DEVICE_REMOVED(dip))) { 16488 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16489 "disk not responding to selection\n"); 16490 } 16491 New_state(un, SD_STATE_OFFLINE); 16492 } 16493 break; 16494 16495 case SD_DELAYED_RETRY_ISSUED: 16496 case SD_IMMEDIATE_RETRY_ISSUED: 16497 default: 16498 /* Command was successfully queued for retry */ 16499 sd_print_retry_msg(un, bp, arg, code); 16500 break; 16501 } 16502 } 16503 16504 16505 /* 16506 * Function: sd_pkt_reason_cmd_incomplete 16507 * 16508 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
16509 * 16510 * Context: May be called from interrupt context 16511 */ 16512 16513 static void 16514 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16515 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16516 { 16517 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16518 16519 ASSERT(un != NULL); 16520 ASSERT(mutex_owned(SD_MUTEX(un))); 16521 ASSERT(bp != NULL); 16522 ASSERT(xp != NULL); 16523 ASSERT(pktp != NULL); 16524 16525 /* Do not do a reset if selection did not complete */ 16526 /* Note: Should this not just check the bit? */ 16527 if (pktp->pkt_state != STATE_GOT_BUS) { 16528 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16529 sd_reset_target(un, pktp); 16530 } 16531 16532 /* 16533 * If the target was not successfully selected, then set 16534 * SD_RETRIES_FAILFAST to indicate that we lost communication 16535 * with the target, and further retries and/or commands are 16536 * likely to take a long time. 16537 */ 16538 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16539 flag |= SD_RETRIES_FAILFAST; 16540 } 16541 16542 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16543 16544 sd_retry_command(un, bp, flag, 16545 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16546 } 16547 16548 16549 16550 /* 16551 * Function: sd_pkt_reason_cmd_tran_err 16552 * 16553 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16554 * 16555 * Context: May be called from interrupt context 16556 */ 16557 16558 static void 16559 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16560 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16561 { 16562 ASSERT(un != NULL); 16563 ASSERT(mutex_owned(SD_MUTEX(un))); 16564 ASSERT(bp != NULL); 16565 ASSERT(xp != NULL); 16566 ASSERT(pktp != NULL); 16567 16568 /* 16569 * Do not reset if we got a parity error, or if 16570 * selection did not complete. 16571 */ 16572 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16573 /* Note: Should this not just check the bit for pkt_state? */ 16574 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16575 (pktp->pkt_state != STATE_GOT_BUS)) { 16576 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16577 sd_reset_target(un, pktp); 16578 } 16579 16580 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16581 16582 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16583 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16584 } 16585 16586 16587 16588 /* 16589 * Function: sd_pkt_reason_cmd_reset 16590 * 16591 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16592 * 16593 * Context: May be called from interrupt context 16594 */ 16595 16596 static void 16597 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16598 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16599 { 16600 ASSERT(un != NULL); 16601 ASSERT(mutex_owned(SD_MUTEX(un))); 16602 ASSERT(bp != NULL); 16603 ASSERT(xp != NULL); 16604 ASSERT(pktp != NULL); 16605 16606 /* The target may still be running the command, so try to reset. */ 16607 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16608 sd_reset_target(un, pktp); 16609 16610 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16611 16612 /* 16613 * If pkt_reason is CMD_RESET chances are that this pkt got 16614 * reset because another target on this bus caused it. The target 16615 * that caused it should get CMD_TIMEOUT with pkt_statistics 16616 * of STAT_TIMEOUT/STAT_DEV_RESET. 
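 *
 * All of the pkt_reason handlers in this file funnel into the same
 * call; its general shape (argument roles here are descriptive
 * only, a sketch of the usage seen throughout this file) is:
 *
 *	sd_retry_command(un, bp, retry_check_flags, msg_func, msg_arg,
 *	    failure_errno, retry_delay, statp);
 *
 * By its use here, SD_RETRIES_VICTIM marks the command as a casualty
 * of another target's failure, so it is retried under the victim
 * retry accounting rather than the standard per-command limit.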
16617 */ 16618 16619 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16620 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16621 } 16622 16623 16624 16625 16626 /* 16627 * Function: sd_pkt_reason_cmd_aborted 16628 * 16629 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 16630 * 16631 * Context: May be called from interrupt context 16632 */ 16633 16634 static void 16635 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 16636 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16637 { 16638 ASSERT(un != NULL); 16639 ASSERT(mutex_owned(SD_MUTEX(un))); 16640 ASSERT(bp != NULL); 16641 ASSERT(xp != NULL); 16642 ASSERT(pktp != NULL); 16643 16644 /* The target may still be running the command, so try to reset. */ 16645 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16646 sd_reset_target(un, pktp); 16647 16648 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16649 16650 /* 16651 * If pkt_reason is CMD_ABORTED chances are that this pkt got 16652 * aborted because another target on this bus caused it. The target 16653 * that caused it should get CMD_TIMEOUT with pkt_statistics 16654 * of STAT_TIMEOUT/STAT_DEV_RESET. 16655 */ 16656 16657 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 16658 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16659 } 16660 16661 16662 16663 /* 16664 * Function: sd_pkt_reason_cmd_timeout 16665 * 16666 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 16667 * 16668 * Context: May be called from interrupt context 16669 */ 16670 16671 static void 16672 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 16673 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16674 { 16675 ASSERT(un != NULL); 16676 ASSERT(mutex_owned(SD_MUTEX(un))); 16677 ASSERT(bp != NULL); 16678 ASSERT(xp != NULL); 16679 ASSERT(pktp != NULL); 16680 16681 16682 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16683 sd_reset_target(un, pktp); 16684 16685 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16686 16687 /* 16688 * A command timeout indicates that we could not establish 16689 * communication with the target, so set SD_RETRIES_FAILFAST 16690 * as further retries/commands are likely to take a long time. 16691 */ 16692 sd_retry_command(un, bp, 16693 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 16694 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16695 } 16696 16697 16698 16699 /* 16700 * Function: sd_pkt_reason_cmd_unx_bus_free 16701 * 16702 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 16703 * 16704 * Context: May be called from interrupt context 16705 */ 16706 16707 static void 16708 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 16709 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16710 { 16711 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 16712 16713 ASSERT(un != NULL); 16714 ASSERT(mutex_owned(SD_MUTEX(un))); 16715 ASSERT(bp != NULL); 16716 ASSERT(xp != NULL); 16717 ASSERT(pktp != NULL); 16718 16719 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16720 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16721 16722 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 16723 sd_print_retry_msg : NULL; 16724 16725 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16726 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16727 } 16728 16729 16730 /* 16731 * Function: sd_pkt_reason_cmd_tag_reject 16732 * 16733 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
16734 * 16735 * Context: May be called from interrupt context 16736 */ 16737 16738 static void 16739 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 16740 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16741 { 16742 ASSERT(un != NULL); 16743 ASSERT(mutex_owned(SD_MUTEX(un))); 16744 ASSERT(bp != NULL); 16745 ASSERT(xp != NULL); 16746 ASSERT(pktp != NULL); 16747 16748 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16749 pktp->pkt_flags = 0; 16750 un->un_tagflags = 0; 16751 if (un->un_f_opt_queueing == TRUE) { 16752 un->un_throttle = min(un->un_throttle, 3); 16753 } else { 16754 un->un_throttle = 1; 16755 } 16756 mutex_exit(SD_MUTEX(un)); 16757 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 16758 mutex_enter(SD_MUTEX(un)); 16759 16760 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16761 16762 /* Legacy behavior not to check retry counts here. */ 16763 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 16764 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16765 } 16766 16767 16768 /* 16769 * Function: sd_pkt_reason_default 16770 * 16771 * Description: Default recovery actions for SCSA pkt_reason values that 16772 * do not have more explicit recovery actions. 16773 * 16774 * Context: May be called from interrupt context 16775 */ 16776 16777 static void 16778 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 16779 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16780 { 16781 ASSERT(un != NULL); 16782 ASSERT(mutex_owned(SD_MUTEX(un))); 16783 ASSERT(bp != NULL); 16784 ASSERT(xp != NULL); 16785 ASSERT(pktp != NULL); 16786 16787 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16788 sd_reset_target(un, pktp); 16789 16790 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16791 16792 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16793 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16794 } 16795 16796 16797 16798 /* 16799 * Function: sd_pkt_status_check_condition 16800 * 16801 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 16802 * 16803 * Context: May be called from interrupt context 16804 */ 16805 16806 static void 16807 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 16808 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16809 { 16810 ASSERT(un != NULL); 16811 ASSERT(mutex_owned(SD_MUTEX(un))); 16812 ASSERT(bp != NULL); 16813 ASSERT(xp != NULL); 16814 ASSERT(pktp != NULL); 16815 16816 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 16817 "entry: buf:0x%p xp:0x%p\n", bp, xp); 16818 16819 /* 16820 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 16821 * command will be retried after the request sense). Otherwise, retry 16822 * the command. Note: we are issuing the request sense even though the 16823 * retry limit may have been reached for the failed command. 
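 *
 * Decision sketch for the code below (no new logic, just the shape):
 *
 *	if (un->un_f_arq_enabled == FALSE)
 *		issue REQUEST SENSE ourselves; the failed command is
 *		retried after the sense data arrives
 *	else
 *		auto request sense already ran with the packet, so
 *		simply retry the failed command (with a short delay
 *		on fibre channel configurations)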
16824 */ 16825 if (un->un_f_arq_enabled == FALSE) { 16826 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16827 "no ARQ, sending request sense command\n"); 16828 sd_send_request_sense_command(un, bp, pktp); 16829 } else { 16830 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 16831 "ARQ, retrying request sense command\n"); 16832 #if defined(__i386) || defined(__amd64) 16833 /* 16834 * The SD_RETRY_DELAY value needs to be adjusted here 16835 * whenever SD_RETRY_DELAY changes in sddef.h 16836 */ 16837 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 16838 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 16839 NULL); 16840 #else 16841 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 16842 EIO, SD_RETRY_DELAY, NULL); 16843 #endif 16844 } 16845 16846 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 16847 } 16848 16849 16850 /* 16851 * Function: sd_pkt_status_busy 16852 * 16853 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 16854 * 16855 * Context: May be called from interrupt context 16856 */ 16857 16858 static void 16859 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16860 struct scsi_pkt *pktp) 16861 { 16862 ASSERT(un != NULL); 16863 ASSERT(mutex_owned(SD_MUTEX(un))); 16864 ASSERT(bp != NULL); 16865 ASSERT(xp != NULL); 16866 ASSERT(pktp != NULL); 16867 16868 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16869 "sd_pkt_status_busy: entry\n"); 16870 16871 /* If retries are exhausted, just fail the command. */ 16872 if (xp->xb_retry_count >= un->un_busy_retry_count) { 16873 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16874 "device busy too long\n"); 16875 sd_return_failed_command(un, bp, EIO); 16876 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16877 "sd_pkt_status_busy: exit\n"); 16878 return; 16879 } 16880 xp->xb_retry_count++; 16881 16882 /* 16883 * Try to reset the target. However, we do not want to perform 16884 * more than one reset if the device continues to fail. The reset 16885 * will be performed when the retry count reaches the reset 16886 * threshold. This threshold should be set such that at least 16887 * one retry is issued before the reset is performed. 16888 */ 16889 if (xp->xb_retry_count == 16890 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 16891 int rval = 0; 16892 mutex_exit(SD_MUTEX(un)); 16893 if (un->un_f_allow_bus_device_reset == TRUE) { 16894 /* 16895 * First try to reset the LUN; if we cannot then 16896 * try to reset the target. 16897 */ 16898 if (un->un_f_lun_reset_enabled == TRUE) { 16899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16900 "sd_pkt_status_busy: RESET_LUN\n"); 16901 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 16902 } 16903 if (rval == 0) { 16904 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16905 "sd_pkt_status_busy: RESET_TARGET\n"); 16906 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 16907 } 16908 } 16909 if (rval == 0) { 16910 /* 16911 * If the RESET_LUN and/or RESET_TARGET failed, 16912 * try RESET_ALL 16913 */ 16914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16915 "sd_pkt_status_busy: RESET_ALL\n"); 16916 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 16917 } 16918 mutex_enter(SD_MUTEX(un)); 16919 if (rval == 0) { 16920 /* 16921 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 16922 * At this point we give up & fail the command.
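 *
 * The attempted escalation above is, in outline (each step runs
 * only if the previous one failed or was disallowed; SD_MUTEX(un)
 * must be dropped around the scsi_reset(9F) calls, as done above):
 *
 *	rval = 0;
 *	if (un->un_f_allow_bus_device_reset == TRUE) {
 *		if (un->un_f_lun_reset_enabled == TRUE)
 *			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *		if (rval == 0)
 *			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *	}
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);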
16923 */ 16924 sd_return_failed_command(un, bp, EIO); 16925 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16926 "sd_pkt_status_busy: exit (failed cmd)\n"); 16927 return; 16928 } 16929 } 16930 16931 /* 16932 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 16933 * we have already checked the retry counts above. 16934 */ 16935 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 16936 EIO, SD_BSY_TIMEOUT, NULL); 16937 16938 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16939 "sd_pkt_status_busy: exit\n"); 16940 } 16941 16942 16943 /* 16944 * Function: sd_pkt_status_reservation_conflict 16945 * 16946 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 16947 * command status. 16948 * 16949 * Context: May be called from interrupt context 16950 */ 16951 16952 static void 16953 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 16954 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16955 { 16956 ASSERT(un != NULL); 16957 ASSERT(mutex_owned(SD_MUTEX(un))); 16958 ASSERT(bp != NULL); 16959 ASSERT(xp != NULL); 16960 ASSERT(pktp != NULL); 16961 16962 /* 16963 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 16964 * conflict could be due to various reasons like incorrect keys, not 16965 * registered or not reserved etc. So, we return EACCES to the caller. 16966 */ 16967 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 16968 int cmd = SD_GET_PKT_OPCODE(pktp); 16969 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 16970 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 16971 sd_return_failed_command(un, bp, EACCES); 16972 return; 16973 } 16974 } 16975 16976 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 16977 16978 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 16979 if (sd_failfast_enable != 0) { 16980 /* By definition, we must panic here.... */ 16981 sd_panic_for_res_conflict(un); 16982 /*NOTREACHED*/ 16983 } 16984 SD_ERROR(SD_LOG_IO, un, 16985 "sd_handle_resv_conflict: Disk Reserved\n"); 16986 sd_return_failed_command(un, bp, EACCES); 16987 return; 16988 } 16989 16990 /* 16991 * 1147670: retry only if sd_retry_on_reservation_conflict 16992 * property is set (default is 1). Retries will not succeed 16993 * on a disk reserved by another initiator. HA systems 16994 * may reset this via sd.conf to avoid these retries. 16995 * 16996 * Note: The legacy return code for this failure is EIO, however EACCES 16997 * seems more appropriate for a reservation conflict. 16998 */ 16999 if (sd_retry_on_reservation_conflict == 0) { 17000 SD_ERROR(SD_LOG_IO, un, 17001 "sd_handle_resv_conflict: Device Reserved\n"); 17002 sd_return_failed_command(un, bp, EIO); 17003 return; 17004 } 17005 17006 /* 17007 * Retry the command if we can. 17008 * 17009 * Note: The legacy return code for this failure is EIO, however EACCES 17010 * seems more appropriate for a reservation conflict. 17011 */ 17012 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17013 (clock_t)2, NULL); 17014 } 17015 17016 17017 17018 /* 17019 * Function: sd_pkt_status_qfull 17020 * 17021 * Description: Handle a QUEUE FULL condition from the target. This can 17022 * occur if the HBA does not handle the queue full condition. 17023 * (Basically this means third-party HBAs as Sun HBAs will 17024 * handle the queue full condition.) Note that if there are 17025 * some commands already in the transport, then the queue full 17026 * has occurred because the queue for this nexus is actually 17027 * full. 
If there are no commands in the transport, then the 17028 * queue full is resulting from some other initiator or lun 17029 * consuming all the resources at the target. 17030 * 17031 * Context: May be called from interrupt context 17032 */ 17033 17034 static void 17035 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17036 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17037 { 17038 ASSERT(un != NULL); 17039 ASSERT(mutex_owned(SD_MUTEX(un))); 17040 ASSERT(bp != NULL); 17041 ASSERT(xp != NULL); 17042 ASSERT(pktp != NULL); 17043 17044 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17045 "sd_pkt_status_qfull: entry\n"); 17046 17047 /* 17048 * Just lower the QFULL throttle and retry the command. Note that 17049 * we do not limit the number of retries here. 17050 */ 17051 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17052 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17053 SD_RESTART_TIMEOUT, NULL); 17054 17055 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17056 "sd_pkt_status_qfull: exit\n"); 17057 } 17058 17059 17060 /* 17061 * Function: sd_reset_target 17062 * 17063 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17064 * RESET_TARGET, or RESET_ALL. 17065 * 17066 * Context: May be called under interrupt context. 17067 */ 17068 17069 static void 17070 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17071 { 17072 int rval = 0; 17073 17074 ASSERT(un != NULL); 17075 ASSERT(mutex_owned(SD_MUTEX(un))); 17076 ASSERT(pktp != NULL); 17077 17078 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17079 17080 /* 17081 * No need to reset if the transport layer has already done so. 17082 */ 17083 if ((pktp->pkt_statistics & 17084 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17086 "sd_reset_target: no reset\n"); 17087 return; 17088 } 17089 17090 mutex_exit(SD_MUTEX(un)); 17091 17092 if (un->un_f_allow_bus_device_reset == TRUE) { 17093 if (un->un_f_lun_reset_enabled == TRUE) { 17094 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17095 "sd_reset_target: RESET_LUN\n"); 17096 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17097 } 17098 if (rval == 0) { 17099 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17100 "sd_reset_target: RESET_TARGET\n"); 17101 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17102 } 17103 } 17104 17105 if (rval == 0) { 17106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17107 "sd_reset_target: RESET_ALL\n"); 17108 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17109 } 17110 17111 mutex_enter(SD_MUTEX(un)); 17112 17113 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17114 } 17115 17116 17117 /* 17118 * Function: sd_media_change_task 17119 * 17120 * Description: Recovery action for CDROM to become available. 
17121 * 17122 * Context: Executes in a taskq() thread context 17123 */ 17124 17125 static void 17126 sd_media_change_task(void *arg) 17127 { 17128 struct scsi_pkt *pktp = arg; 17129 struct sd_lun *un; 17130 struct buf *bp; 17131 struct sd_xbuf *xp; 17132 int err = 0; 17133 int retry_count = 0; 17134 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17135 struct sd_sense_info si; 17136 17137 ASSERT(pktp != NULL); 17138 bp = (struct buf *)pktp->pkt_private; 17139 ASSERT(bp != NULL); 17140 xp = SD_GET_XBUF(bp); 17141 ASSERT(xp != NULL); 17142 un = SD_GET_UN(bp); 17143 ASSERT(un != NULL); 17144 ASSERT(!mutex_owned(SD_MUTEX(un))); 17145 ASSERT(un->un_f_monitor_media_state); 17146 17147 si.ssi_severity = SCSI_ERR_INFO; 17148 si.ssi_pfa_flag = FALSE; 17149 17150 /* 17151 * When a reset is issued on a CDROM, it takes a long time to 17152 * recover. The first few attempts to read the capacity and other 17153 * things related to handling the unit attention fail (with an ASC 17154 * of 0x4 and an ASCQ of 0x1). In that case we want to do enough 17155 * retries, while still limiting the retries in other cases of 17156 * genuine failure, such as no media in the drive. 17157 */ 17158 while (retry_count++ < retry_limit) { 17159 if ((err = sd_handle_mchange(un)) == 0) { 17160 break; 17161 } 17162 if (err == EAGAIN) { 17163 retry_limit = SD_UNIT_ATTENTION_RETRY; 17164 } 17165 /* Sleep for 0.5 sec. & try again */ 17166 delay(drv_usectohz(500000)); 17167 } 17168 17169 /* 17170 * Dispatch (retry or fail) the original command here, 17171 * along with appropriate console messages.... 17172 * 17173 * Must grab the mutex before calling sd_retry_command, 17174 * sd_print_sense_msg and sd_return_failed_command. 17175 */ 17176 mutex_enter(SD_MUTEX(un)); 17177 if (err != SD_CMD_SUCCESS) { 17178 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17179 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17180 si.ssi_severity = SCSI_ERR_FATAL; 17181 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17182 sd_return_failed_command(un, bp, EIO); 17183 } else { 17184 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17185 &si, EIO, (clock_t)0, NULL); 17186 } 17187 mutex_exit(SD_MUTEX(un)); 17188 } 17189 17190 17191 17192 /* 17193 * Function: sd_handle_mchange 17194 * 17195 * Description: Perform geometry validation & other recovery when CDROM 17196 * has been removed from the drive.
17197 * 17198 * Return Code: 0 for success 17199 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17200 * sd_send_scsi_READ_CAPACITY() 17201 * 17202 * Context: Executes in a taskq() thread context 17203 */ 17204 17205 static int 17206 sd_handle_mchange(struct sd_lun *un) 17207 { 17208 uint64_t capacity; 17209 uint32_t lbasize; 17210 int rval; 17211 17212 ASSERT(!mutex_owned(SD_MUTEX(un))); 17213 ASSERT(un->un_f_monitor_media_state); 17214 17215 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17216 SD_PATH_DIRECT_PRIORITY)) != 0) { 17217 return (rval); 17218 } 17219 17220 mutex_enter(SD_MUTEX(un)); 17221 sd_update_block_info(un, lbasize, capacity); 17222 17223 if (un->un_errstats != NULL) { 17224 struct sd_errstats *stp = 17225 (struct sd_errstats *)un->un_errstats->ks_data; 17226 stp->sd_capacity.value.ui64 = (uint64_t) 17227 ((uint64_t)un->un_blockcount * 17228 (uint64_t)un->un_tgt_blocksize); 17229 } 17230 17231 17232 /* 17233 * Check if the media in the device is writable or not 17234 */ 17235 if (ISCD(un)) 17236 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17237 17238 /* 17239 * Note: Maybe let the strategy/partitioning chain worry about getting 17240 * valid geometry. 17241 */ 17242 mutex_exit(SD_MUTEX(un)); 17243 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17244 17245 17246 if (cmlb_validate(un->un_cmlbhandle, 0, 17247 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17248 return (EIO); 17249 } else { 17250 if (un->un_f_pkstats_enabled) { 17251 sd_set_pstats(un); 17252 SD_TRACE(SD_LOG_IO_PARTITION, un, 17253 "sd_handle_mchange: un:0x%p pstats created and " 17254 "set\n", un); 17255 } 17256 } 17257 17258 17259 /* 17260 * Try to lock the door 17261 */ 17262 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17263 SD_PATH_DIRECT_PRIORITY)); 17264 } 17265 17266 17267 /* 17268 * Function: sd_send_scsi_DOORLOCK 17269 * 17270 * Description: Issue the scsi DOOR LOCK command 17271 * 17272 * Arguments: un - pointer to driver soft state (unit) structure for 17273 * this target. 17274 * flag - SD_REMOVAL_ALLOW 17275 * SD_REMOVAL_PREVENT 17276 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17277 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17278 * to use the USCSI "direct" chain and bypass the normal 17279 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17280 * command is issued as part of an error recovery action. 17281 * 17282 * Return Code: 0 - Success 17283 * errno return code from sd_send_scsi_cmd() 17284 * 17285 * Context: Can sleep. 17286 */ 17287 17288 static int 17289 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17290 { 17291 union scsi_cdb cdb; 17292 struct uscsi_cmd ucmd_buf; 17293 struct scsi_extended_sense sense_buf; 17294 int status; 17295 17296 ASSERT(un != NULL); 17297 ASSERT(!mutex_owned(SD_MUTEX(un))); 17298 17299 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17300 17301 /* already determined doorlock is not supported, fake success */ 17302 if (un->un_f_doorlock_supported == FALSE) { 17303 return (0); 17304 } 17305 17306 /* 17307 * If we are ejecting and see an SD_REMOVAL_PREVENT 17308 * ignore the command so we can complete the eject 17309 * operation. 
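 *
 * Callers see this as (a usage sketch; see sd_handle_mchange()
 * above for a live example):
 *
 *	status = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
 *	    SD_PATH_DIRECT_PRIORITY);
 *
 * where EAGAIN means the request was refused because an eject is
 * in progress, and 0 may also mean the command was faked because
 * the device does not support DOOR LOCK (un_f_doorlock_supported).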
17310 */ 17311 if (flag == SD_REMOVAL_PREVENT) { 17312 mutex_enter(SD_MUTEX(un)); 17313 if (un->un_f_ejecting == TRUE) { 17314 mutex_exit(SD_MUTEX(un)); 17315 return (EAGAIN); 17316 } 17317 mutex_exit(SD_MUTEX(un)); 17318 } 17319 17320 bzero(&cdb, sizeof (cdb)); 17321 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17322 17323 cdb.scc_cmd = SCMD_DOORLOCK; 17324 cdb.cdb_opaque[4] = (uchar_t)flag; 17325 17326 ucmd_buf.uscsi_cdb = (char *)&cdb; 17327 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17328 ucmd_buf.uscsi_bufaddr = NULL; 17329 ucmd_buf.uscsi_buflen = 0; 17330 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17331 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17332 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17333 ucmd_buf.uscsi_timeout = 15; 17334 17335 SD_TRACE(SD_LOG_IO, un, 17336 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 17337 17338 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17339 UIO_SYSSPACE, path_flag); 17340 17341 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 17342 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17343 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 17344 /* fake success and skip subsequent doorlock commands */ 17345 un->un_f_doorlock_supported = FALSE; 17346 return (0); 17347 } 17348 17349 return (status); 17350 } 17351 17352 /* 17353 * Function: sd_send_scsi_READ_CAPACITY 17354 * 17355 * Description: This routine uses the scsi READ CAPACITY command to determine 17356 * the device capacity in number of blocks and the device native 17357 * block size. If this function returns a failure, then the 17358 * values in *capp and *lbap are undefined. If the capacity 17359 * returned is 0xffffffff then the lun is too large for a 17360 * normal READ CAPACITY command and the results of a 17361 * READ CAPACITY 16 will be used instead. 17362 * 17363 * Arguments: un - ptr to soft state struct for the target 17364 * capp - ptr to unsigned 64-bit variable to receive the 17365 * capacity value from the command. 17366 * lbap - ptr to unsigned 32-bit variable to receive the 17367 * block size value from the command. 17368 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17369 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17370 * to use the USCSI "direct" chain and bypass the normal 17371 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17372 * command is issued as part of an error recovery action. 17373 * 17374 * Return Code: 0 - Success 17375 * EIO - IO error 17376 * EACCES - Reservation conflict detected 17377 * EAGAIN - Device is becoming ready 17378 * errno return code from sd_send_scsi_cmd() 17379 * 17380 * Context: Can sleep. Blocks until command completes. 17381 */ 17382 17383 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 17384 17385 static int 17386 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 17387 int path_flag) 17388 { 17389 struct scsi_extended_sense sense_buf; 17390 struct uscsi_cmd ucmd_buf; 17391 union scsi_cdb cdb; 17392 uint32_t *capacity_buf; 17393 uint64_t capacity; 17394 uint32_t lbasize; 17395 int status; 17396 17397 ASSERT(un != NULL); 17398 ASSERT(!mutex_owned(SD_MUTEX(un))); 17399 ASSERT(capp != NULL); 17400 ASSERT(lbap != NULL); 17401 17402 SD_TRACE(SD_LOG_IO, un, 17403 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 17404 17405 /* 17406 * First send a READ_CAPACITY command to the target. 17407 * (This command is mandatory under SCSI-2.) 17408 * 17409 * Set up the CDB for the READ_CAPACITY command.
The Partial 17410 * Medium Indicator bit is cleared. The address field must be 17411 * zero if the PMI bit is zero. 17412 */ 17413 bzero(&cdb, sizeof (cdb)); 17414 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17415 17416 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17417 17418 cdb.scc_cmd = SCMD_READ_CAPACITY; 17419 17420 ucmd_buf.uscsi_cdb = (char *)&cdb; 17421 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17422 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17423 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17424 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17425 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17426 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17427 ucmd_buf.uscsi_timeout = 60; 17428 17429 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17430 UIO_SYSSPACE, path_flag); 17431 17432 switch (status) { 17433 case 0: 17434 /* Return failure if we did not get valid capacity data. */ 17435 if (ucmd_buf.uscsi_resid != 0) { 17436 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17437 return (EIO); 17438 } 17439 17440 /* 17441 * Read capacity and block size from the READ CAPACITY 10 data. 17442 * This data may be adjusted later due to device specific 17443 * issues. 17444 * 17445 * According to the SCSI spec, the READ CAPACITY 10 17446 * command returns the following: 17447 * 17448 * bytes 0-3: Maximum logical block address available. 17449 * (MSB in byte:0 & LSB in byte:3) 17450 * 17451 * bytes 4-7: Block length in bytes 17452 * (MSB in byte:4 & LSB in byte:7) 17453 * 17454 */ 17455 capacity = BE_32(capacity_buf[0]); 17456 lbasize = BE_32(capacity_buf[1]); 17457 17458 /* 17459 * Done with capacity_buf 17460 */ 17461 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17462 17463 /* 17464 * if the reported capacity is set to all 0xf's, then 17465 * this disk is too large and requires SBC-2 commands. 17466 * Reissue the request using READ CAPACITY 16. 17467 */ 17468 if (capacity == 0xffffffff) { 17469 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17470 &lbasize, path_flag); 17471 if (status != 0) { 17472 return (status); 17473 } 17474 } 17475 break; /* Success! */ 17476 case EIO: 17477 switch (ucmd_buf.uscsi_status) { 17478 case STATUS_RESERVATION_CONFLICT: 17479 status = EACCES; 17480 break; 17481 case STATUS_CHECK: 17482 /* 17483 * Check condition; look for ASC/ASCQ of 0x04/0x01 17484 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17485 */ 17486 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17487 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17488 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17489 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17490 return (EAGAIN); 17491 } 17492 break; 17493 default: 17494 break; 17495 } 17496 /* FALLTHRU */ 17497 default: 17498 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17499 return (status); 17500 } 17501 17502 /* 17503 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17504 * (2352 and 0 are common) so for these devices always force the value 17505 * to 2048 as required by the ATAPI specs. 17506 */ 17507 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17508 lbasize = 2048; 17509 } 17510 17511 /* 17512 * Get the maximum LBA value from the READ CAPACITY data. 17513 * Here we assume that the Partial Medium Indicator (PMI) bit 17514 * was cleared when issuing the command. This means that the LBA 17515 * returned from the device is the LBA of the last logical block 17516 * on the logical unit. The actual logical block count will be 17517 * this value plus one. 
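 *
 * Worked example with hypothetical data: a device whose last LBA is
 * 0x003FFFFF with 512-byte blocks returns
 *
 *	capacity_buf[0]: BE_32(...) == 0x003FFFFF	(max LBA)
 *	capacity_buf[1]: BE_32(...) == 0x00000200	(lbasize = 512)
 *
 * so the usable block count is 0x003FFFFF + 1 = 0x00400000 blocks.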
17518 * 17519 * Currently the capacity is saved in terms of un->un_sys_blocksize, 17520 * so scale the capacity value to reflect this. 17521 */ 17522 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 17523 17524 /* 17525 * Copy the values from the READ CAPACITY command into the space 17526 * provided by the caller. 17527 */ 17528 *capp = capacity; 17529 *lbap = lbasize; 17530 17531 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 17532 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17533 17534 /* 17535 * Both the lbasize and capacity from the device must be nonzero; 17536 * otherwise we assume that the values are not valid and return 17537 * failure to the caller. (4203735) 17538 */ 17539 if ((capacity == 0) || (lbasize == 0)) { 17540 return (EIO); 17541 } 17542 17543 return (0); 17544 } 17545 17546 /* 17547 * Function: sd_send_scsi_READ_CAPACITY_16 17548 * 17549 * Description: This routine uses the scsi READ CAPACITY 16 command to 17550 * determine the device capacity in number of blocks and the 17551 * device native block size. If this function returns a failure, 17552 * then the values in *capp and *lbap are undefined. 17553 * This routine should always be called by 17554 * sd_send_scsi_READ_CAPACITY, which will apply any device- 17555 * specific adjustments to capacity and lbasize. 17556 * 17557 * Arguments: un - ptr to soft state struct for the target 17558 * capp - ptr to unsigned 64-bit variable to receive the 17559 * capacity value from the command. 17560 * lbap - ptr to unsigned 32-bit variable to receive the 17561 * block size value from the command. 17562 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17563 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17564 * to use the USCSI "direct" chain and bypass the normal 17565 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 17566 * this command is issued as part of an error recovery 17567 * action. 17568 * 17569 * Return Code: 0 - Success 17570 * EIO - IO error 17571 * EACCES - Reservation conflict detected 17572 * EAGAIN - Device is becoming ready 17573 * errno return code from sd_send_scsi_cmd() 17574 * 17575 * Context: Can sleep. Blocks until command completes. 17576 */ 17577 17578 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 17579 17580 static int 17581 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 17582 uint32_t *lbap, int path_flag) 17583 { 17584 struct scsi_extended_sense sense_buf; 17585 struct uscsi_cmd ucmd_buf; 17586 union scsi_cdb cdb; 17587 uint64_t *capacity16_buf; 17588 uint64_t capacity; 17589 uint32_t lbasize; 17590 int status; 17591 17592 ASSERT(un != NULL); 17593 ASSERT(!mutex_owned(SD_MUTEX(un))); 17594 ASSERT(capp != NULL); 17595 ASSERT(lbap != NULL); 17596 17597 SD_TRACE(SD_LOG_IO, un, 17598 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 17599 17600 /* 17601 * First send a READ_CAPACITY_16 command to the target. 17602 * 17603 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 17604 * Medium Indicator bit is cleared. The address field must be 17605 * zero if the PMI bit is zero.
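 *
 * CDB layout sketch for the command built below (fields not shown
 * are left zero by the bzero; opcode and service action values per
 * SPC/SBC):
 *
 *	byte  0:	0x9E	(SCMD_SVC_ACTION_IN_G4)
 *	byte  1:	0x10	(SSVC_ACTION_READ_CAPACITY_G4)
 *	bytes 2-9:	logical block address (zero, since PMI is zero)
 *	bytes 10-13:	allocation length (set via FORMG4COUNT)
 *	byte  14:	PMI (zero)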
17606 */ 17607 bzero(&cdb, sizeof (cdb)); 17608 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17609 17610 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 17611 17612 ucmd_buf.uscsi_cdb = (char *)&cdb; 17613 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 17614 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 17615 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 17616 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17617 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17618 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17619 ucmd_buf.uscsi_timeout = 60; 17620 17621 /* 17622 * Read Capacity (16) is a Service Action In command. One 17623 * command byte (0x9E) is overloaded for multiple operations, 17624 * with the second CDB byte specifying the desired operation. 17625 */ 17626 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 17627 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 17628 17629 /* 17630 * Fill in allocation length field 17631 */ 17632 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 17633 17634 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17635 UIO_SYSSPACE, path_flag); 17636 17637 switch (status) { 17638 case 0: 17639 /* Return failure if we did not get valid capacity data. */ 17640 if (ucmd_buf.uscsi_resid > 20) { 17641 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17642 return (EIO); 17643 } 17644 17645 /* 17646 * Read capacity and block size from the READ CAPACITY 16 data. 17647 * This data may be adjusted later due to device specific 17648 * issues. 17649 * 17650 * According to the SCSI spec, the READ CAPACITY 16 17651 * command returns the following: 17652 * 17653 * bytes 0-7: Maximum logical block address available. 17654 * (MSB in byte:0 & LSB in byte:7) 17655 * 17656 * bytes 8-11: Block length in bytes 17657 * (MSB in byte:8 & LSB in byte:11) 17658 * 17659 */ 17660 capacity = BE_64(capacity16_buf[0]); 17661 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 17662 17663 /* 17664 * Done with capacity16_buf 17665 */ 17666 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17667 17668 /* 17669 * if the reported capacity is set to all 0xf's, then 17670 * this disk is too large. This could only happen with 17671 * a device that supports LBAs larger than 64 bits, which 17672 * are not defined by any current T10 standards. 17673 */ 17674 if (capacity == 0xffffffffffffffff) { 17675 return (EIO); 17676 } 17677 break; /* Success! */ 17678 case EIO: 17679 switch (ucmd_buf.uscsi_status) { 17680 case STATUS_RESERVATION_CONFLICT: 17681 status = EACCES; 17682 break; 17683 case STATUS_CHECK: 17684 /* 17685 * Check condition; look for ASC/ASCQ of 0x04/0x01 17686 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17687 */ 17688 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17689 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17690 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17691 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17692 return (EAGAIN); 17693 } 17694 break; 17695 default: 17696 break; 17697 } 17698 /* FALLTHRU */ 17699 default: 17700 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 17701 return (status); 17702 } 17703 17704 *capp = capacity; 17705 *lbap = lbasize; 17706 17707 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 17708 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 17709 17710 return (0); 17711 } 17712 17713 17714 /* 17715 * Function: sd_send_scsi_START_STOP_UNIT 17716 * 17717 * Description: Issue a scsi START STOP UNIT command to the target.
17718 * 17719 * Arguments: un - pointer to driver soft state (unit) structure for 17720 * this target. 17721 * flag - SD_TARGET_START 17722 * SD_TARGET_STOP 17723 * SD_TARGET_EJECT 17724 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17725 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17726 * to use the USCSI "direct" chain and bypass the normal 17727 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17728 * command is issued as part of an error recovery action. 17729 * 17730 * Return Code: 0 - Success 17731 * EIO - IO error 17732 * EACCES - Reservation conflict detected 17733 * ENXIO - Not Ready, medium not present 17734 * errno return code from sd_send_scsi_cmd() 17735 * 17736 * Context: Can sleep. 17737 */ 17738 17739 static int 17740 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 17741 { 17742 struct scsi_extended_sense sense_buf; 17743 union scsi_cdb cdb; 17744 struct uscsi_cmd ucmd_buf; 17745 int status; 17746 17747 ASSERT(un != NULL); 17748 ASSERT(!mutex_owned(SD_MUTEX(un))); 17749 17750 SD_TRACE(SD_LOG_IO, un, 17751 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 17752 17753 if (un->un_f_check_start_stop && 17754 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 17755 (un->un_f_start_stop_supported != TRUE)) { 17756 return (0); 17757 } 17758 17759 /* 17760 * If we are performing an eject operation and 17761 * we receive any command other than SD_TARGET_EJECT 17762 * we should immediately return. 17763 */ 17764 if (flag != SD_TARGET_EJECT) { 17765 mutex_enter(SD_MUTEX(un)); 17766 if (un->un_f_ejecting == TRUE) { 17767 mutex_exit(SD_MUTEX(un)); 17768 return (EAGAIN); 17769 } 17770 mutex_exit(SD_MUTEX(un)); 17771 } 17772 17773 bzero(&cdb, sizeof (cdb)); 17774 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17775 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 17776 17777 cdb.scc_cmd = SCMD_START_STOP; 17778 cdb.cdb_opaque[4] = (uchar_t)flag; 17779 17780 ucmd_buf.uscsi_cdb = (char *)&cdb; 17781 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17782 ucmd_buf.uscsi_bufaddr = NULL; 17783 ucmd_buf.uscsi_buflen = 0; 17784 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17785 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 17786 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 17787 ucmd_buf.uscsi_timeout = 200; 17788 17789 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17790 UIO_SYSSPACE, path_flag); 17791 17792 switch (status) { 17793 case 0: 17794 break; /* Success! */ 17795 case EIO: 17796 switch (ucmd_buf.uscsi_status) { 17797 case STATUS_RESERVATION_CONFLICT: 17798 status = EACCES; 17799 break; 17800 case STATUS_CHECK: 17801 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 17802 switch (scsi_sense_key( 17803 (uint8_t *)&sense_buf)) { 17804 case KEY_ILLEGAL_REQUEST: 17805 status = ENOTSUP; 17806 break; 17807 case KEY_NOT_READY: 17808 if (scsi_sense_asc( 17809 (uint8_t *)&sense_buf) 17810 == 0x3A) { 17811 status = ENXIO; 17812 } 17813 break; 17814 default: 17815 break; 17816 } 17817 } 17818 break; 17819 default: 17820 break; 17821 } 17822 break; 17823 default: 17824 break; 17825 } 17826 17827 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 17828 17829 return (status); 17830 } 17831 17832 17833 /* 17834 * Function: sd_start_stop_unit_callback 17835 * 17836 * Description: timeout(9F) callback to begin recovery process for a 17837 * device that has spun down. 17838 * 17839 * Arguments: arg - pointer to associated softstate struct. 
17840 * 17841 * Context: Executes in a timeout(9F) thread context 17842 */ 17843 17844 static void 17845 sd_start_stop_unit_callback(void *arg) 17846 { 17847 struct sd_lun *un = arg; 17848 ASSERT(un != NULL); 17849 ASSERT(!mutex_owned(SD_MUTEX(un))); 17850 17851 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 17852 17853 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 17854 } 17855 17856 17857 /* 17858 * Function: sd_start_stop_unit_task 17859 * 17860 * Description: Recovery procedure when a drive is spun down. 17861 * 17862 * Arguments: arg - pointer to associated softstate struct. 17863 * 17864 * Context: Executes in a taskq() thread context 17865 */ 17866 17867 static void 17868 sd_start_stop_unit_task(void *arg) 17869 { 17870 struct sd_lun *un = arg; 17871 17872 ASSERT(un != NULL); 17873 ASSERT(!mutex_owned(SD_MUTEX(un))); 17874 17875 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 17876 17877 /* 17878 * Some unformatted drives report not ready error, no need to 17879 * restart if format has been initiated. 17880 */ 17881 mutex_enter(SD_MUTEX(un)); 17882 if (un->un_f_format_in_progress == TRUE) { 17883 mutex_exit(SD_MUTEX(un)); 17884 return; 17885 } 17886 mutex_exit(SD_MUTEX(un)); 17887 17888 /* 17889 * When a START STOP command is issued from here, it is part of a 17890 * failure recovery operation and must be issued before any other 17891 * commands, including any pending retries. Thus it must be sent 17892 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 17893 * succeeds or not, we will start I/O after the attempt. 17894 */ 17895 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 17896 SD_PATH_DIRECT_PRIORITY); 17897 17898 /* 17899 * The above call blocks until the START_STOP_UNIT command completes. 17900 * Now that it has completed, we must re-try the original IO that 17901 * received the NOT READY condition in the first place. There are 17902 * three possible conditions here: 17903 * 17904 * (1) The original IO is on un_retry_bp. 17905 * (2) The original IO is on the regular wait queue, and un_retry_bp 17906 * is NULL. 17907 * (3) The original IO is on the regular wait queue, and un_retry_bp 17908 * points to some other, unrelated bp. 17909 * 17910 * For each case, we must call sd_start_cmds() with un_retry_bp 17911 * as the argument. If un_retry_bp is NULL, this will initiate 17912 * processing of the regular wait queue. If un_retry_bp is not NULL, 17913 * then this will process the bp on un_retry_bp. That may or may not 17914 * be the original IO, but that does not matter: the important thing 17915 * is to keep the IO processing going at this point. 17916 * 17917 * Note: This is a very specific error recovery sequence associated 17918 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 17919 * serialize the I/O with completion of the spin-up. 17920 */ 17921 mutex_enter(SD_MUTEX(un)); 17922 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17923 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 17924 un, un->un_retry_bp); 17925 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 17926 sd_start_cmds(un, un->un_retry_bp); 17927 mutex_exit(SD_MUTEX(un)); 17928 17929 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 17930 } 17931 17932 17933 /* 17934 * Function: sd_send_scsi_INQUIRY 17935 * 17936 * Description: Issue the scsi INQUIRY command. 
17937 * 17938 * Arguments: un 17939 * bufaddr 17940 * buflen 17941 * evpd 17942 * page_code 17943 * residp 17944 * 17945 * Return Code: 0 - Success 17946 * errno return code from sd_send_scsi_cmd() 17947 * 17948 * Context: Can sleep. Does not return until command is completed. 17949 */ 17950 17951 static int 17952 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 17953 uchar_t evpd, uchar_t page_code, size_t *residp) 17954 { 17955 union scsi_cdb cdb; 17956 struct uscsi_cmd ucmd_buf; 17957 int status; 17958 17959 ASSERT(un != NULL); 17960 ASSERT(!mutex_owned(SD_MUTEX(un))); 17961 ASSERT(bufaddr != NULL); 17962 17963 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 17964 17965 bzero(&cdb, sizeof (cdb)); 17966 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17967 bzero(bufaddr, buflen); 17968 17969 cdb.scc_cmd = SCMD_INQUIRY; 17970 cdb.cdb_opaque[1] = evpd; 17971 cdb.cdb_opaque[2] = page_code; 17972 FORMG0COUNT(&cdb, buflen); 17973 17974 ucmd_buf.uscsi_cdb = (char *)&cdb; 17975 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 17976 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 17977 ucmd_buf.uscsi_buflen = buflen; 17978 ucmd_buf.uscsi_rqbuf = NULL; 17979 ucmd_buf.uscsi_rqlen = 0; 17980 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 17981 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 17982 17983 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17984 UIO_SYSSPACE, SD_PATH_DIRECT); 17985 17986 if ((status == 0) && (residp != NULL)) { 17987 *residp = ucmd_buf.uscsi_resid; 17988 } 17989 17990 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 17991 17992 return (status); 17993 } 17994 17995 17996 /* 17997 * Function: sd_send_scsi_TEST_UNIT_READY 17998 * 17999 * Description: Issue the scsi TEST UNIT READY command. 18000 * This routine can be told to set the flag USCSI_DIAGNOSE to 18001 * prevent retrying failed commands. Use this when the intent 18002 * is either to check for device readiness, to clear a Unit 18003 * Attention, or to clear any outstanding sense data. 18004 * However under specific conditions the expected behavior 18005 * is for retries to bring a device ready, so use the flag 18006 * with caution. 18007 * 18008 * Arguments: un 18009 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18010 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18011 * 0: don't check for media present, do retries on cmd. 18012 * 18013 * Return Code: 0 - Success 18014 * EIO - IO error 18015 * EACCES - Reservation conflict detected 18016 * ENXIO - Not Ready, medium not present 18017 * errno return code from sd_send_scsi_cmd() 18018 * 18019 * Context: Can sleep. Does not return until command is completed. 18020 */ 18021 18022 static int 18023 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18024 { 18025 struct scsi_extended_sense sense_buf; 18026 union scsi_cdb cdb; 18027 struct uscsi_cmd ucmd_buf; 18028 int status; 18029 18030 ASSERT(un != NULL); 18031 ASSERT(!mutex_owned(SD_MUTEX(un))); 18032 18033 SD_TRACE(SD_LOG_IO, un, 18034 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18035 18036 /* 18037 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18038 * timeouts when they receive a TUR and the queue is not empty. Check 18039 * the configuration flag set during attach (indicating the drive has 18040 * this firmware bug) and un_ncmds_in_transport before issuing the 18041 * TUR. If there are 18042 * pending commands, return success; this is somewhat arbitrary but is ok 18043 * for non-removables (i.e.
the eliteI disks) and non-clustering 18044 * configurations. 18045 */ 18046 if (un->un_f_cfg_tur_check == TRUE) { 18047 mutex_enter(SD_MUTEX(un)); 18048 if (un->un_ncmds_in_transport != 0) { 18049 mutex_exit(SD_MUTEX(un)); 18050 return (0); 18051 } 18052 mutex_exit(SD_MUTEX(un)); 18053 } 18054 18055 bzero(&cdb, sizeof (cdb)); 18056 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18057 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18058 18059 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18060 18061 ucmd_buf.uscsi_cdb = (char *)&cdb; 18062 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18063 ucmd_buf.uscsi_bufaddr = NULL; 18064 ucmd_buf.uscsi_buflen = 0; 18065 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18066 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18067 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18068 18069 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18070 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18071 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18072 } 18073 ucmd_buf.uscsi_timeout = 60; 18074 18075 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18076 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18077 SD_PATH_STANDARD)); 18078 18079 switch (status) { 18080 case 0: 18081 break; /* Success! */ 18082 case EIO: 18083 switch (ucmd_buf.uscsi_status) { 18084 case STATUS_RESERVATION_CONFLICT: 18085 status = EACCES; 18086 break; 18087 case STATUS_CHECK: 18088 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18089 break; 18090 } 18091 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18092 (scsi_sense_key((uint8_t *)&sense_buf) == 18093 KEY_NOT_READY) && 18094 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18095 status = ENXIO; 18096 } 18097 break; 18098 default: 18099 break; 18100 } 18101 break; 18102 default: 18103 break; 18104 } 18105 18106 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18107 18108 return (status); 18109 } 18110 18111 18112 /* 18113 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18114 * 18115 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18116 * 18117 * Arguments: un 18118 * 18119 * Return Code: 0 - Success 18120 * EACCES 18121 * ENOTSUP 18122 * errno return code from sd_send_scsi_cmd() 18123 * 18124 * Context: Can sleep. Does not return until command is completed. 
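 *
 * Usage sketch (hypothetical caller; buffer sizing is the caller's
 * responsibility):
 *
 *	uchar_t	*buf = kmem_zalloc(len, KM_SLEEP);
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
 *	    len, buf);
 *	...
 *	kmem_free(buf, len);
 *
 * Passing data_bufp == NULL (with data_len == 0) is also legal; the
 * routine then allocates a minimal buffer itself.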
18125 */ 18126 18127 static int 18128 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18129 uint16_t data_len, uchar_t *data_bufp) 18130 { 18131 struct scsi_extended_sense sense_buf; 18132 union scsi_cdb cdb; 18133 struct uscsi_cmd ucmd_buf; 18134 int status; 18135 int no_caller_buf = FALSE; 18136 18137 ASSERT(un != NULL); 18138 ASSERT(!mutex_owned(SD_MUTEX(un))); 18139 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18140 18141 SD_TRACE(SD_LOG_IO, un, 18142 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18143 18144 bzero(&cdb, sizeof (cdb)); 18145 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18146 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18147 if (data_bufp == NULL) { 18148 /* Allocate a default buf if the caller did not give one */ 18149 ASSERT(data_len == 0); 18150 data_len = MHIOC_RESV_KEY_SIZE; 18151 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18152 no_caller_buf = TRUE; 18153 } 18154 18155 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18156 cdb.cdb_opaque[1] = usr_cmd; 18157 FORMG1COUNT(&cdb, data_len); 18158 18159 ucmd_buf.uscsi_cdb = (char *)&cdb; 18160 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18161 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18162 ucmd_buf.uscsi_buflen = data_len; 18163 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18164 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18165 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18166 ucmd_buf.uscsi_timeout = 60; 18167 18168 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18169 UIO_SYSSPACE, SD_PATH_STANDARD); 18170 18171 switch (status) { 18172 case 0: 18173 break; /* Success! */ 18174 case EIO: 18175 switch (ucmd_buf.uscsi_status) { 18176 case STATUS_RESERVATION_CONFLICT: 18177 status = EACCES; 18178 break; 18179 case STATUS_CHECK: 18180 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18181 (scsi_sense_key((uint8_t *)&sense_buf) == 18182 KEY_ILLEGAL_REQUEST)) { 18183 status = ENOTSUP; 18184 } 18185 break; 18186 default: 18187 break; 18188 } 18189 break; 18190 default: 18191 break; 18192 } 18193 18194 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18195 18196 if (no_caller_buf == TRUE) { 18197 kmem_free(data_bufp, data_len); 18198 } 18199 18200 return (status); 18201 } 18202 18203 18204 /* 18205 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18206 * 18207 * Description: This routine is the driver entry point for handling CD-ROM 18208 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 18209 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 18210 * device. 18211 * 18212 * Arguments: un - Pointer to soft state struct for the target. 18213 * usr_cmd SCSI-3 reservation facility command (one of 18214 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18215 * SD_SCSI3_PREEMPTANDABORT) 18216 * usr_bufp - user provided pointer register, reserve descriptor or 18217 * preempt and abort structure (mhioc_register_t, 18218 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 18219 * 18220 * Return Code: 0 - Success 18221 * EACCES 18222 * ENOTSUP 18223 * errno return code from sd_send_scsi_cmd() 18224 * 18225 * Context: Can sleep. Does not return until command is completed. 
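 *
 * (SD_SCSI3_REGISTERANDIGNOREKEY is accepted as well; see the
 * usr_cmd switch in the function body.)
 *
 * An illustrative registration call (a sketch, not from the
 * original source; old_key and new_key are hypothetical byte
 * arrays of MHIOC_RESV_KEY_SIZE bytes):
 *
 *	mhioc_register_t reg;
 *	bzero(&reg, sizeof (reg));
 *	bcopy(old_key, reg.oldkey.key, MHIOC_RESV_KEY_SIZE);
 *	bcopy(new_key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	rc = sd_send_scsi_PERSISTENT_RESERVE_OUT(un,
 *	    SD_SCSI3_REGISTER, (uchar_t *)&reg);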
18226 */ 18227 18228 static int 18229 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18230 uchar_t *usr_bufp) 18231 { 18232 struct scsi_extended_sense sense_buf; 18233 union scsi_cdb cdb; 18234 struct uscsi_cmd ucmd_buf; 18235 int status; 18236 uchar_t data_len = sizeof (sd_prout_t); 18237 sd_prout_t *prp; 18238 18239 ASSERT(un != NULL); 18240 ASSERT(!mutex_owned(SD_MUTEX(un))); 18241 ASSERT(data_len == 24); /* required by scsi spec */ 18242 18243 SD_TRACE(SD_LOG_IO, un, 18244 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18245 18246 if (usr_bufp == NULL) { 18247 return (EINVAL); 18248 } 18249 18250 bzero(&cdb, sizeof (cdb)); 18251 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18252 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18253 prp = kmem_zalloc(data_len, KM_SLEEP); 18254 18255 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18256 cdb.cdb_opaque[1] = usr_cmd; 18257 FORMG1COUNT(&cdb, data_len); 18258 18259 ucmd_buf.uscsi_cdb = (char *)&cdb; 18260 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18261 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18262 ucmd_buf.uscsi_buflen = data_len; 18263 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18264 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18265 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18266 ucmd_buf.uscsi_timeout = 60; 18267 18268 switch (usr_cmd) { 18269 case SD_SCSI3_REGISTER: { 18270 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18271 18272 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18273 bcopy(ptr->newkey.key, prp->service_key, 18274 MHIOC_RESV_KEY_SIZE); 18275 prp->aptpl = ptr->aptpl; 18276 break; 18277 } 18278 case SD_SCSI3_RESERVE: 18279 case SD_SCSI3_RELEASE: { 18280 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18281 18282 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18283 prp->scope_address = BE_32(ptr->scope_specific_addr); 18284 cdb.cdb_opaque[2] = ptr->type; 18285 break; 18286 } 18287 case SD_SCSI3_PREEMPTANDABORT: { 18288 mhioc_preemptandabort_t *ptr = 18289 (mhioc_preemptandabort_t *)usr_bufp; 18290 18291 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18292 bcopy(ptr->victim_key.key, prp->service_key, 18293 MHIOC_RESV_KEY_SIZE); 18294 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18295 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18296 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18297 break; 18298 } 18299 case SD_SCSI3_REGISTERANDIGNOREKEY: 18300 { 18301 mhioc_registerandignorekey_t *ptr; 18302 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18303 bcopy(ptr->newkey.key, 18304 prp->service_key, MHIOC_RESV_KEY_SIZE); 18305 prp->aptpl = ptr->aptpl; 18306 break; 18307 } 18308 default: 18309 ASSERT(FALSE); 18310 break; 18311 } 18312 18313 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18314 UIO_SYSSPACE, SD_PATH_STANDARD); 18315 18316 switch (status) { 18317 case 0: 18318 break; /* Success! 
*/ 18319 case EIO: 18320 switch (ucmd_buf.uscsi_status) { 18321 case STATUS_RESERVATION_CONFLICT: 18322 status = EACCES; 18323 break; 18324 case STATUS_CHECK: 18325 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18326 (scsi_sense_key((uint8_t *)&sense_buf) == 18327 KEY_ILLEGAL_REQUEST)) { 18328 status = ENOTSUP; 18329 } 18330 break; 18331 default: 18332 break; 18333 } 18334 break; 18335 default: 18336 break; 18337 } 18338 18339 kmem_free(prp, data_len); 18340 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18341 return (status); 18342 } 18343 18344 18345 /* 18346 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18347 * 18348 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18349 * 18350 * Arguments: un - pointer to the target's soft state struct 18351 * 18352 * Return Code: 0 - success 18353 * errno-type error code 18354 * 18355 * Context: kernel thread context only. 18356 */ 18357 18358 static int 18359 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18360 { 18361 struct sd_uscsi_info *uip; 18362 struct uscsi_cmd *uscmd; 18363 union scsi_cdb *cdb; 18364 struct buf *bp; 18365 int rval = 0; 18366 18367 SD_TRACE(SD_LOG_IO, un, 18368 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18369 18370 ASSERT(un != NULL); 18371 ASSERT(!mutex_owned(SD_MUTEX(un))); 18372 18373 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18374 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18375 18376 /* 18377 * First get some memory for the uscsi_cmd struct and cdb 18378 * and initialize for SYNCHRONIZE_CACHE cmd. 18379 */ 18380 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18381 uscmd->uscsi_cdblen = CDB_GROUP1; 18382 uscmd->uscsi_cdb = (caddr_t)cdb; 18383 uscmd->uscsi_bufaddr = NULL; 18384 uscmd->uscsi_buflen = 0; 18385 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18386 uscmd->uscsi_rqlen = SENSE_LENGTH; 18387 uscmd->uscsi_rqresid = SENSE_LENGTH; 18388 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18389 uscmd->uscsi_timeout = sd_io_time; 18390 18391 /* 18392 * Allocate an sd_uscsi_info struct and fill it with the info 18393 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18394 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18395 * since we allocate the buf here in this function, we do not 18396 * need to preserve the prior contents of b_private. 18397 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 18398 */ 18399 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18400 uip->ui_flags = SD_PATH_DIRECT; 18401 uip->ui_cmdp = uscmd; 18402 18403 bp = getrbuf(KM_SLEEP); 18404 bp->b_private = uip; 18405 18406 /* 18407 * Setup buffer to carry uscsi request. 18408 */ 18409 bp->b_flags = B_BUSY; 18410 bp->b_bcount = 0; 18411 bp->b_blkno = 0; 18412 18413 if (dkc != NULL) { 18414 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18415 uip->ui_dkc = *dkc; 18416 } 18417 18418 bp->b_edev = SD_GET_DEV(un); 18419 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18420 18421 (void) sd_uscsi_strategy(bp); 18422 18423 /* 18424 * If synchronous request, wait for completion 18425 * If async just return and let b_iodone callback 18426 * cleanup. 18427 * NOTE: On return, u_ncmds_in_driver will be decremented, 18428 * but it was also incremented in sd_uscsi_strategy(), so 18429 * we should be ok. 
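 *
 * An asynchronous caller would use this routine roughly as
 * follows (an illustrative sketch; my_done_cb and my_cookie are
 * hypothetical names, not part of this driver):
 *
 *	struct dk_callback dkc;
 *	dkc.dkc_callback = my_done_cb;	(invoked as (cookie, errno))
 *	dkc.dkc_cookie = my_cookie;
 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 *
 * Passing dkc == NULL instead gives the synchronous behavior
 * implemented just below.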
18430 */ 18431 if (dkc == NULL) { 18432 (void) biowait(bp); 18433 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18434 } 18435 18436 return (rval); 18437 } 18438 18439 18440 static int 18441 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18442 { 18443 struct sd_uscsi_info *uip; 18444 struct uscsi_cmd *uscmd; 18445 uint8_t *sense_buf; 18446 struct sd_lun *un; 18447 int status; 18448 18449 uip = (struct sd_uscsi_info *)(bp->b_private); 18450 ASSERT(uip != NULL); 18451 18452 uscmd = uip->ui_cmdp; 18453 ASSERT(uscmd != NULL); 18454 18455 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18456 ASSERT(sense_buf != NULL); 18457 18458 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18459 ASSERT(un != NULL); 18460 18461 status = geterror(bp); 18462 switch (status) { 18463 case 0: 18464 break; /* Success! */ 18465 case EIO: 18466 switch (uscmd->uscsi_status) { 18467 case STATUS_RESERVATION_CONFLICT: 18468 /* Ignore reservation conflict */ 18469 status = 0; 18470 goto done; 18471 18472 case STATUS_CHECK: 18473 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18474 (scsi_sense_key(sense_buf) == 18475 KEY_ILLEGAL_REQUEST)) { 18476 /* Ignore Illegal Request error */ 18477 mutex_enter(SD_MUTEX(un)); 18478 un->un_f_sync_cache_supported = FALSE; 18479 mutex_exit(SD_MUTEX(un)); 18480 status = ENOTSUP; 18481 goto done; 18482 } 18483 break; 18484 default: 18485 break; 18486 } 18487 /* FALLTHRU */ 18488 default: 18489 /* 18490 * Don't log an error message if this device 18491 * has removable media. 18492 */ 18493 if (!un->un_f_has_removable_media) { 18494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18495 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18496 } 18497 break; 18498 } 18499 18500 done: 18501 if (uip->ui_dkc.dkc_callback != NULL) { 18502 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18503 } 18504 18505 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18506 freerbuf(bp); 18507 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18508 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18509 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18510 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18511 18512 return (status); 18513 } 18514 18515 18516 /* 18517 * Function: sd_send_scsi_GET_CONFIGURATION 18518 * 18519 * Description: Issues the get configuration command to the device. 18520 * Called from sd_check_for_writable_cd & sd_get_media_info 18521 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 18522 * Arguments: un 18523 * ucmdbuf 18524 * rqbuf 18525 * rqbuflen 18526 * bufaddr 18527 * buflen 18528 * path_flag 18529 * 18530 * Return Code: 0 - Success 18531 * errno return code from sd_send_scsi_cmd() 18532 * 18533 * Context: Can sleep. Does not return until command is completed. 18534 * 18535 */ 18536 18537 static int 18538 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 18539 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 18540 int path_flag) 18541 { 18542 char cdb[CDB_GROUP1]; 18543 int status; 18544 18545 ASSERT(un != NULL); 18546 ASSERT(!mutex_owned(SD_MUTEX(un))); 18547 ASSERT(bufaddr != NULL); 18548 ASSERT(ucmdbuf != NULL); 18549 ASSERT(rqbuf != NULL); 18550 18551 SD_TRACE(SD_LOG_IO, un, 18552 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 18553 18554 bzero(cdb, sizeof (cdb)); 18555 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18556 bzero(rqbuf, rqbuflen); 18557 bzero(bufaddr, buflen); 18558 18559 /* 18560 * Set up cdb field for the get configuration command. 
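 * The MMC GET CONFIGURATION CDB used here is laid out as:
 *	byte 0:    0x46 (SCMD_GET_CONFIGURATION)
 *	byte 1:    RT field; 0x02 = return the header plus the
 *		   descriptor of the single named feature
 *	bytes 2-3: starting feature number (left at 0 here)
 *	bytes 7-8: allocation length
 * Only SD_PROFILE_HEADER_LEN bytes are requested, enough for the
 * feature header.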
18561 */ 18562 cdb[0] = SCMD_GET_CONFIGURATION; 18563 cdb[1] = 0x02; /* Requested Type */ 18564 cdb[8] = SD_PROFILE_HEADER_LEN; 18565 ucmdbuf->uscsi_cdb = cdb; 18566 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18567 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18568 ucmdbuf->uscsi_buflen = buflen; 18569 ucmdbuf->uscsi_timeout = sd_io_time; 18570 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18571 ucmdbuf->uscsi_rqlen = rqbuflen; 18572 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18573 18574 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18575 UIO_SYSSPACE, path_flag); 18576 18577 switch (status) { 18578 case 0: 18579 break; /* Success! */ 18580 case EIO: 18581 switch (ucmdbuf->uscsi_status) { 18582 case STATUS_RESERVATION_CONFLICT: 18583 status = EACCES; 18584 break; 18585 default: 18586 break; 18587 } 18588 break; 18589 default: 18590 break; 18591 } 18592 18593 if (status == 0) { 18594 SD_DUMP_MEMORY(un, SD_LOG_IO, 18595 "sd_send_scsi_GET_CONFIGURATION: data", 18596 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18597 } 18598 18599 SD_TRACE(SD_LOG_IO, un, 18600 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 18601 18602 return (status); 18603 } 18604 18605 /* 18606 * Function: sd_send_scsi_feature_GET_CONFIGURATION 18607 * 18608 * Description: Issues the get configuration command to the device to 18609 * retrieve a specfic feature. Called from 18610 * sd_check_for_writable_cd & sd_set_mmc_caps. 18611 * Arguments: un 18612 * ucmdbuf 18613 * rqbuf 18614 * rqbuflen 18615 * bufaddr 18616 * buflen 18617 * feature 18618 * 18619 * Return Code: 0 - Success 18620 * errno return code from sd_send_scsi_cmd() 18621 * 18622 * Context: Can sleep. Does not return until command is completed. 18623 * 18624 */ 18625 static int 18626 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 18627 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 18628 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 18629 { 18630 char cdb[CDB_GROUP1]; 18631 int status; 18632 18633 ASSERT(un != NULL); 18634 ASSERT(!mutex_owned(SD_MUTEX(un))); 18635 ASSERT(bufaddr != NULL); 18636 ASSERT(ucmdbuf != NULL); 18637 ASSERT(rqbuf != NULL); 18638 18639 SD_TRACE(SD_LOG_IO, un, 18640 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 18641 18642 bzero(cdb, sizeof (cdb)); 18643 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 18644 bzero(rqbuf, rqbuflen); 18645 bzero(bufaddr, buflen); 18646 18647 /* 18648 * Set up cdb field for the get configuration command. 18649 */ 18650 cdb[0] = SCMD_GET_CONFIGURATION; 18651 cdb[1] = 0x02; /* Requested Type */ 18652 cdb[3] = feature; 18653 cdb[8] = buflen; 18654 ucmdbuf->uscsi_cdb = cdb; 18655 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 18656 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 18657 ucmdbuf->uscsi_buflen = buflen; 18658 ucmdbuf->uscsi_timeout = sd_io_time; 18659 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 18660 ucmdbuf->uscsi_rqlen = rqbuflen; 18661 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 18662 18663 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 18664 UIO_SYSSPACE, path_flag); 18665 18666 switch (status) { 18667 case 0: 18668 break; /* Success! 
*/ 18669 case EIO: 18670 switch (ucmdbuf->uscsi_status) { 18671 case STATUS_RESERVATION_CONFLICT: 18672 status = EACCES; 18673 break; 18674 default: 18675 break; 18676 } 18677 break; 18678 default: 18679 break; 18680 } 18681 18682 if (status == 0) { 18683 SD_DUMP_MEMORY(un, SD_LOG_IO, 18684 "sd_send_scsi_feature_GET_CONFIGURATION: data", 18685 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 18686 } 18687 18688 SD_TRACE(SD_LOG_IO, un, 18689 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 18690 18691 return (status); 18692 } 18693 18694 18695 /* 18696 * Function: sd_send_scsi_MODE_SENSE 18697 * 18698 * Description: Utility function for issuing a scsi MODE SENSE command. 18699 * Note: This routine uses a consistent implementation for Group0, 18700 * Group1, and Group2 commands across all platforms. ATAPI devices 18701 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 18702 * 18703 * Arguments: un - pointer to the softstate struct for the target. 18704 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 18705 * CDB_GROUP[1|2] (10 byte). 18706 * bufaddr - buffer for page data retrieved from the target. 18707 * buflen - size of page to be retrieved. 18708 * page_code - page code of data to be retrieved from the target. 18709 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18710 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18711 * to use the USCSI "direct" chain and bypass the normal 18712 * command waitq. 18713 * 18714 * Return Code: 0 - Success 18715 * errno return code from sd_send_scsi_cmd() 18716 * 18717 * Context: Can sleep. Does not return until command is completed. 18718 */ 18719 18720 static int 18721 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 18722 size_t buflen, uchar_t page_code, int path_flag) 18723 { 18724 struct scsi_extended_sense sense_buf; 18725 union scsi_cdb cdb; 18726 struct uscsi_cmd ucmd_buf; 18727 int status; 18728 int headlen; 18729 18730 ASSERT(un != NULL); 18731 ASSERT(!mutex_owned(SD_MUTEX(un))); 18732 ASSERT(bufaddr != NULL); 18733 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 18734 (cdbsize == CDB_GROUP2)); 18735 18736 SD_TRACE(SD_LOG_IO, un, 18737 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 18738 18739 bzero(&cdb, sizeof (cdb)); 18740 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18741 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18742 bzero(bufaddr, buflen); 18743 18744 if (cdbsize == CDB_GROUP0) { 18745 cdb.scc_cmd = SCMD_MODE_SENSE; 18746 cdb.cdb_opaque[2] = page_code; 18747 FORMG0COUNT(&cdb, buflen); 18748 headlen = MODE_HEADER_LENGTH; 18749 } else { 18750 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 18751 cdb.cdb_opaque[2] = page_code; 18752 FORMG1COUNT(&cdb, buflen); 18753 headlen = MODE_HEADER_LENGTH_GRP2; 18754 } 18755 18756 ASSERT(headlen <= buflen); 18757 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 18758 18759 ucmd_buf.uscsi_cdb = (char *)&cdb; 18760 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 18761 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18762 ucmd_buf.uscsi_buflen = buflen; 18763 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18764 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18765 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18766 ucmd_buf.uscsi_timeout = 60; 18767 18768 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18769 UIO_SYSSPACE, path_flag); 18770 18771 switch (status) { 18772 case 0: 18773 /* 18774 * sr_check_wp() uses 0x3f page code and check the header of 18775 * mode page to determine if target device is 
write-protected.
18776 * But some USB devices return 0 bytes for 0x3f page code. For
18777 * this case, make sure that mode page header is returned at
18778 * least.
18779 */
18780 if (buflen - ucmd_buf.uscsi_resid < headlen)
18781 status = EIO;
18782 break; /* Success! */
18783 case EIO:
18784 switch (ucmd_buf.uscsi_status) {
18785 case STATUS_RESERVATION_CONFLICT:
18786 status = EACCES;
18787 break;
18788 default:
18789 break;
18790 }
18791 break;
18792 default:
18793 break;
18794 }
18795
18796 if (status == 0) {
18797 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
18798 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
18799 }
18800 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
18801
18802 return (status);
18803 }
18804
18805
18806 /*
18807 * Function: sd_send_scsi_MODE_SELECT
18808 *
18809 * Description: Utility function for issuing a scsi MODE SELECT command.
18810 * Note: This routine uses a consistent implementation for Group0,
18811 * Group1, and Group2 commands across all platforms. ATAPI devices
18812 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
18813 *
18814 * Arguments: un - pointer to the softstate struct for the target.
18815 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte), or
18816 * CDB_GROUP[1|2] (10 byte)).
18817 * bufaddr - buffer holding the mode page data to be sent to the target.
18818 * buflen - size of the mode page data to be sent.
18819 * save_page - boolean to determine if SP bit should be set.
18820 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18821 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18822 * to use the USCSI "direct" chain and bypass the normal
18823 * command waitq.
18824 *
18825 * Return Code: 0 - Success
18826 * errno return code from sd_send_scsi_cmd()
18827 *
18828 * Context: Can sleep. Does not return until command is completed.
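 *
 * Typical use is a read-modify-write of a mode page (an
 * illustrative sketch, not from the original source; the buffer
 * size and the MODEPAGE_CACHING page code are assumptions here):
 *
 *	uchar_t buf[MODE_HEADER_LENGTH_GRP2 + 32];
 *	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, buf,
 *	    sizeof (buf), MODEPAGE_CACHING, SD_PATH_DIRECT) == 0) {
 *		(modify the page bytes in buf)
 *		(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, buf,
 *		    sizeof (buf), SD_SAVE_PAGE, SD_PATH_DIRECT);
 *	}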
18829 */
18830
18831 static int
18832 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
18833 size_t buflen, uchar_t save_page, int path_flag)
18834 {
18835 struct scsi_extended_sense sense_buf;
18836 union scsi_cdb cdb;
18837 struct uscsi_cmd ucmd_buf;
18838 int status;
18839
18840 ASSERT(un != NULL);
18841 ASSERT(!mutex_owned(SD_MUTEX(un)));
18842 ASSERT(bufaddr != NULL);
18843 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
18844 (cdbsize == CDB_GROUP2));
18845
18846 SD_TRACE(SD_LOG_IO, un,
18847 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
18848
18849 bzero(&cdb, sizeof (cdb));
18850 bzero(&ucmd_buf, sizeof (ucmd_buf));
18851 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
18852
18853 /* Set the PF bit for many third party drives */
18854 cdb.cdb_opaque[1] = 0x10;
18855
18856 /* Set the save page (SP) bit if given */
18857 if (save_page == SD_SAVE_PAGE) {
18858 cdb.cdb_opaque[1] |= 0x01;
18859 }
18860
18861 if (cdbsize == CDB_GROUP0) {
18862 cdb.scc_cmd = SCMD_MODE_SELECT;
18863 FORMG0COUNT(&cdb, buflen);
18864 } else {
18865 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
18866 FORMG1COUNT(&cdb, buflen);
18867 }
18868
18869 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
18870
18871 ucmd_buf.uscsi_cdb = (char *)&cdb;
18872 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
18873 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18874 ucmd_buf.uscsi_buflen = buflen;
18875 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18876 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
18877 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
18878 ucmd_buf.uscsi_timeout = 60;
18879
18880 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18881 UIO_SYSSPACE, path_flag);
18882
18883 switch (status) {
18884 case 0:
18885 break; /* Success! */
18886 case EIO:
18887 switch (ucmd_buf.uscsi_status) {
18888 case STATUS_RESERVATION_CONFLICT:
18889 status = EACCES;
18890 break;
18891 default:
18892 break;
18893 }
18894 break;
18895 default:
18896 break;
18897 }
18898
18899 if (status == 0) {
18900 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
18901 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
18902 }
18903 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
18904
18905 return (status);
18906 }
18907
18908
18909 /*
18910 * Function: sd_send_scsi_RDWR
18911 *
18912 * Description: Issue a scsi READ or WRITE command with the given parameters.
18913 *
18914 * Arguments: un: Pointer to the sd_lun struct for the target.
18915 * cmd: SCMD_READ or SCMD_WRITE
18916 * bufaddr: Address of the caller's buffer for the RDWR data.
18917 * buflen: Length of the caller's buffer for the RDWR data.
18918 * start_block: Block number for the start of the RDWR operation.
18919 * (Assumes target-native block size.)
18922 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18923 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18924 * to use the USCSI "direct" chain and bypass the normal
18925 * command waitq.
18926 *
18927 * Return Code: 0 - Success
18928 * errno return code from sd_send_scsi_cmd()
18929 *
18930 * Context: Can sleep. Does not return until command is completed.
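 *
 * For example, reading the first target block (an illustrative
 * sketch, not from the original source):
 *
 *	uchar_t *blk = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
 *	rc = sd_send_scsi_RDWR(un, SCMD_READ, blk,
 *	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_STANDARD);
 *
 * The CDB group is chosen from start_block: CDB_GROUP0 (6-byte,
 * 21-bit LBA) when the address fits and the device is not ATAPI,
 * CDB_GROUP1 (10-byte, 32-bit LBA) otherwise, and CDB_GROUP4
 * (16-byte) for LBAs above 32 bits.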
18931 */ 18932 18933 static int 18934 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 18935 size_t buflen, daddr_t start_block, int path_flag) 18936 { 18937 struct scsi_extended_sense sense_buf; 18938 union scsi_cdb cdb; 18939 struct uscsi_cmd ucmd_buf; 18940 uint32_t block_count; 18941 int status; 18942 int cdbsize; 18943 uchar_t flag; 18944 18945 ASSERT(un != NULL); 18946 ASSERT(!mutex_owned(SD_MUTEX(un))); 18947 ASSERT(bufaddr != NULL); 18948 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 18949 18950 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 18951 18952 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 18953 return (EINVAL); 18954 } 18955 18956 mutex_enter(SD_MUTEX(un)); 18957 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 18958 mutex_exit(SD_MUTEX(un)); 18959 18960 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 18961 18962 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 18963 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 18964 bufaddr, buflen, start_block, block_count); 18965 18966 bzero(&cdb, sizeof (cdb)); 18967 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18968 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18969 18970 /* Compute CDB size to use */ 18971 if (start_block > 0xffffffff) 18972 cdbsize = CDB_GROUP4; 18973 else if ((start_block & 0xFFE00000) || 18974 (un->un_f_cfg_is_atapi == TRUE)) 18975 cdbsize = CDB_GROUP1; 18976 else 18977 cdbsize = CDB_GROUP0; 18978 18979 switch (cdbsize) { 18980 case CDB_GROUP0: /* 6-byte CDBs */ 18981 cdb.scc_cmd = cmd; 18982 FORMG0ADDR(&cdb, start_block); 18983 FORMG0COUNT(&cdb, block_count); 18984 break; 18985 case CDB_GROUP1: /* 10-byte CDBs */ 18986 cdb.scc_cmd = cmd | SCMD_GROUP1; 18987 FORMG1ADDR(&cdb, start_block); 18988 FORMG1COUNT(&cdb, block_count); 18989 break; 18990 case CDB_GROUP4: /* 16-byte CDBs */ 18991 cdb.scc_cmd = cmd | SCMD_GROUP4; 18992 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 18993 FORMG4COUNT(&cdb, block_count); 18994 break; 18995 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 18996 default: 18997 /* All others reserved */ 18998 return (EINVAL); 18999 } 19000 19001 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19002 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19003 19004 ucmd_buf.uscsi_cdb = (char *)&cdb; 19005 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19006 ucmd_buf.uscsi_bufaddr = bufaddr; 19007 ucmd_buf.uscsi_buflen = buflen; 19008 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19009 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19010 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19011 ucmd_buf.uscsi_timeout = 60; 19012 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19013 UIO_SYSSPACE, path_flag); 19014 switch (status) { 19015 case 0: 19016 break; /* Success! */ 19017 case EIO: 19018 switch (ucmd_buf.uscsi_status) { 19019 case STATUS_RESERVATION_CONFLICT: 19020 status = EACCES; 19021 break; 19022 default: 19023 break; 19024 } 19025 break; 19026 default: 19027 break; 19028 } 19029 19030 if (status == 0) { 19031 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19032 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19033 } 19034 19035 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19036 19037 return (status); 19038 } 19039 19040 19041 /* 19042 * Function: sd_send_scsi_LOG_SENSE 19043 * 19044 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19045 * 19046 * Arguments: un: Pointer to the sd_lun struct for the target. 
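 *		bufaddr:  Buffer to receive the log page data.
 *		buflen:   Length of the bufaddr buffer.
 *		page_code: Log page to be retrieved (CDB byte 2,
 *			  low six bits).
 *		page_control: Page control field (CDB byte 2, upper
 *			  two bits).
 *		param_ptr: Parameter pointer (CDB bytes 5-6).
 *		path_flag: SD_PATH_STANDARD or SD_PATH_DIRECT.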
19047 * 19048 * Return Code: 0 - Success 19049 * errno return code from sd_send_scsi_cmd() 19050 * 19051 * Context: Can sleep. Does not return until command is completed. 19052 */ 19053 19054 static int 19055 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19056 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19057 int path_flag) 19058 19059 { 19060 struct scsi_extended_sense sense_buf; 19061 union scsi_cdb cdb; 19062 struct uscsi_cmd ucmd_buf; 19063 int status; 19064 19065 ASSERT(un != NULL); 19066 ASSERT(!mutex_owned(SD_MUTEX(un))); 19067 19068 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19069 19070 bzero(&cdb, sizeof (cdb)); 19071 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19072 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19073 19074 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19075 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19076 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19077 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19078 FORMG1COUNT(&cdb, buflen); 19079 19080 ucmd_buf.uscsi_cdb = (char *)&cdb; 19081 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19082 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19083 ucmd_buf.uscsi_buflen = buflen; 19084 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19085 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19086 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19087 ucmd_buf.uscsi_timeout = 60; 19088 19089 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19090 UIO_SYSSPACE, path_flag); 19091 19092 switch (status) { 19093 case 0: 19094 break; 19095 case EIO: 19096 switch (ucmd_buf.uscsi_status) { 19097 case STATUS_RESERVATION_CONFLICT: 19098 status = EACCES; 19099 break; 19100 case STATUS_CHECK: 19101 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19102 (scsi_sense_key((uint8_t *)&sense_buf) == 19103 KEY_ILLEGAL_REQUEST) && 19104 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19105 /* 19106 * ASC 0x24: INVALID FIELD IN CDB 19107 */ 19108 switch (page_code) { 19109 case START_STOP_CYCLE_PAGE: 19110 /* 19111 * The start stop cycle counter is 19112 * implemented as page 0x31 in earlier 19113 * generation disks. In new generation 19114 * disks the start stop cycle counter is 19115 * implemented as page 0xE. To properly 19116 * handle this case if an attempt for 19117 * log page 0xE is made and fails we 19118 * will try again using page 0x31. 19119 * 19120 * Network storage BU committed to 19121 * maintain the page 0x31 for this 19122 * purpose and will not have any other 19123 * page implemented with page code 0x31 19124 * until all disks transition to the 19125 * standard page. 
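 *
 * (START_STOP_CYCLE_PAGE here is the standard page 0xE;
 * START_STOP_CYCLE_VU_PAGE, substituted on the retry below, is
 * the vendor-unique page 0x31.)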
19126 */ 19127 mutex_enter(SD_MUTEX(un)); 19128 un->un_start_stop_cycle_page = 19129 START_STOP_CYCLE_VU_PAGE; 19130 cdb.cdb_opaque[2] = 19131 (char)(page_control << 6) | 19132 un->un_start_stop_cycle_page; 19133 mutex_exit(SD_MUTEX(un)); 19134 status = sd_send_scsi_cmd( 19135 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19136 UIO_SYSSPACE, path_flag); 19137 19138 break; 19139 case TEMPERATURE_PAGE: 19140 status = ENOTTY; 19141 break; 19142 default: 19143 break; 19144 } 19145 } 19146 break; 19147 default: 19148 break; 19149 } 19150 break; 19151 default: 19152 break; 19153 } 19154 19155 if (status == 0) { 19156 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19157 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19158 } 19159 19160 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19161 19162 return (status); 19163 } 19164 19165 19166 /* 19167 * Function: sdioctl 19168 * 19169 * Description: Driver's ioctl(9e) entry point function. 19170 * 19171 * Arguments: dev - device number 19172 * cmd - ioctl operation to be performed 19173 * arg - user argument, contains data to be set or reference 19174 * parameter for get 19175 * flag - bit flag, indicating open settings, 32/64 bit type 19176 * cred_p - user credential pointer 19177 * rval_p - calling process return value (OPT) 19178 * 19179 * Return Code: EINVAL 19180 * ENOTTY 19181 * ENXIO 19182 * EIO 19183 * EFAULT 19184 * ENOTSUP 19185 * EPERM 19186 * 19187 * Context: Called from the device switch at normal priority. 19188 */ 19189 19190 static int 19191 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19192 { 19193 struct sd_lun *un = NULL; 19194 int err = 0; 19195 int i = 0; 19196 cred_t *cr; 19197 int tmprval = EINVAL; 19198 int is_valid; 19199 19200 /* 19201 * All device accesses go thru sdstrategy where we check on suspend 19202 * status 19203 */ 19204 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19205 return (ENXIO); 19206 } 19207 19208 ASSERT(!mutex_owned(SD_MUTEX(un))); 19209 19210 19211 is_valid = SD_IS_VALID_LABEL(un); 19212 19213 /* 19214 * Moved this wait from sd_uscsi_strategy to here for 19215 * reasons of deadlock prevention. Internal driver commands, 19216 * specifically those to change a devices power level, result 19217 * in a call to sd_uscsi_strategy. 19218 */ 19219 mutex_enter(SD_MUTEX(un)); 19220 while ((un->un_state == SD_STATE_SUSPENDED) || 19221 (un->un_state == SD_STATE_PM_CHANGING)) { 19222 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19223 } 19224 /* 19225 * Twiddling the counter here protects commands from now 19226 * through to the top of sd_uscsi_strategy. Without the 19227 * counter inc. a power down, for example, could get in 19228 * after the above check for state is made and before 19229 * execution gets to the top of sd_uscsi_strategy. 19230 * That would cause problems. 
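 *
 * In sketch form, the entry protocol implemented just below is:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	while (un_state is SUSPENDED or PM_CHANGING)
 *		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
 *	un->un_ncmds_in_driver++;
 *	(... per-ioctl work ...)
 *	un->un_ncmds_in_driver--;	(on every exit path)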
19231 */ 19232 un->un_ncmds_in_driver++; 19233 19234 if (!is_valid && 19235 (flag & (FNDELAY | FNONBLOCK))) { 19236 switch (cmd) { 19237 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19238 case DKIOCGVTOC: 19239 case DKIOCGAPART: 19240 case DKIOCPARTINFO: 19241 case DKIOCSGEOM: 19242 case DKIOCSAPART: 19243 case DKIOCGETEFI: 19244 case DKIOCPARTITION: 19245 case DKIOCSVTOC: 19246 case DKIOCSETEFI: 19247 case DKIOCGMBOOT: 19248 case DKIOCSMBOOT: 19249 case DKIOCG_PHYGEOM: 19250 case DKIOCG_VIRTGEOM: 19251 /* let cmlb handle it */ 19252 goto skip_ready_valid; 19253 19254 case CDROMPAUSE: 19255 case CDROMRESUME: 19256 case CDROMPLAYMSF: 19257 case CDROMPLAYTRKIND: 19258 case CDROMREADTOCHDR: 19259 case CDROMREADTOCENTRY: 19260 case CDROMSTOP: 19261 case CDROMSTART: 19262 case CDROMVOLCTRL: 19263 case CDROMSUBCHNL: 19264 case CDROMREADMODE2: 19265 case CDROMREADMODE1: 19266 case CDROMREADOFFSET: 19267 case CDROMSBLKMODE: 19268 case CDROMGBLKMODE: 19269 case CDROMGDRVSPEED: 19270 case CDROMSDRVSPEED: 19271 case CDROMCDDA: 19272 case CDROMCDXA: 19273 case CDROMSUBCODE: 19274 if (!ISCD(un)) { 19275 un->un_ncmds_in_driver--; 19276 ASSERT(un->un_ncmds_in_driver >= 0); 19277 mutex_exit(SD_MUTEX(un)); 19278 return (ENOTTY); 19279 } 19280 break; 19281 case FDEJECT: 19282 case DKIOCEJECT: 19283 case CDROMEJECT: 19284 if (!un->un_f_eject_media_supported) { 19285 un->un_ncmds_in_driver--; 19286 ASSERT(un->un_ncmds_in_driver >= 0); 19287 mutex_exit(SD_MUTEX(un)); 19288 return (ENOTTY); 19289 } 19290 break; 19291 case DKIOCFLUSHWRITECACHE: 19292 mutex_exit(SD_MUTEX(un)); 19293 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19294 if (err != 0) { 19295 mutex_enter(SD_MUTEX(un)); 19296 un->un_ncmds_in_driver--; 19297 ASSERT(un->un_ncmds_in_driver >= 0); 19298 mutex_exit(SD_MUTEX(un)); 19299 return (EIO); 19300 } 19301 mutex_enter(SD_MUTEX(un)); 19302 /* FALLTHROUGH */ 19303 case DKIOCREMOVABLE: 19304 case DKIOCHOTPLUGGABLE: 19305 case DKIOCINFO: 19306 case DKIOCGMEDIAINFO: 19307 case MHIOCENFAILFAST: 19308 case MHIOCSTATUS: 19309 case MHIOCTKOWN: 19310 case MHIOCRELEASE: 19311 case MHIOCGRP_INKEYS: 19312 case MHIOCGRP_INRESV: 19313 case MHIOCGRP_REGISTER: 19314 case MHIOCGRP_RESERVE: 19315 case MHIOCGRP_PREEMPTANDABORT: 19316 case MHIOCGRP_REGISTERANDIGNOREKEY: 19317 case CDROMCLOSETRAY: 19318 case USCSICMD: 19319 goto skip_ready_valid; 19320 default: 19321 break; 19322 } 19323 19324 mutex_exit(SD_MUTEX(un)); 19325 err = sd_ready_and_valid(un); 19326 mutex_enter(SD_MUTEX(un)); 19327 19328 if (err != SD_READY_VALID) { 19329 switch (cmd) { 19330 case DKIOCSTATE: 19331 case CDROMGDRVSPEED: 19332 case CDROMSDRVSPEED: 19333 case FDEJECT: /* for eject command */ 19334 case DKIOCEJECT: 19335 case CDROMEJECT: 19336 case DKIOCREMOVABLE: 19337 case DKIOCHOTPLUGGABLE: 19338 break; 19339 default: 19340 if (un->un_f_has_removable_media) { 19341 err = ENXIO; 19342 } else { 19343 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19344 if (err == SD_RESERVED_BY_OTHERS) { 19345 err = EACCES; 19346 } else { 19347 err = EIO; 19348 } 19349 } 19350 un->un_ncmds_in_driver--; 19351 ASSERT(un->un_ncmds_in_driver >= 0); 19352 mutex_exit(SD_MUTEX(un)); 19353 return (err); 19354 } 19355 } 19356 } 19357 19358 skip_ready_valid: 19359 mutex_exit(SD_MUTEX(un)); 19360 19361 switch (cmd) { 19362 case DKIOCINFO: 19363 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19364 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19365 break; 19366 19367 case DKIOCGMEDIAINFO: 19368 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19369 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19370 break; 19371 19372 case DKIOCGGEOM: 19373 case DKIOCGVTOC: 19374 case DKIOCGAPART: 19375 case DKIOCPARTINFO: 19376 case DKIOCSGEOM: 19377 case DKIOCSAPART: 19378 case DKIOCGETEFI: 19379 case DKIOCPARTITION: 19380 case DKIOCSVTOC: 19381 case DKIOCSETEFI: 19382 case DKIOCGMBOOT: 19383 case DKIOCSMBOOT: 19384 case DKIOCG_PHYGEOM: 19385 case DKIOCG_VIRTGEOM: 19386 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19387 19388 /* TUR should spin up */ 19389 19390 if (un->un_f_has_removable_media) 19391 err = sd_send_scsi_TEST_UNIT_READY(un, 19392 SD_CHECK_FOR_MEDIA); 19393 else 19394 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19395 19396 if (err != 0) 19397 break; 19398 19399 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19400 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19401 19402 if ((err == 0) && 19403 ((cmd == DKIOCSETEFI) || 19404 (un->un_f_pkstats_enabled) && 19405 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19406 19407 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19408 (void *)SD_PATH_DIRECT); 19409 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19410 sd_set_pstats(un); 19411 SD_TRACE(SD_LOG_IO_PARTITION, un, 19412 "sd_ioctl: un:0x%p pstats created and " 19413 "set\n", un); 19414 } 19415 } 19416 19417 if ((cmd == DKIOCSVTOC) || 19418 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19419 19420 mutex_enter(SD_MUTEX(un)); 19421 if (un->un_f_devid_supported && 19422 (un->un_f_opt_fab_devid == TRUE)) { 19423 if (un->un_devid == NULL) { 19424 sd_register_devid(un, SD_DEVINFO(un), 19425 SD_TARGET_IS_UNRESERVED); 19426 } else { 19427 /* 19428 * The device id for this disk 19429 * has been fabricated. The 19430 * device id must be preserved 19431 * by writing it back out to 19432 * disk. 19433 */ 19434 if (sd_write_deviceid(un) != 0) { 19435 ddi_devid_free(un->un_devid); 19436 un->un_devid = NULL; 19437 } 19438 } 19439 } 19440 mutex_exit(SD_MUTEX(un)); 19441 } 19442 19443 break; 19444 19445 case DKIOCLOCK: 19446 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19447 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19448 SD_PATH_STANDARD); 19449 break; 19450 19451 case DKIOCUNLOCK: 19452 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19453 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19454 SD_PATH_STANDARD); 19455 break; 19456 19457 case DKIOCSTATE: { 19458 enum dkio_state state; 19459 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19460 19461 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19462 err = EFAULT; 19463 } else { 19464 err = sd_check_media(dev, state); 19465 if (err == 0) { 19466 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19467 sizeof (int), flag) != 0) 19468 err = EFAULT; 19469 } 19470 } 19471 break; 19472 } 19473 19474 case DKIOCREMOVABLE: 19475 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19476 i = un->un_f_has_removable_media ? 1 : 0; 19477 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19478 err = EFAULT; 19479 } else { 19480 err = 0; 19481 } 19482 break; 19483 19484 case DKIOCHOTPLUGGABLE: 19485 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19486 i = un->un_f_is_hotpluggable ? 
1 : 0; 19487 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19488 err = EFAULT; 19489 } else { 19490 err = 0; 19491 } 19492 break; 19493 19494 case DKIOCGTEMPERATURE: 19495 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19496 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19497 break; 19498 19499 case MHIOCENFAILFAST: 19500 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19501 if ((err = drv_priv(cred_p)) == 0) { 19502 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19503 } 19504 break; 19505 19506 case MHIOCTKOWN: 19507 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19508 if ((err = drv_priv(cred_p)) == 0) { 19509 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19510 } 19511 break; 19512 19513 case MHIOCRELEASE: 19514 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19515 if ((err = drv_priv(cred_p)) == 0) { 19516 err = sd_mhdioc_release(dev); 19517 } 19518 break; 19519 19520 case MHIOCSTATUS: 19521 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19522 if ((err = drv_priv(cred_p)) == 0) { 19523 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19524 case 0: 19525 err = 0; 19526 break; 19527 case EACCES: 19528 *rval_p = 1; 19529 err = 0; 19530 break; 19531 default: 19532 err = EIO; 19533 break; 19534 } 19535 } 19536 break; 19537 19538 case MHIOCQRESERVE: 19539 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 19540 if ((err = drv_priv(cred_p)) == 0) { 19541 err = sd_reserve_release(dev, SD_RESERVE); 19542 } 19543 break; 19544 19545 case MHIOCREREGISTERDEVID: 19546 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 19547 if (drv_priv(cred_p) == EPERM) { 19548 err = EPERM; 19549 } else if (!un->un_f_devid_supported) { 19550 err = ENOTTY; 19551 } else { 19552 err = sd_mhdioc_register_devid(dev); 19553 } 19554 break; 19555 19556 case MHIOCGRP_INKEYS: 19557 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 19558 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19559 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19560 err = ENOTSUP; 19561 } else { 19562 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 19563 flag); 19564 } 19565 } 19566 break; 19567 19568 case MHIOCGRP_INRESV: 19569 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 19570 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 19571 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19572 err = ENOTSUP; 19573 } else { 19574 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 19575 } 19576 } 19577 break; 19578 19579 case MHIOCGRP_REGISTER: 19580 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 19581 if ((err = drv_priv(cred_p)) != EPERM) { 19582 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19583 err = ENOTSUP; 19584 } else if (arg != NULL) { 19585 mhioc_register_t reg; 19586 if (ddi_copyin((void *)arg, ®, 19587 sizeof (mhioc_register_t), flag) != 0) { 19588 err = EFAULT; 19589 } else { 19590 err = 19591 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19592 un, SD_SCSI3_REGISTER, 19593 (uchar_t *)®); 19594 } 19595 } 19596 } 19597 break; 19598 19599 case MHIOCGRP_RESERVE: 19600 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 19601 if ((err = drv_priv(cred_p)) != EPERM) { 19602 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 19603 err = ENOTSUP; 19604 } else if (arg != NULL) { 19605 mhioc_resv_desc_t resv_desc; 19606 if (ddi_copyin((void *)arg, &resv_desc, 19607 sizeof (mhioc_resv_desc_t), flag) != 0) { 19608 err = EFAULT; 19609 } else { 19610 err = 19611 sd_send_scsi_PERSISTENT_RESERVE_OUT( 19612 un, SD_SCSI3_RESERVE, 19613 (uchar_t *)&resv_desc); 19614 } 19615 } 19616 } 19617 break; 19618 19619 
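	/*
	 * The MHIOCGRP_* cases above and below share one pattern:
	 * verify privilege with drv_priv(), fail SCSI-2 reservation
	 * mode with ENOTSUP, ddi_copyin() the caller's descriptor,
	 * and hand it to sd_send_scsi_PERSISTENT_RESERVE_OUT() with
	 * the matching SD_SCSI3_* service action.
	 */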
case MHIOCGRP_PREEMPTANDABORT:
19620 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
19621 if ((err = drv_priv(cred_p)) != EPERM) {
19622 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
19623 err = ENOTSUP;
19624 } else if (arg != NULL) {
19625 mhioc_preemptandabort_t preempt_abort;
19626 if (ddi_copyin((void *)arg, &preempt_abort,
19627 sizeof (mhioc_preemptandabort_t),
19628 flag) != 0) {
19629 err = EFAULT;
19630 } else {
19631 err =
19632 sd_send_scsi_PERSISTENT_RESERVE_OUT(
19633 un, SD_SCSI3_PREEMPTANDABORT,
19634 (uchar_t *)&preempt_abort);
19635 }
19636 }
19637 }
19638 break;
19639
19640 case MHIOCGRP_REGISTERANDIGNOREKEY:
19641 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
19642 if ((err = drv_priv(cred_p)) != EPERM) {
19643 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
19644 err = ENOTSUP;
19645 } else if (arg != NULL) {
19646 mhioc_registerandignorekey_t r_and_i;
19647 if (ddi_copyin((void *)arg, (void *)&r_and_i,
19648 sizeof (mhioc_registerandignorekey_t),
19649 flag) != 0) {
19650 err = EFAULT;
19651 } else {
19652 err =
19653 sd_send_scsi_PERSISTENT_RESERVE_OUT(
19654 un, SD_SCSI3_REGISTERANDIGNOREKEY,
19655 (uchar_t *)&r_and_i);
19656 }
19657 }
19658 }
19659 break;
19660
19661 case USCSICMD:
19662 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
19663 cr = ddi_get_cred();
19664 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
19665 err = EPERM;
19666 } else {
19667 enum uio_seg uioseg;
19668 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
19669 UIO_USERSPACE;
19670 if (un->un_f_format_in_progress == TRUE) {
19671 err = EAGAIN;
19672 break;
19673 }
19674 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg,
19675 flag, uioseg, SD_PATH_STANDARD);
19676 }
19677 break;
19678
19679 case CDROMPAUSE:
19680 case CDROMRESUME:
19681 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
19682 if (!ISCD(un)) {
19683 err = ENOTTY;
19684 } else {
19685 err = sr_pause_resume(dev, cmd);
19686 }
19687 break;
19688
19689 case CDROMPLAYMSF:
19690 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
19691 if (!ISCD(un)) {
19692 err = ENOTTY;
19693 } else {
19694 err = sr_play_msf(dev, (caddr_t)arg, flag);
19695 }
19696 break;
19697
19698 case CDROMPLAYTRKIND:
19699 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
19700 #if defined(__i386) || defined(__amd64)
19701 /*
19702 * Not supported on ATAPI CD drives; use CDROMPLAYMSF instead.
19703 */
19704 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
19705 #else
19706 if (!ISCD(un)) {
19707 #endif
19708 err = ENOTTY;
19709 } else {
19710 err = sr_play_trkind(dev, (caddr_t)arg, flag);
19711 }
19712 break;
19713
19714 case CDROMREADTOCHDR:
19715 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
19716 if (!ISCD(un)) {
19717 err = ENOTTY;
19718 } else {
19719 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
19720 }
19721 break;
19722
19723 case CDROMREADTOCENTRY:
19724 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
19725 if (!ISCD(un)) {
19726 err = ENOTTY;
19727 } else {
19728 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
19729 }
19730 break;
19731
19732 case CDROMSTOP:
19733 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
19734 if (!ISCD(un)) {
19735 err = ENOTTY;
19736 } else {
19737 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
19738 SD_PATH_STANDARD);
19739 }
19740 break;
19741
19742 case CDROMSTART:
19743 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
19744 if (!ISCD(un)) {
19745 err = ENOTTY;
19746 } else {
19747 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
19748 SD_PATH_STANDARD);
19749 }
19750 break;
19751
19752 case CDROMCLOSETRAY: 19753 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 19754 if (!ISCD(un)) { 19755 err = ENOTTY; 19756 } else { 19757 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 19758 SD_PATH_STANDARD); 19759 } 19760 break; 19761 19762 case FDEJECT: /* for eject command */ 19763 case DKIOCEJECT: 19764 case CDROMEJECT: 19765 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 19766 if (!un->un_f_eject_media_supported) { 19767 err = ENOTTY; 19768 } else { 19769 err = sr_eject(dev); 19770 } 19771 break; 19772 19773 case CDROMVOLCTRL: 19774 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 19775 if (!ISCD(un)) { 19776 err = ENOTTY; 19777 } else { 19778 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 19779 } 19780 break; 19781 19782 case CDROMSUBCHNL: 19783 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 19784 if (!ISCD(un)) { 19785 err = ENOTTY; 19786 } else { 19787 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 19788 } 19789 break; 19790 19791 case CDROMREADMODE2: 19792 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 19793 if (!ISCD(un)) { 19794 err = ENOTTY; 19795 } else if (un->un_f_cfg_is_atapi == TRUE) { 19796 /* 19797 * If the drive supports READ CD, use that instead of 19798 * switching the LBA size via a MODE SELECT 19799 * Block Descriptor 19800 */ 19801 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 19802 } else { 19803 err = sr_read_mode2(dev, (caddr_t)arg, flag); 19804 } 19805 break; 19806 19807 case CDROMREADMODE1: 19808 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 19809 if (!ISCD(un)) { 19810 err = ENOTTY; 19811 } else { 19812 err = sr_read_mode1(dev, (caddr_t)arg, flag); 19813 } 19814 break; 19815 19816 case CDROMREADOFFSET: 19817 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 19818 if (!ISCD(un)) { 19819 err = ENOTTY; 19820 } else { 19821 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 19822 flag); 19823 } 19824 break; 19825 19826 case CDROMSBLKMODE: 19827 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 19828 /* 19829 * There is no means of changing block size in case of atapi 19830 * drives, thus return ENOTTY if drive type is atapi 19831 */ 19832 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 19833 err = ENOTTY; 19834 } else if (un->un_f_mmc_cap == TRUE) { 19835 19836 /* 19837 * MMC Devices do not support changing the 19838 * logical block size 19839 * 19840 * Note: EINVAL is being returned instead of ENOTTY to 19841 * maintain consistancy with the original mmc 19842 * driver update. 19843 */ 19844 err = EINVAL; 19845 } else { 19846 mutex_enter(SD_MUTEX(un)); 19847 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 19848 (un->un_ncmds_in_transport > 0)) { 19849 mutex_exit(SD_MUTEX(un)); 19850 err = EINVAL; 19851 } else { 19852 mutex_exit(SD_MUTEX(un)); 19853 err = sr_change_blkmode(dev, cmd, arg, flag); 19854 } 19855 } 19856 break; 19857 19858 case CDROMGBLKMODE: 19859 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 19860 if (!ISCD(un)) { 19861 err = ENOTTY; 19862 } else if ((un->un_f_cfg_is_atapi != FALSE) && 19863 (un->un_f_blockcount_is_valid != FALSE)) { 19864 /* 19865 * Drive is an ATAPI drive so return target block 19866 * size for ATAPI drives since we cannot change the 19867 * blocksize on ATAPI drives. Used primarily to detect 19868 * if an ATAPI cdrom is present. 19869 */ 19870 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 19871 sizeof (int), flag) != 0) { 19872 err = EFAULT; 19873 } else { 19874 err = 0; 19875 } 19876 19877 } else { 19878 /* 19879 * Drive supports changing block sizes via a Mode 19880 * Select. 
19881 */ 19882 err = sr_change_blkmode(dev, cmd, arg, flag); 19883 } 19884 break; 19885 19886 case CDROMGDRVSPEED: 19887 case CDROMSDRVSPEED: 19888 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 19889 if (!ISCD(un)) { 19890 err = ENOTTY; 19891 } else if (un->un_f_mmc_cap == TRUE) { 19892 /* 19893 * Note: In the future the driver implementation 19894 * for getting and 19895 * setting cd speed should entail: 19896 * 1) If non-mmc try the Toshiba mode page 19897 * (sr_change_speed) 19898 * 2) If mmc but no support for Real Time Streaming try 19899 * the SET CD SPEED (0xBB) command 19900 * (sr_atapi_change_speed) 19901 * 3) If mmc and support for Real Time Streaming 19902 * try the GET PERFORMANCE and SET STREAMING 19903 * commands (not yet implemented, 4380808) 19904 */ 19905 /* 19906 * As per recent MMC spec, CD-ROM speed is variable 19907 * and changes with LBA. Since there is no such 19908 * things as drive speed now, fail this ioctl. 19909 * 19910 * Note: EINVAL is returned for consistancy of original 19911 * implementation which included support for getting 19912 * the drive speed of mmc devices but not setting 19913 * the drive speed. Thus EINVAL would be returned 19914 * if a set request was made for an mmc device. 19915 * We no longer support get or set speed for 19916 * mmc but need to remain consistant with regard 19917 * to the error code returned. 19918 */ 19919 err = EINVAL; 19920 } else if (un->un_f_cfg_is_atapi == TRUE) { 19921 err = sr_atapi_change_speed(dev, cmd, arg, flag); 19922 } else { 19923 err = sr_change_speed(dev, cmd, arg, flag); 19924 } 19925 break; 19926 19927 case CDROMCDDA: 19928 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 19929 if (!ISCD(un)) { 19930 err = ENOTTY; 19931 } else { 19932 err = sr_read_cdda(dev, (void *)arg, flag); 19933 } 19934 break; 19935 19936 case CDROMCDXA: 19937 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 19938 if (!ISCD(un)) { 19939 err = ENOTTY; 19940 } else { 19941 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 19942 } 19943 break; 19944 19945 case CDROMSUBCODE: 19946 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 19947 if (!ISCD(un)) { 19948 err = ENOTTY; 19949 } else { 19950 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 19951 } 19952 break; 19953 19954 19955 #ifdef SDDEBUG 19956 /* RESET/ABORTS testing ioctls */ 19957 case DKIOCRESET: { 19958 int reset_level; 19959 19960 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 19961 err = EFAULT; 19962 } else { 19963 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 19964 "reset_level = 0x%lx\n", reset_level); 19965 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 19966 err = 0; 19967 } else { 19968 err = EIO; 19969 } 19970 } 19971 break; 19972 } 19973 19974 case DKIOCABORT: 19975 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 19976 if (scsi_abort(SD_ADDRESS(un), NULL)) { 19977 err = 0; 19978 } else { 19979 err = EIO; 19980 } 19981 break; 19982 #endif 19983 19984 #ifdef SD_FAULT_INJECTION 19985 /* SDIOC FaultInjection testing ioctls */ 19986 case SDIOCSTART: 19987 case SDIOCSTOP: 19988 case SDIOCINSERTPKT: 19989 case SDIOCINSERTXB: 19990 case SDIOCINSERTUN: 19991 case SDIOCINSERTARQ: 19992 case SDIOCPUSH: 19993 case SDIOCRETRIEVE: 19994 case SDIOCRUN: 19995 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 19996 "SDIOC detected cmd:0x%X:\n", cmd); 19997 /* call error generator */ 19998 sd_faultinjection_ioctl(cmd, arg, un); 19999 err = 0; 20000 break; 20001 20002 #endif /* SD_FAULT_INJECTION */ 20003 20004 case DKIOCFLUSHWRITECACHE: 20005 { 20006 struct dk_callback *dkc = (struct 
dk_callback *)arg; 20007 20008 mutex_enter(SD_MUTEX(un)); 20009 if (!un->un_f_sync_cache_supported || 20010 !un->un_f_write_cache_enabled) { 20011 err = un->un_f_sync_cache_supported ? 20012 0 : ENOTSUP; 20013 mutex_exit(SD_MUTEX(un)); 20014 if ((flag & FKIOCTL) && dkc != NULL && 20015 dkc->dkc_callback != NULL) { 20016 (*dkc->dkc_callback)(dkc->dkc_cookie, 20017 err); 20018 /* 20019 * Did callback and reported error. 20020 * Since we did a callback, ioctl 20021 * should return 0. 20022 */ 20023 err = 0; 20024 } 20025 break; 20026 } 20027 mutex_exit(SD_MUTEX(un)); 20028 20029 if ((flag & FKIOCTL) && dkc != NULL && 20030 dkc->dkc_callback != NULL) { 20031 /* async SYNC CACHE request */ 20032 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20033 } else { 20034 /* synchronous SYNC CACHE request */ 20035 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20036 } 20037 } 20038 break; 20039 20040 case DKIOCGETWCE: { 20041 20042 int wce; 20043 20044 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20045 break; 20046 } 20047 20048 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20049 err = EFAULT; 20050 } 20051 break; 20052 } 20053 20054 case DKIOCSETWCE: { 20055 20056 int wce, sync_supported; 20057 20058 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20059 err = EFAULT; 20060 break; 20061 } 20062 20063 /* 20064 * Synchronize multiple threads trying to enable 20065 * or disable the cache via the un_f_wcc_cv 20066 * condition variable. 20067 */ 20068 mutex_enter(SD_MUTEX(un)); 20069 20070 /* 20071 * Don't allow the cache to be enabled if the 20072 * config file has it disabled. 20073 */ 20074 if (un->un_f_opt_disable_cache && wce) { 20075 mutex_exit(SD_MUTEX(un)); 20076 err = EINVAL; 20077 break; 20078 } 20079 20080 /* 20081 * Wait for write cache change in progress 20082 * bit to be clear before proceeding. 20083 */ 20084 while (un->un_f_wcc_inprog) 20085 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20086 20087 un->un_f_wcc_inprog = 1; 20088 20089 if (un->un_f_write_cache_enabled && wce == 0) { 20090 /* 20091 * Disable the write cache. Don't clear 20092 * un_f_write_cache_enabled until after 20093 * the mode select and flush are complete. 20094 */ 20095 sync_supported = un->un_f_sync_cache_supported; 20096 mutex_exit(SD_MUTEX(un)); 20097 if ((err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20098 SD_CACHE_DISABLE)) == 0 && sync_supported) { 20099 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20100 } 20101 20102 mutex_enter(SD_MUTEX(un)); 20103 if (err == 0) { 20104 un->un_f_write_cache_enabled = 0; 20105 } 20106 20107 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20108 /* 20109 * Set un_f_write_cache_enabled first, so there is 20110 * no window where the cache is enabled, but the 20111 * bit says it isn't. 
20112 */ 20113 un->un_f_write_cache_enabled = 1; 20114 mutex_exit(SD_MUTEX(un)); 20115 20116 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20117 SD_CACHE_ENABLE); 20118 20119 mutex_enter(SD_MUTEX(un)); 20120 20121 if (err) { 20122 un->un_f_write_cache_enabled = 0; 20123 } 20124 } 20125 20126 un->un_f_wcc_inprog = 0; 20127 cv_broadcast(&un->un_wcc_cv); 20128 mutex_exit(SD_MUTEX(un)); 20129 break; 20130 } 20131 20132 default: 20133 err = ENOTTY; 20134 break; 20135 } 20136 mutex_enter(SD_MUTEX(un)); 20137 un->un_ncmds_in_driver--; 20138 ASSERT(un->un_ncmds_in_driver >= 0); 20139 mutex_exit(SD_MUTEX(un)); 20140 20141 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20142 return (err); 20143 } 20144 20145 20146 /* 20147 * Function: sd_dkio_ctrl_info 20148 * 20149 * Description: This routine is the driver entry point for handling controller 20150 * information ioctl requests (DKIOCINFO). 20151 * 20152 * Arguments: dev - the device number 20153 * arg - pointer to user provided dk_cinfo structure 20154 * specifying the controller type and attributes. 20155 * flag - this argument is a pass through to ddi_copyxxx() 20156 * directly from the mode argument of ioctl(). 20157 * 20158 * Return Code: 0 20159 * EFAULT 20160 * ENXIO 20161 */ 20162 20163 static int 20164 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20165 { 20166 struct sd_lun *un = NULL; 20167 struct dk_cinfo *info; 20168 dev_info_t *pdip; 20169 int lun, tgt; 20170 20171 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20172 return (ENXIO); 20173 } 20174 20175 info = (struct dk_cinfo *) 20176 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20177 20178 switch (un->un_ctype) { 20179 case CTYPE_CDROM: 20180 info->dki_ctype = DKC_CDROM; 20181 break; 20182 default: 20183 info->dki_ctype = DKC_SCSI_CCS; 20184 break; 20185 } 20186 pdip = ddi_get_parent(SD_DEVINFO(un)); 20187 info->dki_cnum = ddi_get_instance(pdip); 20188 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20189 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20190 } else { 20191 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20192 DK_DEVLEN - 1); 20193 } 20194 20195 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20196 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20197 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20198 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20199 20200 /* Unit Information */ 20201 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20202 info->dki_slave = ((tgt << 3) | lun); 20203 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20204 DK_DEVLEN - 1); 20205 info->dki_flags = DKI_FMTVOL; 20206 info->dki_partition = SDPART(dev); 20207 20208 /* Max Transfer size of this device in blocks */ 20209 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20210 info->dki_addr = 0; 20211 info->dki_space = 0; 20212 info->dki_prio = 0; 20213 info->dki_vec = 0; 20214 20215 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20216 kmem_free(info, sizeof (struct dk_cinfo)); 20217 return (EFAULT); 20218 } else { 20219 kmem_free(info, sizeof (struct dk_cinfo)); 20220 return (0); 20221 } 20222 } 20223 20224 20225 /* 20226 * Function: sd_get_media_info 20227 * 20228 * Description: This routine is the driver entry point for handling ioctl 20229 * requests for the media type or command set profile used by the 20230 * drive to operate on the media (DKIOCGMEDIAINFO). 
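 *
 *		A minimal userland sketch of the consumer side (illustrative
 *		only; the device path below is hypothetical):
 *
 *			struct dk_minfo minfo;
 *			int fd = open("/dev/rdsk/c0t0d0s2",
 *			    O_RDONLY | O_NDELAY);
 *
 *			if (fd != -1 &&
 *			    ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0) {
 *				(void) printf("type=0x%x lbsize=%u cap=%llu\n",
 *				    minfo.dki_media_type, minfo.dki_lbsize,
 *				    (unsigned long long)minfo.dki_capacity);
 *			}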
20231 * 20232 * Arguments: dev - the device number 20233 * arg - pointer to user provided dk_minfo structure 20234 * specifying the media type, logical block size and 20235 * drive capacity. 20236 * flag - this argument is a pass through to ddi_copyxxx() 20237 * directly from the mode argument of ioctl(). 20238 * 20239 * Return Code: 0 20240 * EACCESS 20241 * EFAULT 20242 * ENXIO 20243 * EIO 20244 */ 20245 20246 static int 20247 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20248 { 20249 struct sd_lun *un = NULL; 20250 struct uscsi_cmd com; 20251 struct scsi_inquiry *sinq; 20252 struct dk_minfo media_info; 20253 u_longlong_t media_capacity; 20254 uint64_t capacity; 20255 uint_t lbasize; 20256 uchar_t *out_data; 20257 uchar_t *rqbuf; 20258 int rval = 0; 20259 int rtn; 20260 20261 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20262 (un->un_state == SD_STATE_OFFLINE)) { 20263 return (ENXIO); 20264 } 20265 20266 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20267 20268 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20269 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20270 20271 /* Issue a TUR to determine if the drive is ready with media present */ 20272 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20273 if (rval == ENXIO) { 20274 goto done; 20275 } 20276 20277 /* Now get configuration data */ 20278 if (ISCD(un)) { 20279 media_info.dki_media_type = DK_CDROM; 20280 20281 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20282 if (un->un_f_mmc_cap == TRUE) { 20283 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20284 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20285 SD_PATH_STANDARD); 20286 20287 if (rtn) { 20288 /* 20289 * Failed for other than an illegal request 20290 * or command not supported 20291 */ 20292 if ((com.uscsi_status == STATUS_CHECK) && 20293 (com.uscsi_rqstatus == STATUS_GOOD)) { 20294 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20295 (rqbuf[12] != 0x20)) { 20296 rval = EIO; 20297 goto done; 20298 } 20299 } 20300 } else { 20301 /* 20302 * The GET CONFIGURATION command succeeded 20303 * so set the media type according to the 20304 * returned data 20305 */ 20306 media_info.dki_media_type = out_data[6]; 20307 media_info.dki_media_type <<= 8; 20308 media_info.dki_media_type |= out_data[7]; 20309 } 20310 } 20311 } else { 20312 /* 20313 * The profile list is not available, so we attempt to identify 20314 * the media type based on the inquiry data 20315 */ 20316 sinq = un->un_sd->sd_inq; 20317 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20318 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20319 /* This is a direct access device or optical disk */ 20320 media_info.dki_media_type = DK_FIXED_DISK; 20321 20322 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20323 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20324 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20325 media_info.dki_media_type = DK_ZIP; 20326 } else if ( 20327 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20328 media_info.dki_media_type = DK_JAZ; 20329 } 20330 } 20331 } else { 20332 /* 20333 * Not a CD, direct access or optical disk so return 20334 * unknown media 20335 */ 20336 media_info.dki_media_type = DK_UNKNOWN; 20337 } 20338 } 20339 20340 /* Now read the capacity so we can provide the lbasize and capacity */ 20341 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20342 SD_PATH_DIRECT)) { 20343 case 0: 20344 break; 20345 case EACCES: 20346 rval = EACCES; 20347 goto done; 20348 default: 20349 rval = EIO; 20350 goto done; 20351 } 20352 20353 
	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * cap.lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Keep this goto; code might be added below it in future. */
		goto done;
	}
done:
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is
 *		specified the user thread will block until a drive state
 *		change occurs.
 *
 * Arguments: dev   - the device number
 *		state - user pointer to a dkio_state, updated with the
 *			current drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch
		 * request, since the scsi watch sends a TUR directly to the
		 * device which the device cannot handle if it is powered
		 * down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
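		 * (Note that the count is restored on every exit path
		 * below, including the EINTR case, before this routine
		 * returns.)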
20459 * If the count isn't decremented the device can't 20460 * be powered down. 20461 */ 20462 un->un_ncmds_in_driver--; 20463 ASSERT(un->un_ncmds_in_driver >= 0); 20464 20465 /* 20466 * if a prior request had been made, this will be the same 20467 * token, as scsi_watch was designed that way. 20468 */ 20469 un->un_swr_token = token; 20470 un->un_specified_mediastate = state; 20471 20472 /* 20473 * now wait for media change 20474 * we will not be signalled unless mediastate == state but it is 20475 * still better to test for this condition, since there is a 20476 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20477 */ 20478 SD_TRACE(SD_LOG_COMMON, un, 20479 "sd_check_media: waiting for media state change\n"); 20480 while (un->un_mediastate == state) { 20481 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20482 SD_TRACE(SD_LOG_COMMON, un, 20483 "sd_check_media: waiting for media state " 20484 "was interrupted\n"); 20485 un->un_ncmds_in_driver++; 20486 rval = EINTR; 20487 goto done; 20488 } 20489 SD_TRACE(SD_LOG_COMMON, un, 20490 "sd_check_media: received signal, state=%x\n", 20491 un->un_mediastate); 20492 } 20493 /* 20494 * Inc the counter to indicate the device once again 20495 * has an active outstanding cmd. 20496 */ 20497 un->un_ncmds_in_driver++; 20498 } 20499 20500 /* invalidate geometry */ 20501 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20502 sr_ejected(un); 20503 } 20504 20505 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20506 uint64_t capacity; 20507 uint_t lbasize; 20508 20509 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 20510 mutex_exit(SD_MUTEX(un)); 20511 /* 20512 * Since the following routines use SD_PATH_DIRECT, we must 20513 * call PM directly before the upcoming disk accesses. This 20514 * may cause the disk to be power/spin up. 20515 */ 20516 20517 if (sd_pm_entry(un) == DDI_SUCCESS) { 20518 rval = sd_send_scsi_READ_CAPACITY(un, 20519 &capacity, 20520 &lbasize, SD_PATH_DIRECT); 20521 if (rval != 0) { 20522 sd_pm_exit(un); 20523 mutex_enter(SD_MUTEX(un)); 20524 goto done; 20525 } 20526 } else { 20527 rval = EIO; 20528 mutex_enter(SD_MUTEX(un)); 20529 goto done; 20530 } 20531 mutex_enter(SD_MUTEX(un)); 20532 20533 sd_update_block_info(un, lbasize, capacity); 20534 20535 /* 20536 * Check if the media in the device is writable or not 20537 */ 20538 if (ISCD(un)) 20539 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 20540 20541 mutex_exit(SD_MUTEX(un)); 20542 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 20543 if ((cmlb_validate(un->un_cmlbhandle, 0, 20544 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 20545 sd_set_pstats(un); 20546 SD_TRACE(SD_LOG_IO_PARTITION, un, 20547 "sd_check_media: un:0x%p pstats created and " 20548 "set\n", un); 20549 } 20550 20551 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20552 SD_PATH_DIRECT); 20553 sd_pm_exit(un); 20554 20555 mutex_enter(SD_MUTEX(un)); 20556 } 20557 done: 20558 un->un_f_watcht_stopped = FALSE; 20559 if (un->un_swr_token) { 20560 /* 20561 * Use of this local token and the mutex ensures that we avoid 20562 * some race conditions associated with terminating the 20563 * scsi watch. 
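		 * The pattern, as coded below, is to capture un_swr_token
		 * and clear the field while holding SD_MUTEX, drop the
		 * mutex, and only then call scsi_watch_request_terminate()
		 * with SCSI_WATCH_TERMINATE_WAIT.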
20564 */ 20565 token = un->un_swr_token; 20566 un->un_swr_token = (opaque_t)NULL; 20567 mutex_exit(SD_MUTEX(un)); 20568 (void) scsi_watch_request_terminate(token, 20569 SCSI_WATCH_TERMINATE_WAIT); 20570 mutex_enter(SD_MUTEX(un)); 20571 } 20572 20573 /* 20574 * Update the capacity kstat value, if no media previously 20575 * (capacity kstat is 0) and a media has been inserted 20576 * (un_f_blockcount_is_valid == TRUE) 20577 */ 20578 if (un->un_errstats) { 20579 struct sd_errstats *stp = NULL; 20580 20581 stp = (struct sd_errstats *)un->un_errstats->ks_data; 20582 if ((stp->sd_capacity.value.ui64 == 0) && 20583 (un->un_f_blockcount_is_valid == TRUE)) { 20584 stp->sd_capacity.value.ui64 = 20585 (uint64_t)((uint64_t)un->un_blockcount * 20586 un->un_sys_blocksize); 20587 } 20588 } 20589 mutex_exit(SD_MUTEX(un)); 20590 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 20591 return (rval); 20592 } 20593 20594 20595 /* 20596 * Function: sd_delayed_cv_broadcast 20597 * 20598 * Description: Delayed cv_broadcast to allow for target to recover from media 20599 * insertion. 20600 * 20601 * Arguments: arg - driver soft state (unit) structure 20602 */ 20603 20604 static void 20605 sd_delayed_cv_broadcast(void *arg) 20606 { 20607 struct sd_lun *un = arg; 20608 20609 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 20610 20611 mutex_enter(SD_MUTEX(un)); 20612 un->un_dcvb_timeid = NULL; 20613 cv_broadcast(&un->un_state_cv); 20614 mutex_exit(SD_MUTEX(un)); 20615 } 20616 20617 20618 /* 20619 * Function: sd_media_watch_cb 20620 * 20621 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 20622 * routine processes the TUR sense data and updates the driver 20623 * state if a transition has occurred. The user thread 20624 * (sd_check_media) is then signalled. 
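 *
 *		Decode summary of the cases handled below (for reference):
 *
 *			KEY_UNIT_ATTENTION, ASC 0x28	-> DKIO_INSERTED
 *			KEY_NOT_READY, ASC 0x3a		-> DKIO_EJECTED
 *			KEY_NOT_READY, ASC 0x04,
 *			    ASCQ 0x02/0x07/0x08		-> DKIO_INSERTED
 *			KEY_NO_SENSE, ASC/ASCQ 00/00	-> ignored
 *			good status, CMD_CMPLT		-> DKIO_INSERTED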
20625 * 20626 * Arguments: arg - the device 'dev_t' is used for context to discriminate 20627 * among multiple watches that share this callback function 20628 * resultp - scsi watch facility result packet containing scsi 20629 * packet, status byte and sense data 20630 * 20631 * Return Code: 0 for success, -1 for failure 20632 */ 20633 20634 static int 20635 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 20636 { 20637 struct sd_lun *un; 20638 struct scsi_status *statusp = resultp->statusp; 20639 uint8_t *sensep = (uint8_t *)resultp->sensep; 20640 enum dkio_state state = DKIO_NONE; 20641 dev_t dev = (dev_t)arg; 20642 uchar_t actual_sense_length; 20643 uint8_t skey, asc, ascq; 20644 20645 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20646 return (-1); 20647 } 20648 actual_sense_length = resultp->actual_sense_length; 20649 20650 mutex_enter(SD_MUTEX(un)); 20651 SD_TRACE(SD_LOG_COMMON, un, 20652 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 20653 *((char *)statusp), (void *)sensep, actual_sense_length); 20654 20655 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 20656 un->un_mediastate = DKIO_DEV_GONE; 20657 cv_broadcast(&un->un_state_cv); 20658 mutex_exit(SD_MUTEX(un)); 20659 20660 return (0); 20661 } 20662 20663 /* 20664 * If there was a check condition then sensep points to valid sense data 20665 * If status was not a check condition but a reservation or busy status 20666 * then the new state is DKIO_NONE 20667 */ 20668 if (sensep != NULL) { 20669 skey = scsi_sense_key(sensep); 20670 asc = scsi_sense_asc(sensep); 20671 ascq = scsi_sense_ascq(sensep); 20672 20673 SD_INFO(SD_LOG_COMMON, un, 20674 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 20675 skey, asc, ascq); 20676 /* This routine only uses up to 13 bytes of sense data. */ 20677 if (actual_sense_length >= 13) { 20678 if (skey == KEY_UNIT_ATTENTION) { 20679 if (asc == 0x28) { 20680 state = DKIO_INSERTED; 20681 } 20682 } else if (skey == KEY_NOT_READY) { 20683 /* 20684 * if 02/04/02 means that the host 20685 * should send start command. Explicitly 20686 * leave the media state as is 20687 * (inserted) as the media is inserted 20688 * and host has stopped device for PM 20689 * reasons. Upon next true read/write 20690 * to this media will bring the 20691 * device to the right state good for 20692 * media access. 20693 */ 20694 if (asc == 0x3a) { 20695 state = DKIO_EJECTED; 20696 } else { 20697 /* 20698 * If the drive is busy with an 20699 * operation or long write, keep the 20700 * media in an inserted state. 20701 */ 20702 20703 if ((asc == 0x04) && 20704 ((ascq == 0x02) || 20705 (ascq == 0x07) || 20706 (ascq == 0x08))) { 20707 state = DKIO_INSERTED; 20708 } 20709 } 20710 } else if (skey == KEY_NO_SENSE) { 20711 if ((asc == 0x00) && (ascq == 0x00)) { 20712 /* 20713 * Sense Data 00/00/00 does not provide 20714 * any information about the state of 20715 * the media. Ignore it. 
20716 */ 20717 mutex_exit(SD_MUTEX(un)); 20718 return (0); 20719 } 20720 } 20721 } 20722 } else if ((*((char *)statusp) == STATUS_GOOD) && 20723 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 20724 state = DKIO_INSERTED; 20725 } 20726 20727 SD_TRACE(SD_LOG_COMMON, un, 20728 "sd_media_watch_cb: state=%x, specified=%x\n", 20729 state, un->un_specified_mediastate); 20730 20731 /* 20732 * now signal the waiting thread if this is *not* the specified state; 20733 * delay the signal if the state is DKIO_INSERTED to allow the target 20734 * to recover 20735 */ 20736 if (state != un->un_specified_mediastate) { 20737 un->un_mediastate = state; 20738 if (state == DKIO_INSERTED) { 20739 /* 20740 * delay the signal to give the drive a chance 20741 * to do what it apparently needs to do 20742 */ 20743 SD_TRACE(SD_LOG_COMMON, un, 20744 "sd_media_watch_cb: delayed cv_broadcast\n"); 20745 if (un->un_dcvb_timeid == NULL) { 20746 un->un_dcvb_timeid = 20747 timeout(sd_delayed_cv_broadcast, un, 20748 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 20749 } 20750 } else { 20751 SD_TRACE(SD_LOG_COMMON, un, 20752 "sd_media_watch_cb: immediate cv_broadcast\n"); 20753 cv_broadcast(&un->un_state_cv); 20754 } 20755 } 20756 mutex_exit(SD_MUTEX(un)); 20757 return (0); 20758 } 20759 20760 20761 /* 20762 * Function: sd_dkio_get_temp 20763 * 20764 * Description: This routine is the driver entry point for handling ioctl 20765 * requests to get the disk temperature. 20766 * 20767 * Arguments: dev - the device number 20768 * arg - pointer to user provided dk_temperature structure. 20769 * flag - this argument is a pass through to ddi_copyxxx() 20770 * directly from the mode argument of ioctl(). 20771 * 20772 * Return Code: 0 20773 * EFAULT 20774 * ENXIO 20775 * EAGAIN 20776 */ 20777 20778 static int 20779 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 20780 { 20781 struct sd_lun *un = NULL; 20782 struct dk_temperature *dktemp = NULL; 20783 uchar_t *temperature_page; 20784 int rval = 0; 20785 int path_flag = SD_PATH_STANDARD; 20786 20787 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20788 return (ENXIO); 20789 } 20790 20791 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 20792 20793 /* copyin the disk temp argument to get the user flags */ 20794 if (ddi_copyin((void *)arg, dktemp, 20795 sizeof (struct dk_temperature), flag) != 0) { 20796 rval = EFAULT; 20797 goto done; 20798 } 20799 20800 /* Initialize the temperature to invalid. */ 20801 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20802 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20803 20804 /* 20805 * Note: Investigate removing the "bypass pm" semantic. 20806 * Can we just bypass PM always? 20807 */ 20808 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 20809 path_flag = SD_PATH_DIRECT; 20810 ASSERT(!mutex_owned(&un->un_pm_mutex)); 20811 mutex_enter(&un->un_pm_mutex); 20812 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 20813 /* 20814 * If DKT_BYPASS_PM is set, and the drive happens to be 20815 * in low power mode, we can not wake it up, Need to 20816 * return EAGAIN. 20817 */ 20818 mutex_exit(&un->un_pm_mutex); 20819 rval = EAGAIN; 20820 goto done; 20821 } else { 20822 /* 20823 * Indicate to PM the device is busy. This is required 20824 * to avoid a race - i.e. the ioctl is issuing a 20825 * command and the pm framework brings down the device 20826 * to low power mode (possible power cut-off on some 20827 * platforms). 
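			 * Ordering sketch: sd_pm_entry() marks the device
			 * busy (raising power if needed) before the LOG
			 * SENSE below is issued, and the matching
			 * sd_pm_exit() is performed in the done2: path
			 * whenever path_flag indicates SD_PATH_DIRECT.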
20828 */ 20829 mutex_exit(&un->un_pm_mutex); 20830 if (sd_pm_entry(un) != DDI_SUCCESS) { 20831 rval = EAGAIN; 20832 goto done; 20833 } 20834 } 20835 } 20836 20837 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 20838 20839 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 20840 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 20841 goto done2; 20842 } 20843 20844 /* 20845 * For the current temperature verify that the parameter length is 0x02 20846 * and the parameter code is 0x00 20847 */ 20848 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 20849 (temperature_page[5] == 0x00)) { 20850 if (temperature_page[9] == 0xFF) { 20851 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 20852 } else { 20853 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 20854 } 20855 } 20856 20857 /* 20858 * For the reference temperature verify that the parameter 20859 * length is 0x02 and the parameter code is 0x01 20860 */ 20861 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 20862 (temperature_page[11] == 0x01)) { 20863 if (temperature_page[15] == 0xFF) { 20864 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 20865 } else { 20866 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 20867 } 20868 } 20869 20870 /* Do the copyout regardless of the temperature commands status. */ 20871 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 20872 flag) != 0) { 20873 rval = EFAULT; 20874 } 20875 20876 done2: 20877 if (path_flag == SD_PATH_DIRECT) { 20878 sd_pm_exit(un); 20879 } 20880 20881 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 20882 done: 20883 if (dktemp != NULL) { 20884 kmem_free(dktemp, sizeof (struct dk_temperature)); 20885 } 20886 20887 return (rval); 20888 } 20889 20890 20891 /* 20892 * Function: sd_log_page_supported 20893 * 20894 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 20895 * supported log pages. 20896 * 20897 * Arguments: un - 20898 * log_page - 20899 * 20900 * Return Code: -1 - on error (log sense is optional and may not be supported). 20901 * 0 - log page not found. 20902 * 1 - log page found. 20903 */ 20904 20905 static int 20906 sd_log_page_supported(struct sd_lun *un, int log_page) 20907 { 20908 uchar_t *log_page_data; 20909 int i; 20910 int match = 0; 20911 int log_size; 20912 20913 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 20914 20915 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 20916 SD_PATH_DIRECT) != 0) { 20917 SD_ERROR(SD_LOG_COMMON, un, 20918 "sd_log_page_supported: failed log page retrieval\n"); 20919 kmem_free(log_page_data, 0xFF); 20920 return (-1); 20921 } 20922 log_size = log_page_data[3]; 20923 20924 /* 20925 * The list of supported log pages start from the fourth byte. Check 20926 * until we run out of log pages or a match is found. 20927 */ 20928 for (i = 4; (i < (log_size + 4)) && !match; i++) { 20929 if (log_page_data[i] == log_page) { 20930 match++; 20931 } 20932 } 20933 kmem_free(log_page_data, 0xFF); 20934 return (match); 20935 } 20936 20937 20938 /* 20939 * Function: sd_mhdioc_failfast 20940 * 20941 * Description: This routine is the driver entry point for handling ioctl 20942 * requests to enable/disable the multihost failfast option. 20943 * (MHIOCENFAILFAST) 20944 * 20945 * Arguments: dev - the device number 20946 * arg - user specified probing interval. 20947 * flag - this argument is a pass through to ddi_copyxxx() 20948 * directly from the mode argument of ioctl(). 
20949 * 20950 * Return Code: 0 20951 * EFAULT 20952 * ENXIO 20953 */ 20954 20955 static int 20956 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 20957 { 20958 struct sd_lun *un = NULL; 20959 int mh_time; 20960 int rval = 0; 20961 20962 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20963 return (ENXIO); 20964 } 20965 20966 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 20967 return (EFAULT); 20968 20969 if (mh_time) { 20970 mutex_enter(SD_MUTEX(un)); 20971 un->un_resvd_status |= SD_FAILFAST; 20972 mutex_exit(SD_MUTEX(un)); 20973 /* 20974 * If mh_time is INT_MAX, then this ioctl is being used for 20975 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 20976 */ 20977 if (mh_time != INT_MAX) { 20978 rval = sd_check_mhd(dev, mh_time); 20979 } 20980 } else { 20981 (void) sd_check_mhd(dev, 0); 20982 mutex_enter(SD_MUTEX(un)); 20983 un->un_resvd_status &= ~SD_FAILFAST; 20984 mutex_exit(SD_MUTEX(un)); 20985 } 20986 return (rval); 20987 } 20988 20989 20990 /* 20991 * Function: sd_mhdioc_takeown 20992 * 20993 * Description: This routine is the driver entry point for handling ioctl 20994 * requests to forcefully acquire exclusive access rights to the 20995 * multihost disk (MHIOCTKOWN). 20996 * 20997 * Arguments: dev - the device number 20998 * arg - user provided structure specifying the delay 20999 * parameters in milliseconds 21000 * flag - this argument is a pass through to ddi_copyxxx() 21001 * directly from the mode argument of ioctl(). 21002 * 21003 * Return Code: 0 21004 * EFAULT 21005 * ENXIO 21006 */ 21007 21008 static int 21009 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21010 { 21011 struct sd_lun *un = NULL; 21012 struct mhioctkown *tkown = NULL; 21013 int rval = 0; 21014 21015 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21016 return (ENXIO); 21017 } 21018 21019 if (arg != NULL) { 21020 tkown = (struct mhioctkown *) 21021 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21022 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21023 if (rval != 0) { 21024 rval = EFAULT; 21025 goto error; 21026 } 21027 } 21028 21029 rval = sd_take_ownership(dev, tkown); 21030 mutex_enter(SD_MUTEX(un)); 21031 if (rval == 0) { 21032 un->un_resvd_status |= SD_RESERVE; 21033 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21034 sd_reinstate_resv_delay = 21035 tkown->reinstate_resv_delay * 1000; 21036 } else { 21037 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21038 } 21039 /* 21040 * Give the scsi_watch routine interval set by 21041 * the MHIOCENFAILFAST ioctl precedence here. 21042 */ 21043 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21044 mutex_exit(SD_MUTEX(un)); 21045 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21046 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21047 "sd_mhdioc_takeown : %d\n", 21048 sd_reinstate_resv_delay); 21049 } else { 21050 mutex_exit(SD_MUTEX(un)); 21051 } 21052 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21053 sd_mhd_reset_notify_cb, (caddr_t)un); 21054 } else { 21055 un->un_resvd_status &= ~SD_RESERVE; 21056 mutex_exit(SD_MUTEX(un)); 21057 } 21058 21059 error: 21060 if (tkown != NULL) { 21061 kmem_free(tkown, sizeof (struct mhioctkown)); 21062 } 21063 return (rval); 21064 } 21065 21066 21067 /* 21068 * Function: sd_mhdioc_release 21069 * 21070 * Description: This routine is the driver entry point for handling ioctl 21071 * requests to release exclusive access rights to the multihost 21072 * disk (MHIOCRELEASE). 
21073 * 21074 * Arguments: dev - the device number 21075 * 21076 * Return Code: 0 21077 * ENXIO 21078 */ 21079 21080 static int 21081 sd_mhdioc_release(dev_t dev) 21082 { 21083 struct sd_lun *un = NULL; 21084 timeout_id_t resvd_timeid_save; 21085 int resvd_status_save; 21086 int rval = 0; 21087 21088 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21089 return (ENXIO); 21090 } 21091 21092 mutex_enter(SD_MUTEX(un)); 21093 resvd_status_save = un->un_resvd_status; 21094 un->un_resvd_status &= 21095 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21096 if (un->un_resvd_timeid) { 21097 resvd_timeid_save = un->un_resvd_timeid; 21098 un->un_resvd_timeid = NULL; 21099 mutex_exit(SD_MUTEX(un)); 21100 (void) untimeout(resvd_timeid_save); 21101 } else { 21102 mutex_exit(SD_MUTEX(un)); 21103 } 21104 21105 /* 21106 * destroy any pending timeout thread that may be attempting to 21107 * reinstate reservation on this device. 21108 */ 21109 sd_rmv_resv_reclaim_req(dev); 21110 21111 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21112 mutex_enter(SD_MUTEX(un)); 21113 if ((un->un_mhd_token) && 21114 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21115 mutex_exit(SD_MUTEX(un)); 21116 (void) sd_check_mhd(dev, 0); 21117 } else { 21118 mutex_exit(SD_MUTEX(un)); 21119 } 21120 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21121 sd_mhd_reset_notify_cb, (caddr_t)un); 21122 } else { 21123 /* 21124 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21125 */ 21126 mutex_enter(SD_MUTEX(un)); 21127 un->un_resvd_status = resvd_status_save; 21128 mutex_exit(SD_MUTEX(un)); 21129 } 21130 return (rval); 21131 } 21132 21133 21134 /* 21135 * Function: sd_mhdioc_register_devid 21136 * 21137 * Description: This routine is the driver entry point for handling ioctl 21138 * requests to register the device id (MHIOCREREGISTERDEVID). 21139 * 21140 * Note: The implementation for this ioctl has been updated to 21141 * be consistent with the original PSARC case (1999/357) 21142 * (4375899, 4241671, 4220005) 21143 * 21144 * Arguments: dev - the device number 21145 * 21146 * Return Code: 0 21147 * ENXIO 21148 */ 21149 21150 static int 21151 sd_mhdioc_register_devid(dev_t dev) 21152 { 21153 struct sd_lun *un = NULL; 21154 int rval = 0; 21155 21156 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21157 return (ENXIO); 21158 } 21159 21160 ASSERT(!mutex_owned(SD_MUTEX(un))); 21161 21162 mutex_enter(SD_MUTEX(un)); 21163 21164 /* If a devid already exists, de-register it */ 21165 if (un->un_devid != NULL) { 21166 ddi_devid_unregister(SD_DEVINFO(un)); 21167 /* 21168 * After unregister devid, needs to free devid memory 21169 */ 21170 ddi_devid_free(un->un_devid); 21171 un->un_devid = NULL; 21172 } 21173 21174 /* Check for reservation conflict */ 21175 mutex_exit(SD_MUTEX(un)); 21176 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21177 mutex_enter(SD_MUTEX(un)); 21178 21179 switch (rval) { 21180 case 0: 21181 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21182 break; 21183 case EACCES: 21184 break; 21185 default: 21186 rval = EIO; 21187 } 21188 21189 mutex_exit(SD_MUTEX(un)); 21190 return (rval); 21191 } 21192 21193 21194 /* 21195 * Function: sd_mhdioc_inkeys 21196 * 21197 * Description: This routine is the driver entry point for handling ioctl 21198 * requests to issue the SCSI-3 Persistent In Read Keys command 21199 * to the device (MHIOCGRP_INKEYS). 
21200 * 21201 * Arguments: dev - the device number 21202 * arg - user provided in_keys structure 21203 * flag - this argument is a pass through to ddi_copyxxx() 21204 * directly from the mode argument of ioctl(). 21205 * 21206 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21207 * ENXIO 21208 * EFAULT 21209 */ 21210 21211 static int 21212 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21213 { 21214 struct sd_lun *un; 21215 mhioc_inkeys_t inkeys; 21216 int rval = 0; 21217 21218 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21219 return (ENXIO); 21220 } 21221 21222 #ifdef _MULTI_DATAMODEL 21223 switch (ddi_model_convert_from(flag & FMODELS)) { 21224 case DDI_MODEL_ILP32: { 21225 struct mhioc_inkeys32 inkeys32; 21226 21227 if (ddi_copyin(arg, &inkeys32, 21228 sizeof (struct mhioc_inkeys32), flag) != 0) { 21229 return (EFAULT); 21230 } 21231 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21232 if ((rval = sd_persistent_reservation_in_read_keys(un, 21233 &inkeys, flag)) != 0) { 21234 return (rval); 21235 } 21236 inkeys32.generation = inkeys.generation; 21237 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21238 flag) != 0) { 21239 return (EFAULT); 21240 } 21241 break; 21242 } 21243 case DDI_MODEL_NONE: 21244 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21245 flag) != 0) { 21246 return (EFAULT); 21247 } 21248 if ((rval = sd_persistent_reservation_in_read_keys(un, 21249 &inkeys, flag)) != 0) { 21250 return (rval); 21251 } 21252 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21253 flag) != 0) { 21254 return (EFAULT); 21255 } 21256 break; 21257 } 21258 21259 #else /* ! _MULTI_DATAMODEL */ 21260 21261 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21262 return (EFAULT); 21263 } 21264 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21265 if (rval != 0) { 21266 return (rval); 21267 } 21268 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21269 return (EFAULT); 21270 } 21271 21272 #endif /* _MULTI_DATAMODEL */ 21273 21274 return (rval); 21275 } 21276 21277 21278 /* 21279 * Function: sd_mhdioc_inresv 21280 * 21281 * Description: This routine is the driver entry point for handling ioctl 21282 * requests to issue the SCSI-3 Persistent In Read Reservations 21283 * command to the device (MHIOCGRP_INKEYS). 21284 * 21285 * Arguments: dev - the device number 21286 * arg - user provided in_resv structure 21287 * flag - this argument is a pass through to ddi_copyxxx() 21288 * directly from the mode argument of ioctl(). 
21289 * 21290 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21291 * ENXIO 21292 * EFAULT 21293 */ 21294 21295 static int 21296 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21297 { 21298 struct sd_lun *un; 21299 mhioc_inresvs_t inresvs; 21300 int rval = 0; 21301 21302 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21303 return (ENXIO); 21304 } 21305 21306 #ifdef _MULTI_DATAMODEL 21307 21308 switch (ddi_model_convert_from(flag & FMODELS)) { 21309 case DDI_MODEL_ILP32: { 21310 struct mhioc_inresvs32 inresvs32; 21311 21312 if (ddi_copyin(arg, &inresvs32, 21313 sizeof (struct mhioc_inresvs32), flag) != 0) { 21314 return (EFAULT); 21315 } 21316 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21317 if ((rval = sd_persistent_reservation_in_read_resv(un, 21318 &inresvs, flag)) != 0) { 21319 return (rval); 21320 } 21321 inresvs32.generation = inresvs.generation; 21322 if (ddi_copyout(&inresvs32, arg, 21323 sizeof (struct mhioc_inresvs32), flag) != 0) { 21324 return (EFAULT); 21325 } 21326 break; 21327 } 21328 case DDI_MODEL_NONE: 21329 if (ddi_copyin(arg, &inresvs, 21330 sizeof (mhioc_inresvs_t), flag) != 0) { 21331 return (EFAULT); 21332 } 21333 if ((rval = sd_persistent_reservation_in_read_resv(un, 21334 &inresvs, flag)) != 0) { 21335 return (rval); 21336 } 21337 if (ddi_copyout(&inresvs, arg, 21338 sizeof (mhioc_inresvs_t), flag) != 0) { 21339 return (EFAULT); 21340 } 21341 break; 21342 } 21343 21344 #else /* ! _MULTI_DATAMODEL */ 21345 21346 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21347 return (EFAULT); 21348 } 21349 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21350 if (rval != 0) { 21351 return (rval); 21352 } 21353 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21354 return (EFAULT); 21355 } 21356 21357 #endif /* ! _MULTI_DATAMODEL */ 21358 21359 return (rval); 21360 } 21361 21362 21363 /* 21364 * The following routines support the clustering functionality described below 21365 * and implement lost reservation reclaim functionality. 21366 * 21367 * Clustering 21368 * ---------- 21369 * The clustering code uses two different, independent forms of SCSI 21370 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21371 * Persistent Group Reservations. For any particular disk, it will use either 21372 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21373 * 21374 * SCSI-2 21375 * The cluster software takes ownership of a multi-hosted disk by issuing the 21376 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21377 * MHIOCRELEASE ioctl.Closely related is the MHIOCENFAILFAST ioctl -- a cluster, 21378 * just after taking ownership of the disk with the MHIOCTKOWN ioctl then issues 21379 * the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The 21380 * meaning of failfast is that if the driver (on this host) ever encounters the 21381 * scsi error return code RESERVATION_CONFLICT from the device, it should 21382 * immediately panic the host. The motivation for this ioctl is that if this 21383 * host does encounter reservation conflict, the underlying cause is that some 21384 * other host of the cluster has decided that this host is no longer in the 21385 * cluster and has seized control of the disks for itself. Since this host is no 21386 * longer in the cluster, it ought to panic itself. 
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
 * have lost reservations when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code: 0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
21465 */ 21466 return (0); 21467 } 21468 /* 21469 * If the device is required to hold reservation while 21470 * disabling failfast, we need to restart the scsi_watch 21471 * routine with an interval of reinstate_resv_delay. 21472 */ 21473 if (un->un_resvd_status & SD_RESERVE) { 21474 interval = sd_reinstate_resv_delay/1000; 21475 } else { 21476 /* no failfast so bail */ 21477 mutex_exit(SD_MUTEX(un)); 21478 return (0); 21479 } 21480 mutex_exit(SD_MUTEX(un)); 21481 } 21482 21483 /* 21484 * adjust minimum time interval to 1 second, 21485 * and convert from msecs to usecs 21486 */ 21487 if (interval > 0 && interval < 1000) { 21488 interval = 1000; 21489 } 21490 interval *= 1000; 21491 21492 /* 21493 * submit the request to the scsi_watch service 21494 */ 21495 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21496 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21497 if (token == NULL) { 21498 return (EAGAIN); 21499 } 21500 21501 /* 21502 * save token for termination later on 21503 */ 21504 mutex_enter(SD_MUTEX(un)); 21505 un->un_mhd_token = token; 21506 mutex_exit(SD_MUTEX(un)); 21507 return (0); 21508 } 21509 21510 21511 /* 21512 * Function: sd_mhd_watch_cb() 21513 * 21514 * Description: This function is the call back function used by the scsi watch 21515 * facility. The scsi watch facility sends the "Test Unit Ready" 21516 * and processes the status. If applicable (i.e. a "Unit Attention" 21517 * status and automatic "Request Sense" not used) the scsi watch 21518 * facility will send a "Request Sense" and retrieve the sense data 21519 * to be passed to this callback function. In either case the 21520 * automatic "Request Sense" or the facility submitting one, this 21521 * callback is passed the status and sense data. 21522 * 21523 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21524 * among multiple watches that share this callback function 21525 * resultp - scsi watch facility result packet containing scsi 21526 * packet, status byte and sense data 21527 * 21528 * Return Code: 0 - continue the watch task 21529 * non-zero - terminate the watch task 21530 */ 21531 21532 static int 21533 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21534 { 21535 struct sd_lun *un; 21536 struct scsi_status *statusp; 21537 uint8_t *sensep; 21538 struct scsi_pkt *pkt; 21539 uchar_t actual_sense_length; 21540 dev_t dev = (dev_t)arg; 21541 21542 ASSERT(resultp != NULL); 21543 statusp = resultp->statusp; 21544 sensep = (uint8_t *)resultp->sensep; 21545 pkt = resultp->pkt; 21546 actual_sense_length = resultp->actual_sense_length; 21547 21548 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21549 return (ENXIO); 21550 } 21551 21552 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21553 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 21554 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 21555 21556 /* Begin processing of the status and/or sense data */ 21557 if (pkt->pkt_reason != CMD_CMPLT) { 21558 /* Handle the incomplete packet */ 21559 sd_mhd_watch_incomplete(un, pkt); 21560 return (0); 21561 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 21562 if (*((unsigned char *)statusp) 21563 == STATUS_RESERVATION_CONFLICT) { 21564 /* 21565 * Handle a reservation conflict by panicking if 21566 * configured for failfast or by logging the conflict 21567 * and updating the reservation status 21568 */ 21569 mutex_enter(SD_MUTEX(un)); 21570 if ((un->un_resvd_status & SD_FAILFAST) && 21571 (sd_failfast_enable)) { 21572 
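				/*
				 * Failfast is armed (MHIOCENFAILFAST) and
				 * globally enabled (sd_failfast_enable):
				 * panic this node rather than run on with
				 * a lost reservation.
				 */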
sd_panic_for_res_conflict(un); 21573 /*NOTREACHED*/ 21574 } 21575 SD_INFO(SD_LOG_IOCTL_MHD, un, 21576 "sd_mhd_watch_cb: Reservation Conflict\n"); 21577 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 21578 mutex_exit(SD_MUTEX(un)); 21579 } 21580 } 21581 21582 if (sensep != NULL) { 21583 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 21584 mutex_enter(SD_MUTEX(un)); 21585 if ((scsi_sense_asc(sensep) == 21586 SD_SCSI_RESET_SENSE_CODE) && 21587 (un->un_resvd_status & SD_RESERVE)) { 21588 /* 21589 * The additional sense code indicates a power 21590 * on or bus device reset has occurred; update 21591 * the reservation status. 21592 */ 21593 un->un_resvd_status |= 21594 (SD_LOST_RESERVE | SD_WANT_RESERVE); 21595 SD_INFO(SD_LOG_IOCTL_MHD, un, 21596 "sd_mhd_watch_cb: Lost Reservation\n"); 21597 } 21598 } else { 21599 return (0); 21600 } 21601 } else { 21602 mutex_enter(SD_MUTEX(un)); 21603 } 21604 21605 if ((un->un_resvd_status & SD_RESERVE) && 21606 (un->un_resvd_status & SD_LOST_RESERVE)) { 21607 if (un->un_resvd_status & SD_WANT_RESERVE) { 21608 /* 21609 * A reset occurred in between the last probe and this 21610 * one so if a timeout is pending cancel it. 21611 */ 21612 if (un->un_resvd_timeid) { 21613 timeout_id_t temp_id = un->un_resvd_timeid; 21614 un->un_resvd_timeid = NULL; 21615 mutex_exit(SD_MUTEX(un)); 21616 (void) untimeout(temp_id); 21617 mutex_enter(SD_MUTEX(un)); 21618 } 21619 un->un_resvd_status &= ~SD_WANT_RESERVE; 21620 } 21621 if (un->un_resvd_timeid == 0) { 21622 /* Schedule a timeout to handle the lost reservation */ 21623 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 21624 (void *)dev, 21625 drv_usectohz(sd_reinstate_resv_delay)); 21626 } 21627 } 21628 mutex_exit(SD_MUTEX(un)); 21629 return (0); 21630 } 21631 21632 21633 /* 21634 * Function: sd_mhd_watch_incomplete() 21635 * 21636 * Description: This function is used to find out why a scsi pkt sent by the 21637 * scsi watch facility was not completed. Under some scenarios this 21638 * routine will return. Otherwise it will send a bus reset to see 21639 * if the drive is still online. 21640 * 21641 * Arguments: un - driver soft state (unit) structure 21642 * pkt - incomplete scsi pkt 21643 */ 21644 21645 static void 21646 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 21647 { 21648 int be_chatty; 21649 int perr; 21650 21651 ASSERT(pkt != NULL); 21652 ASSERT(un != NULL); 21653 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 21654 perr = (pkt->pkt_statistics & STAT_PERR); 21655 21656 mutex_enter(SD_MUTEX(un)); 21657 if (un->un_state == SD_STATE_DUMPING) { 21658 mutex_exit(SD_MUTEX(un)); 21659 return; 21660 } 21661 21662 switch (pkt->pkt_reason) { 21663 case CMD_UNX_BUS_FREE: 21664 /* 21665 * If we had a parity error that caused the target to drop BSY*, 21666 * don't be chatty about it. 21667 */ 21668 if (perr && be_chatty) { 21669 be_chatty = 0; 21670 } 21671 break; 21672 case CMD_TAG_REJECT: 21673 /* 21674 * The SCSI-2 spec states that a tag reject will be sent by the 21675 * target if tagged queuing is not supported. A tag reject may 21676 * also be sent during certain initialization periods or to 21677 * control internal resources. For the latter case the target 21678 * may also return Queue Full. 21679 * 21680 * If this driver receives a tag reject from a target that is 21681 * going through an init period or controlling internal 21682 * resources tagged queuing will be disabled. 
This is less-than-optimal
 * behavior, but the driver is unable to determine the target state and
 * assumes tagged queueing is not supported.
 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus, unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte for use in
 *		logging.
 *
 * Arguments: status - the status byte
 *
 * Return Code: char * - string containing the status description.
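 *
 *		For example, sd_sname(STATUS_CHECK) returns the string
 *		"check condition".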
21791 */ 21792 21793 static char * 21794 sd_sname(uchar_t status) 21795 { 21796 switch (status & STATUS_MASK) { 21797 case STATUS_GOOD: 21798 return ("good status"); 21799 case STATUS_CHECK: 21800 return ("check condition"); 21801 case STATUS_MET: 21802 return ("condition met"); 21803 case STATUS_BUSY: 21804 return ("busy"); 21805 case STATUS_INTERMEDIATE: 21806 return ("intermediate"); 21807 case STATUS_INTERMEDIATE_MET: 21808 return ("intermediate - condition met"); 21809 case STATUS_RESERVATION_CONFLICT: 21810 return ("reservation_conflict"); 21811 case STATUS_TERMINATED: 21812 return ("command terminated"); 21813 case STATUS_QFULL: 21814 return ("queue full"); 21815 default: 21816 return ("<unknown status>"); 21817 } 21818 } 21819 21820 21821 /* 21822 * Function: sd_mhd_resvd_recover() 21823 * 21824 * Description: This function adds a reservation entry to the 21825 * sd_resv_reclaim_request list and signals the reservation 21826 * reclaim thread that there is work pending. If the reservation 21827 * reclaim thread has not been previously created this function 21828 * will kick it off. 21829 * 21830 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21831 * among multiple watches that share this callback function 21832 * 21833 * Context: This routine is called by timeout() and is run in interrupt 21834 * context. It must not sleep or call other functions which may 21835 * sleep. 21836 */ 21837 21838 static void 21839 sd_mhd_resvd_recover(void *arg) 21840 { 21841 dev_t dev = (dev_t)arg; 21842 struct sd_lun *un; 21843 struct sd_thr_request *sd_treq = NULL; 21844 struct sd_thr_request *sd_cur = NULL; 21845 struct sd_thr_request *sd_prev = NULL; 21846 int already_there = 0; 21847 21848 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21849 return; 21850 } 21851 21852 mutex_enter(SD_MUTEX(un)); 21853 un->un_resvd_timeid = NULL; 21854 if (un->un_resvd_status & SD_WANT_RESERVE) { 21855 /* 21856 * There was a reset so don't issue the reserve, allow the 21857 * sd_mhd_watch_cb callback function to notice this and 21858 * reschedule the timeout for reservation. 21859 */ 21860 mutex_exit(SD_MUTEX(un)); 21861 return; 21862 } 21863 mutex_exit(SD_MUTEX(un)); 21864 21865 /* 21866 * Add this device to the sd_resv_reclaim_request list and the 21867 * sd_resv_reclaim_thread should take care of the rest. 21868 * 21869 * Note: We can't sleep in this context so if the memory allocation 21870 * fails allow the sd_mhd_watch_cb callback function to notice this and 21871 * reschedule the timeout for reservation. 
(4378460) 21872 */ 21873 sd_treq = (struct sd_thr_request *) 21874 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 21875 if (sd_treq == NULL) { 21876 return; 21877 } 21878 21879 sd_treq->sd_thr_req_next = NULL; 21880 sd_treq->dev = dev; 21881 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21882 if (sd_tr.srq_thr_req_head == NULL) { 21883 sd_tr.srq_thr_req_head = sd_treq; 21884 } else { 21885 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 21886 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 21887 if (sd_cur->dev == dev) { 21888 /* 21889 * already in Queue so don't log 21890 * another request for the device 21891 */ 21892 already_there = 1; 21893 break; 21894 } 21895 sd_prev = sd_cur; 21896 } 21897 if (!already_there) { 21898 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 21899 "logging request for %lx\n", dev); 21900 sd_prev->sd_thr_req_next = sd_treq; 21901 } else { 21902 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 21903 } 21904 } 21905 21906 /* 21907 * Create a kernel thread to do the reservation reclaim and free up this 21908 * thread. We cannot block this thread while we go away to do the 21909 * reservation reclaim 21910 */ 21911 if (sd_tr.srq_resv_reclaim_thread == NULL) 21912 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 21913 sd_resv_reclaim_thread, NULL, 21914 0, &p0, TS_RUN, v.v_maxsyspri - 2); 21915 21916 /* Tell the reservation reclaim thread that it has work to do */ 21917 cv_signal(&sd_tr.srq_resv_reclaim_cv); 21918 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21919 } 21920 21921 /* 21922 * Function: sd_resv_reclaim_thread() 21923 * 21924 * Description: This function implements the reservation reclaim operations 21925 * 21926 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21927 * among multiple watches that share this callback function 21928 */ 21929 21930 static void 21931 sd_resv_reclaim_thread() 21932 { 21933 struct sd_lun *un; 21934 struct sd_thr_request *sd_mhreq; 21935 21936 /* Wait for work */ 21937 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 21938 if (sd_tr.srq_thr_req_head == NULL) { 21939 cv_wait(&sd_tr.srq_resv_reclaim_cv, 21940 &sd_tr.srq_resv_reclaim_mutex); 21941 } 21942 21943 /* Loop while we have work */ 21944 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 21945 un = ddi_get_soft_state(sd_state, 21946 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 21947 if (un == NULL) { 21948 /* 21949 * softstate structure is NULL so just 21950 * dequeue the request and continue 21951 */ 21952 sd_tr.srq_thr_req_head = 21953 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21954 kmem_free(sd_tr.srq_thr_cur_req, 21955 sizeof (struct sd_thr_request)); 21956 continue; 21957 } 21958 21959 /* dequeue the request */ 21960 sd_mhreq = sd_tr.srq_thr_cur_req; 21961 sd_tr.srq_thr_req_head = 21962 sd_tr.srq_thr_cur_req->sd_thr_req_next; 21963 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 21964 21965 /* 21966 * Reclaim reservation only if SD_RESERVE is still set. There 21967 * may have been a call to MHIOCRELEASE before we got here. 21968 */ 21969 mutex_enter(SD_MUTEX(un)); 21970 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 21971 /* 21972 * Note: The SD_LOST_RESERVE flag is cleared before 21973 * reclaiming the reservation. 
If this is done after the 21974 * call to sd_reserve_release a reservation loss in the 21975 * window between pkt completion of reserve cmd and 21976 * mutex_enter below may not be recognized 21977 */ 21978 un->un_resvd_status &= ~SD_LOST_RESERVE; 21979 mutex_exit(SD_MUTEX(un)); 21980 21981 if (sd_reserve_release(sd_mhreq->dev, 21982 SD_RESERVE) == 0) { 21983 mutex_enter(SD_MUTEX(un)); 21984 un->un_resvd_status |= SD_RESERVE; 21985 mutex_exit(SD_MUTEX(un)); 21986 SD_INFO(SD_LOG_IOCTL_MHD, un, 21987 "sd_resv_reclaim_thread: " 21988 "Reservation Recovered\n"); 21989 } else { 21990 mutex_enter(SD_MUTEX(un)); 21991 un->un_resvd_status |= SD_LOST_RESERVE; 21992 mutex_exit(SD_MUTEX(un)); 21993 SD_INFO(SD_LOG_IOCTL_MHD, un, 21994 "sd_resv_reclaim_thread: Failed " 21995 "Reservation Recovery\n"); 21996 } 21997 } else { 21998 mutex_exit(SD_MUTEX(un)); 21999 } 22000 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22001 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22002 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22003 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22004 /* 22005 * wakeup the destroy thread if anyone is waiting on 22006 * us to complete. 22007 */ 22008 cv_signal(&sd_tr.srq_inprocess_cv); 22009 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22010 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22011 } 22012 22013 /* 22014 * cleanup the sd_tr structure now that this thread will not exist 22015 */ 22016 ASSERT(sd_tr.srq_thr_req_head == NULL); 22017 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22018 sd_tr.srq_resv_reclaim_thread = NULL; 22019 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22020 thread_exit(); 22021 } 22022 22023 22024 /* 22025 * Function: sd_rmv_resv_reclaim_req() 22026 * 22027 * Description: This function removes any pending reservation reclaim requests 22028 * for the specified device. 22029 * 22030 * Arguments: dev - the device 'dev_t' 22031 */ 22032 22033 static void 22034 sd_rmv_resv_reclaim_req(dev_t dev) 22035 { 22036 struct sd_thr_request *sd_mhreq; 22037 struct sd_thr_request *sd_prev; 22038 22039 /* Remove a reservation reclaim request from the list */ 22040 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22041 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22042 /* 22043 * We are attempting to reinstate reservation for 22044 * this device. We wait for sd_reserve_release() 22045 * to return before we return. 22046 */ 22047 cv_wait(&sd_tr.srq_inprocess_cv, 22048 &sd_tr.srq_resv_reclaim_mutex); 22049 } else { 22050 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22051 if (sd_mhreq && sd_mhreq->dev == dev) { 22052 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22053 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22054 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22055 return; 22056 } 22057 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22058 if (sd_mhreq && sd_mhreq->dev == dev) { 22059 break; 22060 } 22061 sd_prev = sd_mhreq; 22062 } 22063 if (sd_mhreq != NULL) { 22064 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22065 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22066 } 22067 } 22068 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22069 } 22070 22071 22072 /* 22073 * Function: sd_mhd_reset_notify_cb() 22074 * 22075 * Description: This is a call back function for scsi_reset_notify. This 22076 * function updates the softstate reserved status and logs the 22077 * reset. The driver scsi watch facility callback function 22078 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22079 * will reclaim the reservation. 
22080 * 22081 * Arguments: arg - driver soft state (unit) structure 22082 */ 22083 22084 static void 22085 sd_mhd_reset_notify_cb(caddr_t arg) 22086 { 22087 struct sd_lun *un = (struct sd_lun *)arg; 22088 22089 mutex_enter(SD_MUTEX(un)); 22090 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22091 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22092 SD_INFO(SD_LOG_IOCTL_MHD, un, 22093 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22094 } 22095 mutex_exit(SD_MUTEX(un)); 22096 } 22097 22098 22099 /* 22100 * Function: sd_take_ownership() 22101 * 22102 * Description: This routine implements an algorithm to achieve a stable 22103 * reservation on disks which don't implement priority reserve, 22104 * and makes sure that other hosts' re-reservation attempts fail. 22105 * This algorithm consists of a loop that keeps issuing the RESERVE 22106 * for some period of time (min_ownership_delay, default 6 seconds). 22107 * During that loop, it looks to see if there has been a bus device 22108 * reset or bus reset (both of which cause an existing reservation 22109 * to be lost). If the reservation is lost, issue RESERVE until a 22110 * period of min_ownership_delay with no resets has gone by, or 22111 * until max_ownership_delay has expired. This loop ensures that 22112 * the host really did manage to reserve the device, in spite of 22113 * resets. The looping for min_ownership_delay (default six 22114 * seconds) is important to early generation clustering products, 22115 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22116 * MHIOCENFAILFAST periodic timer of two seconds. By having 22117 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22118 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22119 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22120 * have already noticed, via the MHIOCENFAILFAST polling, that it 22121 * no longer "owns" the disk and will have panicked itself. Thus, 22122 * the host issuing the MHIOCTKOWN is assured (with timing 22123 * dependencies) that by the time it actually starts to use the 22124 * disk for real work, the old owner is no longer accessing it. 22125 * 22126 * min_ownership_delay is the minimum amount of time for which the 22127 * disk must be reserved continuously devoid of resets before the 22128 * MHIOCTKOWN ioctl will return success. 22129 * 22130 * max_ownership_delay indicates the amount of time within which the 22131 * take ownership must either succeed or time out with an error. 22132 * 22133 * Arguments: dev - the device 'dev_t' 22134 * *p - struct containing timing info. 22135 * 22136 * Return Code: 0 for success or error code 22137 */ 22138 22139 static int 22140 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22141 { 22142 struct sd_lun *un; 22143 int rval; 22144 int err; 22145 int reservation_count = 0; 22146 int min_ownership_delay = 6000000; /* in usec */ 22147 int max_ownership_delay = 30000000; /* in usec */ 22148 clock_t start_time; /* starting time of this algorithm */ 22149 clock_t end_time; /* time limit for giving up */ 22150 clock_t ownership_time; /* time limit for stable ownership */ 22151 clock_t current_time; 22152 clock_t previous_current_time; 22153 22154 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22155 return (ENXIO); 22156 } 22157 22158 /* 22159 * Attempt a device reservation. A priority reservation is requested. 
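 * (A priority reservation is one that sd_reserve_release() will, on a
 * reservation conflict, try to break with a LUN, target, or bus reset
 * before reissuing the RESERVE; see sd_reserve_release() below.)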
22160 */ 22161 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22162 != SD_SUCCESS) { 22163 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22164 "sd_take_ownership: return(1)=%d\n", rval); 22165 return (rval); 22166 } 22167 22168 /* Update the softstate reserved status to indicate the reservation */ 22169 mutex_enter(SD_MUTEX(un)); 22170 un->un_resvd_status |= SD_RESERVE; 22171 un->un_resvd_status &= 22172 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22173 mutex_exit(SD_MUTEX(un)); 22174 22175 if (p != NULL) { 22176 if (p->min_ownership_delay != 0) { 22177 min_ownership_delay = p->min_ownership_delay * 1000; 22178 } 22179 if (p->max_ownership_delay != 0) { 22180 max_ownership_delay = p->max_ownership_delay * 1000; 22181 } 22182 } 22183 SD_INFO(SD_LOG_IOCTL_MHD, un, 22184 "sd_take_ownership: min, max delays: %d, %d\n", 22185 min_ownership_delay, max_ownership_delay); 22186 22187 start_time = ddi_get_lbolt(); 22188 current_time = start_time; 22189 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22190 end_time = start_time + drv_usectohz(max_ownership_delay); 22191 22192 while (current_time - end_time < 0) { 22193 delay(drv_usectohz(500000)); 22194 22195 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22196 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22197 mutex_enter(SD_MUTEX(un)); 22198 rval = (un->un_resvd_status & 22199 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22200 mutex_exit(SD_MUTEX(un)); 22201 break; 22202 } 22203 } 22204 previous_current_time = current_time; 22205 current_time = ddi_get_lbolt(); 22206 mutex_enter(SD_MUTEX(un)); 22207 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22208 ownership_time = ddi_get_lbolt() + 22209 drv_usectohz(min_ownership_delay); 22210 reservation_count = 0; 22211 } else { 22212 reservation_count++; 22213 } 22214 un->un_resvd_status |= SD_RESERVE; 22215 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22216 mutex_exit(SD_MUTEX(un)); 22217 22218 SD_INFO(SD_LOG_IOCTL_MHD, un, 22219 "sd_take_ownership: ticks for loop iteration=%ld, " 22220 "reservation=%s\n", (current_time - previous_current_time), 22221 reservation_count ? 
"ok" : "reclaimed"); 22222 22223 if (current_time - ownership_time >= 0 && 22224 reservation_count >= 4) { 22225 rval = 0; /* Achieved a stable ownership */ 22226 break; 22227 } 22228 if (current_time - end_time >= 0) { 22229 rval = EACCES; /* No ownership in max possible time */ 22230 break; 22231 } 22232 } 22233 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22234 "sd_take_ownership: return(2)=%d\n", rval); 22235 return (rval); 22236 } 22237 22238 22239 /* 22240 * Function: sd_reserve_release() 22241 * 22242 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22243 * PRIORITY RESERVE commands based on a user specified command type 22244 * 22245 * Arguments: dev - the device 'dev_t' 22246 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22247 * SD_RESERVE, SD_RELEASE 22248 * 22249 * Return Code: 0 or Error Code 22250 */ 22251 22252 static int 22253 sd_reserve_release(dev_t dev, int cmd) 22254 { 22255 struct uscsi_cmd *com = NULL; 22256 struct sd_lun *un = NULL; 22257 char cdb[CDB_GROUP0]; 22258 int rval; 22259 22260 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22261 (cmd == SD_PRIORITY_RESERVE)); 22262 22263 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22264 return (ENXIO); 22265 } 22266 22267 /* instantiate and initialize the command and cdb */ 22268 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22269 bzero(cdb, CDB_GROUP0); 22270 com->uscsi_flags = USCSI_SILENT; 22271 com->uscsi_timeout = un->un_reserve_release_time; 22272 com->uscsi_cdblen = CDB_GROUP0; 22273 com->uscsi_cdb = cdb; 22274 if (cmd == SD_RELEASE) { 22275 cdb[0] = SCMD_RELEASE; 22276 } else { 22277 cdb[0] = SCMD_RESERVE; 22278 } 22279 22280 /* Send the command. */ 22281 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22282 SD_PATH_STANDARD); 22283 22284 /* 22285 * "break" a reservation that is held by another host, by issuing a 22286 * reset if priority reserve is desired, and we could not get the 22287 * device. 22288 */ 22289 if ((cmd == SD_PRIORITY_RESERVE) && 22290 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22291 /* 22292 * First try to reset the LUN. If we cannot, then try a target 22293 * reset, followed by a bus reset if the target reset fails. 22294 */ 22295 int reset_retval = 0; 22296 if (un->un_f_lun_reset_enabled == TRUE) { 22297 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22298 } 22299 if (reset_retval == 0) { 22300 /* The LUN reset either failed or was not issued */ 22301 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22302 } 22303 if ((reset_retval == 0) && 22304 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22305 rval = EIO; 22306 kmem_free(com, sizeof (*com)); 22307 return (rval); 22308 } 22309 22310 bzero(com, sizeof (struct uscsi_cmd)); 22311 com->uscsi_flags = USCSI_SILENT; 22312 com->uscsi_cdb = cdb; 22313 com->uscsi_cdblen = CDB_GROUP0; 22314 com->uscsi_timeout = 5; 22315 22316 /* 22317 * Reissue the last reserve command, this time without request 22318 * sense. Assume that it is just a regular reserve command. 22319 */ 22320 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22321 SD_PATH_STANDARD); 22322 } 22323 22324 /* Return an error if still getting a reservation conflict. 
*/ 22325 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22326 rval = EACCES; 22327 } 22328 22329 kmem_free(com, sizeof (*com)); 22330 return (rval); 22331 } 22332 22333 22334 #define SD_NDUMP_RETRIES 12 22335 /* 22336 * System Crash Dump routine 22337 */ 22338 22339 static int 22340 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22341 { 22342 int instance; 22343 int partition; 22344 int i; 22345 int err; 22346 struct sd_lun *un; 22347 struct scsi_pkt *wr_pktp; 22348 struct buf *wr_bp; 22349 struct buf wr_buf; 22350 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22351 daddr_t tgt_blkno; /* rmw - blkno for target */ 22352 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22353 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22354 size_t io_start_offset; 22355 int doing_rmw = FALSE; 22356 int rval; 22357 #if defined(__i386) || defined(__amd64) 22358 ssize_t dma_resid; 22359 daddr_t oblkno; 22360 #endif 22361 diskaddr_t nblks = 0; 22362 diskaddr_t start_block; 22363 22364 instance = SDUNIT(dev); 22365 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22366 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22367 return (ENXIO); 22368 } 22369 22370 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22371 22372 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22373 22374 partition = SDPART(dev); 22375 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22376 22377 /* Validate the blocks to dump against the partition size. */ 22378 22379 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22380 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22381 22382 if ((blkno + nblk) > nblks) { 22383 SD_TRACE(SD_LOG_DUMP, un, 22384 "sddump: dump range larger than partition: " 22385 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22386 blkno, nblk, nblks); 22387 return (EINVAL); 22388 } 22389 22390 mutex_enter(&un->un_pm_mutex); 22391 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22392 struct scsi_pkt *start_pktp; 22393 22394 mutex_exit(&un->un_pm_mutex); 22395 22396 /* 22397 * Use the pm framework to power on the HBA first. 22398 */ 22399 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22400 22401 /* 22402 * Dump no longer uses sdpower to power on a device; it's 22403 * done in-line here so it can be done in polled mode. 22404 */ 22405 22406 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22407 22408 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22409 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22410 22411 if (start_pktp == NULL) { 22412 /* We were not given a SCSI packet, fail. */ 22413 return (EIO); 22414 } 22415 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22416 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22417 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22418 start_pktp->pkt_flags = FLAG_NOINTR; 22419 22420 mutex_enter(SD_MUTEX(un)); 22421 SD_FILL_SCSI1_LUN(un, start_pktp); 22422 mutex_exit(SD_MUTEX(un)); 22423 /* 22424 * Scsi_poll returns 0 (success) if the command completes and 22425 * the status block is STATUS_GOOD. 22426 */ 22427 if (sd_scsi_poll(un, start_pktp) != 0) { 22428 scsi_destroy_pkt(start_pktp); 22429 return (EIO); 22430 } 22431 scsi_destroy_pkt(start_pktp); 22432 (void) sd_ddi_pm_resume(un); 22433 } else { 22434 mutex_exit(&un->un_pm_mutex); 22435 } 22436 22437 mutex_enter(SD_MUTEX(un)); 22438 un->un_throttle = 0; 22439 22440 /* 22441 * The first time through, reset the specific target device. 22442 * However, when cpr calls sddump we know that sd is in 22443 * a good state so no bus reset is required. 
22444 * Clear sense data via a Request Sense cmd. 22445 * In sddump we don't care about allow_bus_device_reset anymore. 22446 */ 22447 22448 if ((un->un_state != SD_STATE_SUSPENDED) && 22449 (un->un_state != SD_STATE_DUMPING)) { 22450 22451 New_state(un, SD_STATE_DUMPING); 22452 22453 if (un->un_f_is_fibre == FALSE) { 22454 mutex_exit(SD_MUTEX(un)); 22455 /* 22456 * Attempt a bus reset for parallel scsi. 22457 * 22458 * Note: A bus reset is required because on some host 22459 * systems (i.e. E420R) a bus device reset is 22460 * insufficient to reset the state of the target. 22461 * 22462 * Note: Don't issue the reset for fibre-channel, 22463 * because this tends to hang the bus (loop) for 22464 * too long while everyone is logging out and in 22465 * and the deadman timer for dumping will fire 22466 * before the dump is complete. 22467 */ 22468 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22469 mutex_enter(SD_MUTEX(un)); 22470 Restore_state(un); 22471 mutex_exit(SD_MUTEX(un)); 22472 return (EIO); 22473 } 22474 22475 /* Delay to give the device some recovery time. */ 22476 drv_usecwait(10000); 22477 22478 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22479 SD_INFO(SD_LOG_DUMP, un, 22480 "sddump: sd_send_polled_RQS failed\n"); 22481 } 22482 mutex_enter(SD_MUTEX(un)); 22483 } 22484 } 22485 22486 /* 22487 * Convert the partition-relative block number to a 22488 * disk physical block number. 22489 */ 22490 blkno += start_block; 22491 22492 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22493 22494 22495 /* 22496 * Check if the device has a non-512 block size. 22497 */ 22498 wr_bp = NULL; 22499 if (NOT_DEVBSIZE(un)) { 22500 tgt_byte_offset = blkno * un->un_sys_blocksize; 22501 tgt_byte_count = nblk * un->un_sys_blocksize; 22502 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22503 (tgt_byte_count % un->un_tgt_blocksize)) { 22504 doing_rmw = TRUE; 22505 /* 22506 * Calculate the block number and number of blocks 22507 * in terms of the media block size. 22508 */ 22509 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22510 tgt_nblk = 22511 ((tgt_byte_offset + tgt_byte_count + 22512 (un->un_tgt_blocksize - 1)) / 22513 un->un_tgt_blocksize) - tgt_blkno; 22514 22515 /* 22516 * Invoke the routine which is going to do the read 22517 * part of the read-modify-write. 22518 * Note that this routine returns a pointer to 22519 * a valid bp in wr_bp. 22520 */ 22521 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 22522 &wr_bp); 22523 if (err) { 22524 mutex_exit(SD_MUTEX(un)); 22525 return (err); 22526 } 22527 /* 22528 * Offset is being calculated as - 22529 * (original block # * system block size) - 22530 * (new block # * target block size) 22531 */ 22532 io_start_offset = 22533 ((uint64_t)(blkno * un->un_sys_blocksize)) - 22534 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 22535 22536 ASSERT((io_start_offset >= 0) && 22537 (io_start_offset < un->un_tgt_blocksize)); 22538 /* 22539 * Do the modify portion of read modify write. 
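 * For example, assuming 512-byte system blocks and a 2048-byte target
 * block size, a dump starting at system block 5 gives tgt_byte_offset
 * 2560, tgt_blkno 1, and io_start_offset (5 * 512) - (1 * 2048) = 512,
 * so the caller's data lands 512 bytes into the read-back target block.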
22540 */ 22541 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 22542 (size_t)nblk * un->un_sys_blocksize); 22543 } else { 22544 doing_rmw = FALSE; 22545 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22546 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 22547 } 22548 22549 /* Convert blkno and nblk to target blocks */ 22550 blkno = tgt_blkno; 22551 nblk = tgt_nblk; 22552 } else { 22553 wr_bp = &wr_buf; 22554 bzero(wr_bp, sizeof (struct buf)); 22555 wr_bp->b_flags = B_BUSY; 22556 wr_bp->b_un.b_addr = addr; 22557 wr_bp->b_bcount = nblk << DEV_BSHIFT; 22558 wr_bp->b_resid = 0; 22559 } 22560 22561 mutex_exit(SD_MUTEX(un)); 22562 22563 /* 22564 * Obtain a SCSI packet for the write command. 22565 * It should be safe to call the allocator here without 22566 * worrying about being locked for DVMA mapping because 22567 * the address we're passed is already a DVMA mapping 22568 * 22569 * We are also not going to worry about semaphore ownership 22570 * in the dump buffer. Dumping is single threaded at present. 22571 */ 22572 22573 wr_pktp = NULL; 22574 22575 #if defined(__i386) || defined(__amd64) 22576 dma_resid = wr_bp->b_bcount; 22577 oblkno = blkno; 22578 while (dma_resid != 0) { 22579 #endif 22580 22581 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22582 wr_bp->b_flags &= ~B_ERROR; 22583 22584 #if defined(__i386) || defined(__amd64) 22585 blkno = oblkno + 22586 ((wr_bp->b_bcount - dma_resid) / 22587 un->un_tgt_blocksize); 22588 nblk = dma_resid / un->un_tgt_blocksize; 22589 22590 if (wr_pktp) { 22591 /* Partial DMA transfers after initial transfer */ 22592 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 22593 blkno, nblk); 22594 } else { 22595 /* Initial transfer */ 22596 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22597 un->un_pkt_flags, NULL_FUNC, NULL, 22598 blkno, nblk); 22599 } 22600 #else 22601 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 22602 0, NULL_FUNC, NULL, blkno, nblk); 22603 #endif 22604 22605 if (rval == 0) { 22606 /* We were given a SCSI packet, continue. 
*/ 22607 break; 22608 } 22609 22610 if (i == 0) { 22611 if (wr_bp->b_flags & B_ERROR) { 22612 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22613 "no resources for dumping; " 22614 "error code: 0x%x, retrying", 22615 geterror(wr_bp)); 22616 } else { 22617 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 22618 "no resources for dumping; retrying"); 22619 } 22620 } else if (i != (SD_NDUMP_RETRIES - 1)) { 22621 if (wr_bp->b_flags & B_ERROR) { 22622 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22623 "no resources for dumping; error code: " 22624 "0x%x, retrying\n", geterror(wr_bp)); 22625 } 22626 } else { 22627 if (wr_bp->b_flags & B_ERROR) { 22628 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22629 "no resources for dumping; " 22630 "error code: 0x%x, retries failed, " 22631 "giving up.\n", geterror(wr_bp)); 22632 } else { 22633 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22634 "no resources for dumping; " 22635 "retries failed, giving up.\n"); 22636 } 22637 mutex_enter(SD_MUTEX(un)); 22638 Restore_state(un); 22639 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 22640 mutex_exit(SD_MUTEX(un)); 22641 scsi_free_consistent_buf(wr_bp); 22642 } else { 22643 mutex_exit(SD_MUTEX(un)); 22644 } 22645 return (EIO); 22646 } 22647 drv_usecwait(10000); 22648 } 22649 22650 #if defined(__i386) || defined(__amd64) 22651 /* 22652 * save the resid from PARTIAL_DMA 22653 */ 22654 dma_resid = wr_pktp->pkt_resid; 22655 if (dma_resid != 0) 22656 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 22657 wr_pktp->pkt_resid = 0; 22658 #endif 22659 22660 /* SunBug 1222170 */ 22661 wr_pktp->pkt_flags = FLAG_NOINTR; 22662 22663 err = EIO; 22664 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 22665 22666 /* 22667 * Scsi_poll returns 0 (success) if the command completes and 22668 * the status block is STATUS_GOOD. We should only check 22669 * errors if this condition is not true. Even then we should 22670 * send our own request sense packet only if we have a check 22671 * condition and auto request sense has not been performed by 22672 * the hba. 22673 */ 22674 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 22675 22676 if ((sd_scsi_poll(un, wr_pktp) == 0) && 22677 (wr_pktp->pkt_resid == 0)) { 22678 err = SD_SUCCESS; 22679 break; 22680 } 22681 22682 /* 22683 * Check CMD_DEV_GONE 1st, give up if device is gone. 22684 */ 22685 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 22686 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 22687 "Device is gone\n"); 22688 break; 22689 } 22690 22691 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 22692 SD_INFO(SD_LOG_DUMP, un, 22693 "sddump: write failed with CHECK, try # %d\n", i); 22694 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 22695 (void) sd_send_polled_RQS(un); 22696 } 22697 22698 continue; 22699 } 22700 22701 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 22702 int reset_retval = 0; 22703 22704 SD_INFO(SD_LOG_DUMP, un, 22705 "sddump: write failed with BUSY, try # %d\n", i); 22706 22707 if (un->un_f_lun_reset_enabled == TRUE) { 22708 reset_retval = scsi_reset(SD_ADDRESS(un), 22709 RESET_LUN); 22710 } 22711 if (reset_retval == 0) { 22712 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22713 } 22714 (void) sd_send_polled_RQS(un); 22715 22716 } else { 22717 SD_INFO(SD_LOG_DUMP, un, 22718 "sddump: write failed with 0x%x, try # %d\n", 22719 SD_GET_PKT_STATUS(wr_pktp), i); 22720 mutex_enter(SD_MUTEX(un)); 22721 sd_reset_target(un, wr_pktp); 22722 mutex_exit(SD_MUTEX(un)); 22723 } 22724 22725 /* 22726 * If we are not getting anywhere with lun/target resets, 22727 * let's reset the bus. 
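 * (SD_NDUMP_RETRIES is defined as 12 above, so this fires on the single
 * attempt where i == 6, halfway through the retry loop.)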
22728 */ 22729 if (i == SD_NDUMP_RETRIES/2) { 22730 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22731 (void) sd_send_polled_RQS(un); 22732 } 22733 22734 } 22735 #if defined(__i386) || defined(__amd64) 22736 } /* dma_resid */ 22737 #endif 22738 22739 scsi_destroy_pkt(wr_pktp); 22740 mutex_enter(SD_MUTEX(un)); 22741 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 22742 mutex_exit(SD_MUTEX(un)); 22743 scsi_free_consistent_buf(wr_bp); 22744 } else { 22745 mutex_exit(SD_MUTEX(un)); 22746 } 22747 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 22748 return (err); 22749 } 22750 22751 /* 22752 * Function: sd_scsi_poll() 22753 * 22754 * Description: This is a wrapper for the scsi_poll call. 22755 * 22756 * Arguments: sd_lun - The unit structure 22757 * scsi_pkt - The scsi packet being sent to the device. 22758 * 22759 * Return Code: 0 - Command completed successfully with good status 22760 * -1 - Command failed. This could indicate a check condition 22761 * or other status value requiring recovery action. 22762 * 22763 */ 22764 22765 static int 22766 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 22767 { 22768 int status; 22769 22770 ASSERT(un != NULL); 22771 ASSERT(!mutex_owned(SD_MUTEX(un))); 22772 ASSERT(pktp != NULL); 22773 22774 status = SD_SUCCESS; 22775 22776 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 22777 pktp->pkt_flags |= un->un_tagflags; 22778 pktp->pkt_flags &= ~FLAG_NODISCON; 22779 } 22780 22781 status = sd_ddi_scsi_poll(pktp); 22782 /* 22783 * Scsi_poll returns 0 (success) if the command completes and the 22784 * status block is STATUS_GOOD. We should only check errors if this 22785 * condition is not true. Even then we should send our own request 22786 * sense packet only if we have a check condition and auto 22787 * request sense has not been performed by the hba. 22788 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 22789 */ 22790 if ((status != SD_SUCCESS) && 22791 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 22792 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 22793 (pktp->pkt_reason != CMD_DEV_GONE)) 22794 (void) sd_send_polled_RQS(un); 22795 22796 return (status); 22797 } 22798 22799 /* 22800 * Function: sd_send_polled_RQS() 22801 * 22802 * Description: This sends the request sense command to a device. 22803 * 22804 * Arguments: sd_lun - The unit structure 22805 * 22806 * Return Code: 0 - Command completed successfully with good status 22807 * -1 - Command failed. 
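 *
 * Note: This routine reuses the unit's preallocated request sense
 * packet and buffer (un->un_rqs_pktp, un->un_rqs_bp) and serializes
 * access to them via the un_sense_isbusy flag.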
22808 * 22809 */ 22810 22811 static int 22812 sd_send_polled_RQS(struct sd_lun *un) 22813 { 22814 int ret_val; 22815 struct scsi_pkt *rqs_pktp; 22816 struct buf *rqs_bp; 22817 22818 ASSERT(un != NULL); 22819 ASSERT(!mutex_owned(SD_MUTEX(un))); 22820 22821 ret_val = SD_SUCCESS; 22822 22823 rqs_pktp = un->un_rqs_pktp; 22824 rqs_bp = un->un_rqs_bp; 22825 22826 mutex_enter(SD_MUTEX(un)); 22827 22828 if (un->un_sense_isbusy) { 22829 ret_val = SD_FAILURE; 22830 mutex_exit(SD_MUTEX(un)); 22831 return (ret_val); 22832 } 22833 22834 /* 22835 * If the request sense buffer (and packet) is not in use, 22836 * set un_sense_isbusy and send our packet. 22837 */ 22838 un->un_sense_isbusy = 1; 22839 rqs_pktp->pkt_resid = 0; 22840 rqs_pktp->pkt_reason = 0; 22841 rqs_pktp->pkt_flags |= FLAG_NOINTR; 22842 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 22843 22844 mutex_exit(SD_MUTEX(un)); 22845 22846 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 22847 " 0x%p\n", rqs_bp->b_un.b_addr); 22848 22849 /* 22850 * Can't send this to sd_scsi_poll, we wrap ourselves around the 22851 * axle - it has a call into us! 22852 */ 22853 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 22854 SD_INFO(SD_LOG_COMMON, un, 22855 "sd_send_polled_RQS: RQS failed\n"); 22856 } 22857 22858 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 22859 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 22860 22861 mutex_enter(SD_MUTEX(un)); 22862 un->un_sense_isbusy = 0; 22863 mutex_exit(SD_MUTEX(un)); 22864 22865 return (ret_val); 22866 } 22867 22868 /* 22869 * Defines needed for localized version of the scsi_poll routine. 22870 */ 22871 #define SD_CSEC 10000 /* usecs */ 22872 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 22873 22874 22875 /* 22876 * Function: sd_ddi_scsi_poll() 22877 * 22878 * Description: Localized version of the scsi_poll routine. The purpose is to 22879 * send a scsi_pkt to a device as a polled command. This version 22880 * is to ensure more robust handling of transport errors. 22881 * Specifically, this routine cures the not-ready to ready 22882 * transition seen on power up and reset of Sonoma devices; this 22883 * transition can take up to 45 seconds after power-on and 20 22884 * seconds after reset of a Sonoma LUN. 22885 * 22886 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 22887 * 22888 * Return Code: 0 - Command completed successfully with good status 22889 * -1 - Command failed. 22890 * 22891 */ 22892 22893 static int 22894 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 22895 { 22896 int busy_count; 22897 int timeout; 22898 int rval = SD_FAILURE; 22899 int savef; 22900 uint8_t *sensep; 22901 long savet; 22902 void (*savec)(); 22903 /* 22904 * The following is defined in machdep.c and is used in determining if 22905 * the scsi transport system will do polled I/O instead of interrupt 22906 * I/O when called from xx_dump(). 22907 */ 22908 extern int do_polled_io; 22909 22910 /* 22911 * save old flags in pkt, to restore at end 22912 */ 22913 savef = pkt->pkt_flags; 22914 savec = pkt->pkt_comp; 22915 savet = pkt->pkt_time; 22916 22917 pkt->pkt_flags |= FLAG_NOINTR; 22918 22919 /* 22920 * XXX there is nothing in the SCSA spec that states that we should not 22921 * do a callback for polled cmds; however, removing this will break sd 22922 * and probably other target drivers 22923 */ 22924 pkt->pkt_comp = NULL; 22925 22926 /* 22927 * we don't like a polled command without timeout. 22928 * 60 seconds seems long enough. 
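 * (SCSI_POLL_TIMEOUT is 60 seconds. With SD_SEC_TO_CSEC == 100, the
 * loop below gets pkt_time * 100 passes, each slot nominally worth
 * SD_CSEC == 10000 usec, i.e. 10 msec.)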
22929 */ 22930 if (pkt->pkt_time == 0) { 22931 pkt->pkt_time = SCSI_POLL_TIMEOUT; 22932 } 22933 22934 /* 22935 * Send polled cmd. 22936 * 22937 * We do some error recovery for various errors. Tran_busy, 22938 * queue full, and non-dispatched commands are retried every 10 msec 22939 * as they are typically transient failures. Busy status and Not 22940 * Ready are retried every second, as these take a while to 22941 * change. Unit attention is retried pkt_time (60) times 22942 * with no delay. 22943 */ 22944 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 22945 22946 for (busy_count = 0; busy_count < timeout; busy_count++) { 22947 int rc; 22948 int poll_delay; 22949 22950 /* 22951 * Initialize pkt status variables. 22952 */ 22953 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 22954 22955 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 22956 if (rc != TRAN_BUSY) { 22957 /* Transport failed - give up. */ 22958 break; 22959 } else { 22960 /* Transport busy - try again. */ 22961 poll_delay = 1 * SD_CSEC; /* 10 msec */ 22962 } 22963 } else { 22964 /* 22965 * Transport accepted - check pkt status. 22966 */ 22967 rc = (*pkt->pkt_scbp) & STATUS_MASK; 22968 if (pkt->pkt_reason == CMD_CMPLT && 22969 rc == STATUS_CHECK && 22970 pkt->pkt_state & STATE_ARQ_DONE) { 22971 struct scsi_arq_status *arqstat = 22972 (struct scsi_arq_status *)(pkt->pkt_scbp); 22973 22974 sensep = (uint8_t *)&arqstat->sts_sensedata; 22975 } else { 22976 sensep = NULL; 22977 } 22978 22979 if ((pkt->pkt_reason == CMD_CMPLT) && 22980 (rc == STATUS_GOOD)) { 22981 /* No error - we're done */ 22982 rval = SD_SUCCESS; 22983 break; 22984 22985 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 22986 /* Lost connection - give up */ 22987 break; 22988 22989 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 22990 (pkt->pkt_state == 0)) { 22991 /* Pkt not dispatched - try again. */ 22992 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22993 22994 } else if ((pkt->pkt_reason == CMD_CMPLT) && 22995 (rc == STATUS_QFULL)) { 22996 /* Queue full - try again. */ 22997 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 22998 22999 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23000 (rc == STATUS_BUSY)) { 23001 /* Busy - try again. */ 23002 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23003 busy_count += (SD_SEC_TO_CSEC - 1); 23004 23005 } else if ((sensep != NULL) && 23006 (scsi_sense_key(sensep) == 23007 KEY_UNIT_ATTENTION)) { 23008 /* Unit Attention - try again */ 23009 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 23010 continue; 23011 23012 } else if ((sensep != NULL) && 23013 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23014 (scsi_sense_asc(sensep) == 0x04) && 23015 (scsi_sense_ascq(sensep) == 0x01)) { 23016 /* Not ready -> ready - try again. */ 23017 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 23018 busy_count += (SD_SEC_TO_CSEC - 1); 23019 23020 } else { 23021 /* BAD status - give up. 
*/ 23022 break; 23023 } 23024 } 23025 23026 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 23027 !do_polled_io) { 23028 delay(drv_usectohz(poll_delay)); 23029 } else { 23030 /* we busy wait during cpr_dump or interrupt threads */ 23031 drv_usecwait(poll_delay); 23032 } 23033 } 23034 23035 pkt->pkt_flags = savef; 23036 pkt->pkt_comp = savec; 23037 pkt->pkt_time = savet; 23038 return (rval); 23039 } 23040 23041 23042 /* 23043 * Function: sd_persistent_reservation_in_read_keys 23044 * 23045 * Description: This routine is the driver entry point for handling CD-ROM 23046 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23047 * by sending the SCSI-3 PRIN commands to the device. 23048 * Processes the read keys command response by copying the 23049 * reservation key information into the user provided buffer. 23050 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23051 * 23052 * Arguments: un - Pointer to soft state struct for the target. 23053 * usrp - user provided pointer to multihost Persistent In Read 23054 * Keys structure (mhioc_inkeys_t) 23055 * flag - this argument is a pass through to ddi_copyxxx() 23056 * directly from the mode argument of ioctl(). 23057 * 23058 * Return Code: 0 - Success 23059 * EACCES 23060 * ENOTSUP 23061 * errno return code from sd_send_scsi_cmd() 23062 * 23063 * Context: Can sleep. Does not return until command is completed. 23064 */ 23065 23066 static int 23067 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23068 mhioc_inkeys_t *usrp, int flag) 23069 { 23070 #ifdef _MULTI_DATAMODEL 23071 struct mhioc_key_list32 li32; 23072 #endif 23073 sd_prin_readkeys_t *in; 23074 mhioc_inkeys_t *ptr; 23075 mhioc_key_list_t li; 23076 uchar_t *data_bufp; 23077 int data_len; 23078 int rval; 23079 size_t copysz; 23080 23081 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23082 return (EINVAL); 23083 } 23084 bzero(&li, sizeof (mhioc_key_list_t)); 23085 23086 /* 23087 * Get the listsize from user 23088 */ 23089 #ifdef _MULTI_DATAMODEL 23090 23091 switch (ddi_model_convert_from(flag & FMODELS)) { 23092 case DDI_MODEL_ILP32: 23093 copysz = sizeof (struct mhioc_key_list32); 23094 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23095 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23096 "sd_persistent_reservation_in_read_keys: " 23097 "failed ddi_copyin: mhioc_key_list32_t\n"); 23098 rval = EFAULT; 23099 goto done; 23100 } 23101 li.listsize = li32.listsize; 23102 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23103 break; 23104 23105 case DDI_MODEL_NONE: 23106 copysz = sizeof (mhioc_key_list_t); 23107 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23108 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23109 "sd_persistent_reservation_in_read_keys: " 23110 "failed ddi_copyin: mhioc_key_list_t\n"); 23111 rval = EFAULT; 23112 goto done; 23113 } 23114 break; 23115 } 23116 23117 #else /* ! 
_MULTI_DATAMODEL */ 23118 copysz = sizeof (mhioc_key_list_t); 23119 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23120 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23121 "sd_persistent_reservation_in_read_keys: " 23122 "failed ddi_copyin: mhioc_key_list_t\n"); 23123 rval = EFAULT; 23124 goto done; 23125 } 23126 #endif 23127 23128 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23129 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23130 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23131 23132 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23133 data_len, data_bufp)) != 0) { 23134 goto done; 23135 } 23136 in = (sd_prin_readkeys_t *)data_bufp; 23137 ptr->generation = BE_32(in->generation); 23138 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23139 23140 /* 23141 * Return the min(listsize, listlen) keys 23142 */ 23143 #ifdef _MULTI_DATAMODEL 23144 23145 switch (ddi_model_convert_from(flag & FMODELS)) { 23146 case DDI_MODEL_ILP32: 23147 li32.listlen = li.listlen; 23148 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23149 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23150 "sd_persistent_reservation_in_read_keys: " 23151 "failed ddi_copyout: mhioc_key_list32_t\n"); 23152 rval = EFAULT; 23153 goto done; 23154 } 23155 break; 23156 23157 case DDI_MODEL_NONE: 23158 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23159 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23160 "sd_persistent_reservation_in_read_keys: " 23161 "failed ddi_copyout: mhioc_key_list_t\n"); 23162 rval = EFAULT; 23163 goto done; 23164 } 23165 break; 23166 } 23167 23168 #else /* ! _MULTI_DATAMODEL */ 23169 23170 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23171 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23172 "sd_persistent_reservation_in_read_keys: " 23173 "failed ddi_copyout: mhioc_key_list_t\n"); 23174 rval = EFAULT; 23175 goto done; 23176 } 23177 23178 #endif /* _MULTI_DATAMODEL */ 23179 23180 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23181 li.listsize * MHIOC_RESV_KEY_SIZE); 23182 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23183 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23184 "sd_persistent_reservation_in_read_keys: " 23185 "failed ddi_copyout: keylist\n"); 23186 rval = EFAULT; 23187 } 23188 done: 23189 kmem_free(data_bufp, data_len); 23190 return (rval); 23191 } 23192 23193 23194 /* 23195 * Function: sd_persistent_reservation_in_read_resv 23196 * 23197 * Description: This routine is the driver entry point for handling CD-ROM 23198 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23199 * by sending the SCSI-3 PRIN commands to the device. 23200 * Process the read persistent reservations command response by 23201 * copying the reservation information into the user provided 23202 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23203 * 23204 * Arguments: un - Pointer to soft state struct for the target. 23205 * usrp - user provided pointer to multihost Persistent In Read 23206 * Reservations structure (mhioc_inresvs_t) 23207 * flag - this argument is a pass through to ddi_copyxxx() 23208 * directly from the mode argument of ioctl(). 23209 * 23210 * Return Code: 0 - Success 23211 * EACCES 23212 * ENOTSUP 23213 * errno return code from sd_send_scsi_cmd() 23214 * 23215 * Context: Can sleep. Does not return until command is completed. 
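 *
 * For illustration only (not part of the driver): a user-level caller
 * might reach this routine roughly as follows, where 'fd' is a
 * hypothetical open descriptor for the disk device:
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t list;
 *	mhioc_inresvs_t arg;
 *
 *	list.listsize = 4;
 *	list.list = descs;
 *	arg.li = &list;
 *	(void) ioctl(fd, MHIOCGRP_INRESV, &arg);
 *
 * On success, arg.generation and list.listlen are set, and up to
 * min(listlen, listsize) descriptors are copied into descs.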
23216 */ 23217 23218 static int 23219 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23220 mhioc_inresvs_t *usrp, int flag) 23221 { 23222 #ifdef _MULTI_DATAMODEL 23223 struct mhioc_resv_desc_list32 resvlist32; 23224 #endif 23225 sd_prin_readresv_t *in; 23226 mhioc_inresvs_t *ptr; 23227 sd_readresv_desc_t *readresv_ptr; 23228 mhioc_resv_desc_list_t resvlist; 23229 mhioc_resv_desc_t resvdesc; 23230 uchar_t *data_bufp; 23231 int data_len; 23232 int rval; 23233 int i; 23234 size_t copysz; 23235 mhioc_resv_desc_t *bufp; 23236 23237 if ((ptr = usrp) == NULL) { 23238 return (EINVAL); 23239 } 23240 23241 /* 23242 * Get the listsize from user 23243 */ 23244 #ifdef _MULTI_DATAMODEL 23245 switch (ddi_model_convert_from(flag & FMODELS)) { 23246 case DDI_MODEL_ILP32: 23247 copysz = sizeof (struct mhioc_resv_desc_list32); 23248 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23249 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23250 "sd_persistent_reservation_in_read_resv: " 23251 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23252 rval = EFAULT; 23253 goto done; 23254 } 23255 resvlist.listsize = resvlist32.listsize; 23256 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23257 break; 23258 23259 case DDI_MODEL_NONE: 23260 copysz = sizeof (mhioc_resv_desc_list_t); 23261 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23262 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23263 "sd_persistent_reservation_in_read_resv: " 23264 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23265 rval = EFAULT; 23266 goto done; 23267 } 23268 break; 23269 } 23270 #else /* ! _MULTI_DATAMODEL */ 23271 copysz = sizeof (mhioc_resv_desc_list_t); 23272 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23273 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23274 "sd_persistent_reservation_in_read_resv: " 23275 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23276 rval = EFAULT; 23277 goto done; 23278 } 23279 #endif /* ! _MULTI_DATAMODEL */ 23280 23281 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23282 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23283 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23284 23285 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23286 data_len, data_bufp)) != 0) { 23287 goto done; 23288 } 23289 in = (sd_prin_readresv_t *)data_bufp; 23290 ptr->generation = BE_32(in->generation); 23291 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23292 23293 /* 23294 * Return the min(listsize, listlen) keys 23295 */ 23296 #ifdef _MULTI_DATAMODEL 23297 23298 switch (ddi_model_convert_from(flag & FMODELS)) { 23299 case DDI_MODEL_ILP32: 23300 resvlist32.listlen = resvlist.listlen; 23301 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23302 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23303 "sd_persistent_reservation_in_read_resv: " 23304 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23305 rval = EFAULT; 23306 goto done; 23307 } 23308 break; 23309 23310 case DDI_MODEL_NONE: 23311 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23312 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23313 "sd_persistent_reservation_in_read_resv: " 23314 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23315 rval = EFAULT; 23316 goto done; 23317 } 23318 break; 23319 } 23320 23321 #else /* ! _MULTI_DATAMODEL */ 23322 23323 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23324 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23325 "sd_persistent_reservation_in_read_resv: " 23326 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23327 rval = EFAULT; 23328 goto done; 23329 } 23330 23331 #endif /* ! 
_MULTI_DATAMODEL */ 23332 23333 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23334 bufp = resvlist.list; 23335 copysz = sizeof (mhioc_resv_desc_t); 23336 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23337 i++, readresv_ptr++, bufp++) { 23338 23339 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23340 MHIOC_RESV_KEY_SIZE); 23341 resvdesc.type = readresv_ptr->type; 23342 resvdesc.scope = readresv_ptr->scope; 23343 resvdesc.scope_specific_addr = 23344 BE_32(readresv_ptr->scope_specific_addr); 23345 23346 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23347 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23348 "sd_persistent_reservation_in_read_resv: " 23349 "failed ddi_copyout: resvlist\n"); 23350 rval = EFAULT; 23351 goto done; 23352 } 23353 } 23354 done: 23355 kmem_free(data_bufp, data_len); 23356 return (rval); 23357 } 23358 23359 23360 /* 23361 * Function: sr_change_blkmode() 23362 * 23363 * Description: This routine is the driver entry point for handling CD-ROM 23364 * block mode ioctl requests. Support for returning and changing 23365 * the current block size in use by the device is implemented. The 23366 * LBA size is changed via a MODE SELECT Block Descriptor. 23367 * 23368 * This routine issues a mode sense with an allocation length of 23369 * 12 bytes for the mode page header and a single block descriptor. 23370 * 23371 * Arguments: dev - the device 'dev_t' 23372 * cmd - the request type; one of CDROMGBLKMODE (get) or 23373 * CDROMSBLKMODE (set) 23374 * data - current block size or requested block size 23375 * flag - this argument is a pass through to ddi_copyxxx() directly 23376 * from the mode argument of ioctl(). 23377 * 23378 * Return Code: the code returned by sd_send_scsi_cmd() 23379 * EINVAL if invalid arguments are provided 23380 * EFAULT if ddi_copyxxx() fails 23381 * ENXIO if fail ddi_get_soft_state 23382 * EIO if invalid mode sense block descriptor length 23383 * 23384 */ 23385 23386 static int 23387 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23388 { 23389 struct sd_lun *un = NULL; 23390 struct mode_header *sense_mhp, *select_mhp; 23391 struct block_descriptor *sense_desc, *select_desc; 23392 int current_bsize; 23393 int rval = EINVAL; 23394 uchar_t *sense = NULL; 23395 uchar_t *select = NULL; 23396 23397 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23398 23399 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23400 return (ENXIO); 23401 } 23402 23403 /* 23404 * The block length is changed via the Mode Select block descriptor; the 23405 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23406 * required by this routine. Therefore the mode sense allocation 23407 * length is specified to be the length of a mode page header and a 23408 * block descriptor. 
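 * (That is, BUFLEN_CHG_BLK_MODE presumably covers a 4-byte mode
 * parameter header plus one 8-byte block descriptor, giving the
 * 12 bytes mentioned in the header comment above.)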
23409 */ 23410 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23411 23412 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23413 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 23414 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23415 "sr_change_blkmode: Mode Sense Failed\n"); 23416 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23417 return (rval); 23418 } 23419 23420 /* Check the block descriptor len to handle only 1 block descriptor */ 23421 sense_mhp = (struct mode_header *)sense; 23422 if ((sense_mhp->bdesc_length == 0) || 23423 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 23424 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23425 "sr_change_blkmode: Mode Sense returned invalid block" 23426 " descriptor length\n"); 23427 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23428 return (EIO); 23429 } 23430 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 23431 current_bsize = ((sense_desc->blksize_hi << 16) | 23432 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 23433 23434 /* Process command */ 23435 switch (cmd) { 23436 case CDROMGBLKMODE: 23437 /* Return the block size obtained during the mode sense */ 23438 if (ddi_copyout(&current_bsize, (void *)data, 23439 sizeof (int), flag) != 0) 23440 rval = EFAULT; 23441 break; 23442 case CDROMSBLKMODE: 23443 /* Validate the requested block size */ 23444 switch (data) { 23445 case CDROM_BLK_512: 23446 case CDROM_BLK_1024: 23447 case CDROM_BLK_2048: 23448 case CDROM_BLK_2056: 23449 case CDROM_BLK_2336: 23450 case CDROM_BLK_2340: 23451 case CDROM_BLK_2352: 23452 case CDROM_BLK_2368: 23453 case CDROM_BLK_2448: 23454 case CDROM_BLK_2646: 23455 case CDROM_BLK_2647: 23456 break; 23457 default: 23458 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23459 "sr_change_blkmode: " 23460 "Block Size '%ld' Not Supported\n", data); 23461 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23462 return (EINVAL); 23463 } 23464 23465 /* 23466 * The current block size matches the requested block size so 23467 * there is no need to send the mode select to change the size 23468 */ 23469 if (current_bsize == data) { 23470 break; 23471 } 23472 23473 /* Build the select data for the requested block size */ 23474 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 23475 select_mhp = (struct mode_header *)select; 23476 select_desc = 23477 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 23478 /* 23479 * The LBA size is changed via the block descriptor, so the 23480 * descriptor is built according to the user data 23481 */ 23482 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 23483 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 23484 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 23485 select_desc->blksize_lo = (char)((data) & 0x000000ff); 23486 23487 /* Send the mode select for the requested block size */ 23488 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23489 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23490 SD_PATH_STANDARD)) != 0) { 23491 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23492 "sr_change_blkmode: Mode Select Failed\n"); 23493 /* 23494 * The mode select failed for the requested block size, 23495 * so reset the data for the original block size and 23496 * send it to the target. The error is indicated by the 23497 * return value for the failed mode select. 
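 * (The restore attempt's own status is deliberately discarded via
 * the (void) cast below; the caller sees the rval from the original
 * failed mode select.)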
23498 */ 23499 select_desc->blksize_hi = sense_desc->blksize_hi; 23500 select_desc->blksize_mid = sense_desc->blksize_mid; 23501 select_desc->blksize_lo = sense_desc->blksize_lo; 23502 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 23503 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 23504 SD_PATH_STANDARD); 23505 } else { 23506 ASSERT(!mutex_owned(SD_MUTEX(un))); 23507 mutex_enter(SD_MUTEX(un)); 23508 sd_update_block_info(un, (uint32_t)data, 0); 23509 mutex_exit(SD_MUTEX(un)); 23510 } 23511 break; 23512 default: 23513 /* should not reach here, but check anyway */ 23514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23515 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 23516 rval = EINVAL; 23517 break; 23518 } 23519 23520 if (select) { 23521 kmem_free(select, BUFLEN_CHG_BLK_MODE); 23522 } 23523 if (sense) { 23524 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 23525 } 23526 return (rval); 23527 } 23528 23529 23530 /* 23531 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 23532 * implement driver support for getting and setting the CD speed. The command 23533 * set used will be based on the device type. If the device has not been 23534 * identified as MMC the Toshiba vendor specific mode page will be used. If 23535 * the device is MMC but does not support the Real Time Streaming feature 23536 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 23537 * be used to read the speed. 23538 */ 23539 23540 /* 23541 * Function: sr_change_speed() 23542 * 23543 * Description: This routine is the driver entry point for handling CD-ROM 23544 * drive speed ioctl requests for devices supporting the Toshiba 23545 * vendor specific drive speed mode page. Support for returning 23546 * and changing the current drive speed in use by the device is 23547 * implemented. 23548 * 23549 * Arguments: dev - the device 'dev_t' 23550 * cmd - the request type; one of CDROMGDRVSPEED (get) or 23551 * CDROMSDRVSPEED (set) 23552 * data - current drive speed or requested drive speed 23553 * flag - this argument is a pass through to ddi_copyxxx() directly 23554 * from the mode argument of ioctl(). 23555 * 23556 * Return Code: the code returned by sd_send_scsi_cmd() 23557 * EINVAL if invalid arguments are provided 23558 * EFAULT if ddi_copyxxx() fails 23559 * ENXIO if fail ddi_get_soft_state 23560 * EIO if invalid mode sense block descriptor length 23561 */ 23562 23563 static int 23564 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23565 { 23566 struct sd_lun *un = NULL; 23567 struct mode_header *sense_mhp, *select_mhp; 23568 struct mode_speed *sense_page, *select_page; 23569 int current_speed; 23570 int rval = EINVAL; 23571 int bd_len; 23572 uchar_t *sense = NULL; 23573 uchar_t *select = NULL; 23574 23575 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23576 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23577 return (ENXIO); 23578 } 23579 23580 /* 23581 * Note: The drive speed is being modified here according to a Toshiba 23582 * vendor specific mode page (0x31). 
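 * (As the code below assumes, the page consists of a standard mode
 * page header - page code 0x31, page length 2 - followed by the
 * speed byte; see the select_page initialization further down.)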
23583 */ 23584 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23585 23586 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 23587 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 23588 SD_PATH_STANDARD)) != 0) { 23589 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23590 "sr_change_speed: Mode Sense Failed\n"); 23591 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23592 return (rval); 23593 } 23594 sense_mhp = (struct mode_header *)sense; 23595 23596 /* Check the block descriptor len to handle only 1 block descriptor */ 23597 bd_len = sense_mhp->bdesc_length; 23598 if (bd_len > MODE_BLK_DESC_LENGTH) { 23599 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23600 "sr_change_speed: Mode Sense returned invalid block " 23601 "descriptor length\n"); 23602 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23603 return (EIO); 23604 } 23605 23606 sense_page = (struct mode_speed *) 23607 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 23608 current_speed = sense_page->speed; 23609 23610 /* Process command */ 23611 switch (cmd) { 23612 case CDROMGDRVSPEED: 23613 /* Return the drive speed obtained during the mode sense */ 23614 if (current_speed == 0x2) { 23615 current_speed = CDROM_TWELVE_SPEED; 23616 } 23617 if (ddi_copyout(&current_speed, (void *)data, 23618 sizeof (int), flag) != 0) { 23619 rval = EFAULT; 23620 } 23621 break; 23622 case CDROMSDRVSPEED: 23623 /* Validate the requested drive speed */ 23624 switch ((uchar_t)data) { 23625 case CDROM_TWELVE_SPEED: 23626 data = 0x2; 23627 /*FALLTHROUGH*/ 23628 case CDROM_NORMAL_SPEED: 23629 case CDROM_DOUBLE_SPEED: 23630 case CDROM_QUAD_SPEED: 23631 case CDROM_MAXIMUM_SPEED: 23632 break; 23633 default: 23634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23635 "sr_change_speed: " 23636 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 23637 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23638 return (EINVAL); 23639 } 23640 23641 /* 23642 * The current drive speed matches the requested drive speed so 23643 * there is no need to send the mode select to change the speed 23644 */ 23645 if (current_speed == data) { 23646 break; 23647 } 23648 23649 /* Build the select data for the requested drive speed */ 23650 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 23651 select_mhp = (struct mode_header *)select; 23652 select_mhp->bdesc_length = 0; 23653 select_page = 23654 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 23657 select_page->mode_page.code = CDROM_MODE_SPEED; 23658 select_page->mode_page.length = 2; 23659 select_page->speed = (uchar_t)data; 23660 23661 /* Send the mode select for the requested drive speed */ 23662 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23663 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23664 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 23665 /* 23666 * The mode select failed for the requested drive speed, 23667 * so reset the data for the original drive speed and 23668 * send it to the target. The error is indicated by the 23669 * return value for the failed mode select. 
23670 */ 23671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23672 "sr_drive_speed: Mode Select Failed\n"); 23673 select_page->speed = sense_page->speed; 23674 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 23675 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 23676 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 23677 } 23678 break; 23679 default: 23680 /* should not reach here, but check anyway */ 23681 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23682 "sr_change_speed: Command '%x' Not Supported\n", cmd); 23683 rval = EINVAL; 23684 break; 23685 } 23686 23687 if (select) { 23688 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 23689 } 23690 if (sense) { 23691 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 23692 } 23693 23694 return (rval); 23695 } 23696 23697 23698 /* 23699 * Function: sr_atapi_change_speed() 23700 * 23701 * Description: This routine is the driver entry point for handling CD-ROM 23702 * drive speed ioctl requests for MMC devices that do not support 23703 * the Real Time Streaming feature (0x107). 23704 * 23705 * Note: This routine will use the SET SPEED command which may not 23706 * be supported by all devices. 23707 * 23708 * Arguments: dev- the device 'dev_t' 23709 * cmd- the request type; one of CDROMGDRVSPEED (get) or 23710 * CDROMSDRVSPEED (set) 23711 * data- current drive speed or requested drive speed 23712 * flag- this argument is a pass through to ddi_copyxxx() directly 23713 * from the mode argument of ioctl(). 23714 * 23715 * Return Code: the code returned by sd_send_scsi_cmd() 23716 * EINVAL if invalid arguments are provided 23717 * EFAULT if ddi_copyxxx() fails 23718 * ENXIO if fail ddi_get_soft_state 23719 * EIO if invalid mode sense block descriptor length 23720 */ 23721 23722 static int 23723 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 23724 { 23725 struct sd_lun *un; 23726 struct uscsi_cmd *com = NULL; 23727 struct mode_header_grp2 *sense_mhp; 23728 uchar_t *sense_page; 23729 uchar_t *sense = NULL; 23730 char cdb[CDB_GROUP5]; 23731 int bd_len; 23732 int current_speed = 0; 23733 int max_speed = 0; 23734 int rval; 23735 23736 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 23737 23738 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23739 return (ENXIO); 23740 } 23741 23742 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 23743 23744 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 23745 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 23746 SD_PATH_STANDARD)) != 0) { 23747 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23748 "sr_atapi_change_speed: Mode Sense Failed\n"); 23749 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23750 return (rval); 23751 } 23752 23753 /* Check the block descriptor len to handle only 1 block descriptor */ 23754 sense_mhp = (struct mode_header_grp2 *)sense; 23755 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 23756 if (bd_len > MODE_BLK_DESC_LENGTH) { 23757 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23758 "sr_atapi_change_speed: Mode Sense returned invalid " 23759 "block descriptor length\n"); 23760 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23761 return (EIO); 23762 } 23763 23764 /* Calculate the current and maximum drive speeds */ 23765 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 23766 current_speed = (sense_page[14] << 8) | sense_page[15]; 23767 max_speed = (sense_page[8] << 8) | sense_page[9]; 23768 23769 /* Process the command */ 23770 switch (cmd) { 23771 case CDROMGDRVSPEED: 23772 current_speed /= SD_SPEED_1X; 23773 if 
(ddi_copyout(&current_speed, (void *)data, 23774 sizeof (int), flag) != 0) 23775 rval = EFAULT; 23776 break; 23777 case CDROMSDRVSPEED: 23778 /* Convert the speed code to KB/sec */ 23779 switch ((uchar_t)data) { 23780 case CDROM_NORMAL_SPEED: 23781 current_speed = SD_SPEED_1X; 23782 break; 23783 case CDROM_DOUBLE_SPEED: 23784 current_speed = 2 * SD_SPEED_1X; 23785 break; 23786 case CDROM_QUAD_SPEED: 23787 current_speed = 4 * SD_SPEED_1X; 23788 break; 23789 case CDROM_TWELVE_SPEED: 23790 current_speed = 12 * SD_SPEED_1X; 23791 break; 23792 case CDROM_MAXIMUM_SPEED: 23793 current_speed = 0xffff; 23794 break; 23795 default: 23796 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23797 "sr_atapi_change_speed: invalid drive speed %d\n", 23798 (uchar_t)data); 23799 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23800 return (EINVAL); 23801 } 23802 23803 /* Check the request against the drive's max speed. */ 23804 if (current_speed != 0xffff) { 23805 if (current_speed > max_speed) { 23806 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23807 return (EINVAL); 23808 } 23809 } 23810 23811 /* 23812 * Build and send the SET SPEED command 23813 * 23814 * Note: The SET SPEED (0xBB) command used in this routine is 23815 * obsolete per the SCSI MMC spec but still supported in the 23816 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 23817 * therefore the command is still implemented in this routine. 23818 */ 23819 bzero(cdb, sizeof (cdb)); 23820 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 23821 cdb[2] = (uchar_t)(current_speed >> 8); 23822 cdb[3] = (uchar_t)current_speed; 23823 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23824 com->uscsi_cdb = (caddr_t)cdb; 23825 com->uscsi_cdblen = CDB_GROUP5; 23826 com->uscsi_bufaddr = NULL; 23827 com->uscsi_buflen = 0; 23828 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23829 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 23830 break; 23831 default: 23832 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23833 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 23834 rval = EINVAL; 23835 } 23836 23837 if (sense) { 23838 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 23839 } 23840 if (com) { 23841 kmem_free(com, sizeof (*com)); 23842 } 23843 return (rval); 23844 } 23845 23846 23847 /* 23848 * Function: sr_pause_resume() 23849 * 23850 * Description: This routine is the driver entry point for handling CD-ROM 23851 * pause/resume ioctl requests. This only affects the audio play 23852 * operation. 23853 * 23854 * Arguments: dev - the device 'dev_t' 23855 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 23856 * for setting the resume bit of the cdb.
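 *
 * As an illustrative sketch (not part of the original driver), a
 * user-level caller would issue these requests with no argument;
 * 'fd' is assumed to be an open descriptor on the raw CD device:
 *
 *	if (ioctl(fd, CDROMPAUSE) < 0)
 *		perror("CDROMPAUSE");
 *	if (ioctl(fd, CDROMRESUME) < 0)
 *		perror("CDROMRESUME");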
23857 * 23858 * Return Code: the code returned by sd_send_scsi_cmd() 23859 * EINVAL if invalid mode specified 23860 * 23861 */ 23862 23863 static int 23864 sr_pause_resume(dev_t dev, int cmd) 23865 { 23866 struct sd_lun *un; 23867 struct uscsi_cmd *com; 23868 char cdb[CDB_GROUP1]; 23869 int rval; 23870 23871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23872 return (ENXIO); 23873 } 23874 23875 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23876 bzero(cdb, CDB_GROUP1); 23877 cdb[0] = SCMD_PAUSE_RESUME; 23878 switch (cmd) { 23879 case CDROMRESUME: 23880 cdb[8] = 1; 23881 break; 23882 case CDROMPAUSE: 23883 cdb[8] = 0; 23884 break; 23885 default: 23886 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 23887 " Command '%x' Not Supported\n", cmd); 23888 rval = EINVAL; 23889 goto done; 23890 } 23891 23892 com->uscsi_cdb = cdb; 23893 com->uscsi_cdblen = CDB_GROUP1; 23894 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23895 23896 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23897 SD_PATH_STANDARD); 23898 23899 done: 23900 kmem_free(com, sizeof (*com)); 23901 return (rval); 23902 } 23903 23904 23905 /* 23906 * Function: sr_play_msf() 23907 * 23908 * Description: This routine is the driver entry point for handling CD-ROM 23909 * ioctl requests to output the audio signals at the specified 23910 * starting address and continue the audio play until the specified 23911 * ending address (CDROMPLAYMSF). The address is in Minute Second 23912 * Frame (MSF) format. 23913 * 23914 * Arguments: dev - the device 'dev_t' 23915 * data - pointer to user provided audio msf structure, 23916 * specifying start/end addresses. 23917 * flag - this argument is a pass through to ddi_copyxxx() 23918 * directly from the mode argument of ioctl().
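 *
 * In MSF format an address is expressed as minute/second/frame, with
 * 75 frames per second and a 150 frame (2 second) lead-in offset, so
 * the corresponding logical block address is roughly
 * lba = ((min * 60 + sec) * 75 + frame) - 150. As an illustrative
 * sketch (not part of the original driver), a user-level caller
 * might play the first five minutes of audio like this:
 *
 *	struct cdrom_msf msf;
 *
 *	msf.cdmsf_min0 = 0; msf.cdmsf_sec0 = 2; msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 5; msf.cdmsf_sec1 = 2; msf.cdmsf_frame1 = 0;
 *	if (ioctl(fd, CDROMPLAYMSF, &msf) < 0)
 *		perror("CDROMPLAYMSF");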
23919 * 23920 * Return Code: the code returned by sd_send_scsi_cmd() 23921 * EFAULT if ddi_copyxxx() fails 23922 * ENXIO if fail ddi_get_soft_state 23923 * EINVAL if data pointer is NULL 23924 */ 23925 23926 static int 23927 sr_play_msf(dev_t dev, caddr_t data, int flag) 23928 { 23929 struct sd_lun *un; 23930 struct uscsi_cmd *com; 23931 struct cdrom_msf msf_struct; 23932 struct cdrom_msf *msf = &msf_struct; 23933 char cdb[CDB_GROUP1]; 23934 int rval; 23935 23936 if (data == NULL) { 23937 return (EINVAL); 23938 } 23939 23940 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23941 return (ENXIO); 23942 } 23943 23944 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 23945 return (EFAULT); 23946 } 23947 23948 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23949 bzero(cdb, CDB_GROUP1); 23950 cdb[0] = SCMD_PLAYAUDIO_MSF; 23951 if (un->un_f_cfg_playmsf_bcd == TRUE) { 23952 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 23953 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 23954 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 23955 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 23956 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 23957 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 23958 } else { 23959 cdb[3] = msf->cdmsf_min0; 23960 cdb[4] = msf->cdmsf_sec0; 23961 cdb[5] = msf->cdmsf_frame0; 23962 cdb[6] = msf->cdmsf_min1; 23963 cdb[7] = msf->cdmsf_sec1; 23964 cdb[8] = msf->cdmsf_frame1; 23965 } 23966 com->uscsi_cdb = cdb; 23967 com->uscsi_cdblen = CDB_GROUP1; 23968 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 23969 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23970 SD_PATH_STANDARD); 23971 kmem_free(com, sizeof (*com)); 23972 return (rval); 23973 } 23974 23975 23976 /* 23977 * Function: sr_play_trkind() 23978 * 23979 * Description: This routine is the driver entry point for handling CD-ROM 23980 * ioctl requests to output the audio signals at the specified 23981 * starting address and continue the audio play until the specified 23982 * ending address (CDROMPLAYTRKIND). The address is in Track Index 23983 * format. 23984 * 23985 * Arguments: dev - the device 'dev_t' 23986 * data - pointer to user provided audio track/index structure, 23987 * specifying start/end addresses. 23988 * flag - this argument is a pass through to ddi_copyxxx() 23989 * directly from the mode argument of ioctl(). 
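 *
 * As an illustrative sketch (not part of the original driver), a
 * user-level caller might play from track 1, index 1 through track
 * 2, index 1 like this:
 *
 *	struct cdrom_ti ti;
 *
 *	ti.cdti_trk0 = 1; ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 2; ti.cdti_ind1 = 1;
 *	if (ioctl(fd, CDROMPLAYTRKIND, &ti) < 0)
 *		perror("CDROMPLAYTRKIND");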
23990 * 23991 * Return Code: the code returned by sd_send_scsi_cmd() 23992 * EFAULT if ddi_copyxxx() fails 23993 * ENXIO if fail ddi_get_soft_state 23994 * EINVAL if data pointer is NULL 23995 */ 23996 23997 static int 23998 sr_play_trkind(dev_t dev, caddr_t data, int flag) 23999 { 24000 struct cdrom_ti ti_struct; 24001 struct cdrom_ti *ti = &ti_struct; 24002 struct uscsi_cmd *com = NULL; 24003 char cdb[CDB_GROUP1]; 24004 int rval; 24005 24006 if (data == NULL) { 24007 return (EINVAL); 24008 } 24009 24010 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24011 return (EFAULT); 24012 } 24013 24014 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24015 bzero(cdb, CDB_GROUP1); 24016 cdb[0] = SCMD_PLAYAUDIO_TI; 24017 cdb[4] = ti->cdti_trk0; 24018 cdb[5] = ti->cdti_ind0; 24019 cdb[7] = ti->cdti_trk1; 24020 cdb[8] = ti->cdti_ind1; 24021 com->uscsi_cdb = cdb; 24022 com->uscsi_cdblen = CDB_GROUP1; 24023 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24024 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24025 SD_PATH_STANDARD); 24026 kmem_free(com, sizeof (*com)); 24027 return (rval); 24028 } 24029 24030 24031 /* 24032 * Function: sr_read_all_subcodes() 24033 * 24034 * Description: This routine is the driver entry point for handling CD-ROM 24035 * ioctl requests to return raw subcode data while the target is 24036 * playing audio (CDROMSUBCODE). 24037 * 24038 * Arguments: dev - the device 'dev_t' 24039 * data - pointer to user provided cdrom subcode structure, 24040 * specifying the transfer length and address. 24041 * flag - this argument is a pass through to ddi_copyxxx() 24042 * directly from the mode argument of ioctl(). 24043 * 24044 * Return Code: the code returned by sd_send_scsi_cmd() 24045 * EFAULT if ddi_copyxxx() fails 24046 * ENXIO if fail ddi_get_soft_state 24047 * EINVAL if data pointer is NULL 24048 */ 24049 24050 static int 24051 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24052 { 24053 struct sd_lun *un = NULL; 24054 struct uscsi_cmd *com = NULL; 24055 struct cdrom_subcode *subcode = NULL; 24056 int rval; 24057 size_t buflen; 24058 char cdb[CDB_GROUP5]; 24059 24060 #ifdef _MULTI_DATAMODEL 24061 /* To support ILP32 applications in an LP64 world */ 24062 struct cdrom_subcode32 cdrom_subcode32; 24063 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24064 #endif 24065 if (data == NULL) { 24066 return (EINVAL); 24067 } 24068 24069 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24070 return (ENXIO); 24071 } 24072 24073 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24074 24075 #ifdef _MULTI_DATAMODEL 24076 switch (ddi_model_convert_from(flag & FMODELS)) { 24077 case DDI_MODEL_ILP32: 24078 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24079 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24080 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24081 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24082 return (EFAULT); 24083 } 24084 /* Convert the ILP32 uscsi data from the application to LP64 */ 24085 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24086 break; 24087 case DDI_MODEL_NONE: 24088 if (ddi_copyin(data, subcode, 24089 sizeof (struct cdrom_subcode), flag)) { 24090 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24091 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24092 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24093 return (EFAULT); 24094 } 24095 break; 24096 } 24097 #else /* ! 
_MULTI_DATAMODEL */ 24098 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24100 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24101 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24102 return (EFAULT); 24103 } 24104 #endif /* _MULTI_DATAMODEL */ 24105 24106 /* 24107 * Since MMC-2 expects max 3 bytes for length, check if the 24108 * length input is greater than 3 bytes 24109 */ 24110 if ((subcode->cdsc_length & 0xFF000000) != 0) { 24111 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24112 "sr_read_all_subcodes: " 24113 "cdrom transfer length too large: %d (limit %d)\n", 24114 subcode->cdsc_length, 0xFFFFFF); 24115 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24116 return (EINVAL); 24117 } 24118 24119 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 24120 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24121 bzero(cdb, CDB_GROUP5); 24122 24123 if (un->un_f_mmc_cap == TRUE) { 24124 cdb[0] = (char)SCMD_READ_CD; 24125 cdb[2] = (char)0xff; 24126 cdb[3] = (char)0xff; 24127 cdb[4] = (char)0xff; 24128 cdb[5] = (char)0xff; 24129 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24130 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24131 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 24132 cdb[10] = 1; 24133 } else { 24134 /* 24135 * Note: A vendor specific command (0xDF) is being used here to 24136 * request a read of all subcodes. 24137 */ 24138 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 24139 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 24140 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 24141 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 24142 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 24143 } 24144 com->uscsi_cdb = cdb; 24145 com->uscsi_cdblen = CDB_GROUP5; 24146 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 24147 com->uscsi_buflen = buflen; 24148 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24149 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24150 SD_PATH_STANDARD); 24151 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24152 kmem_free(com, sizeof (*com)); 24153 return (rval); 24154 } 24155 24156 24157 /* 24158 * Function: sr_read_subchannel() 24159 * 24160 * Description: This routine is the driver entry point for handling CD-ROM 24161 * ioctl requests to return the Q sub-channel data of the CD 24162 * current position block (CDROMSUBCHNL). The data includes the 24163 * track number, index number, absolute CD-ROM address (LBA or MSF 24164 * format per the user), track relative CD-ROM address (LBA or MSF 24165 * format per the user), control data and audio status. 24166 * 24167 * Arguments: dev - the device 'dev_t' 24168 * data - pointer to user provided cdrom sub-channel structure 24169 * flag - this argument is a pass through to ddi_copyxxx() 24170 * directly from the mode argument of ioctl().
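 *
 * As an illustrative sketch (not part of the original driver), a
 * user-level caller selects the address format on input and reads
 * the current position on output:
 *
 *	struct cdrom_subchnl sc;
 *
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0 &&
 *	    sc.cdsc_audiostatus == CDROM_AUDIO_PLAY)
 *		(void) printf("track %d at %02d:%02d\n", sc.cdsc_trk,
 *		    sc.cdsc_absaddr.msf.minute,
 *		    sc.cdsc_absaddr.msf.second);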
24171 * 24172 * Return Code: the code returned by sd_send_scsi_cmd() 24173 * EFAULT if ddi_copyxxx() fails 24174 * ENXIO if fail ddi_get_soft_state 24175 * EINVAL if data pointer is NULL 24176 */ 24177 24178 static int 24179 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24180 { 24181 struct sd_lun *un; 24182 struct uscsi_cmd *com; 24183 struct cdrom_subchnl subchanel; 24184 struct cdrom_subchnl *subchnl = &subchanel; 24185 char cdb[CDB_GROUP1]; 24186 caddr_t buffer; 24187 int rval; 24188 24189 if (data == NULL) { 24190 return (EINVAL); 24191 } 24192 24193 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24194 (un->un_state == SD_STATE_OFFLINE)) { 24195 return (ENXIO); 24196 } 24197 24198 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24199 return (EFAULT); 24200 } 24201 24202 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24203 bzero(cdb, CDB_GROUP1); 24204 cdb[0] = SCMD_READ_SUBCHANNEL; 24205 /* Set the MSF bit based on the user requested address format */ 24206 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24207 /* 24208 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24209 * returned 24210 */ 24211 cdb[2] = 0x40; 24212 /* 24213 * Set byte 3 to specify the return data format. A value of 0x01 24214 * indicates that the CD-ROM current position should be returned. 24215 */ 24216 cdb[3] = 0x01; 24217 cdb[8] = 0x10; 24218 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24219 com->uscsi_cdb = cdb; 24220 com->uscsi_cdblen = CDB_GROUP1; 24221 com->uscsi_bufaddr = buffer; 24222 com->uscsi_buflen = 16; 24223 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24224 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24225 SD_PATH_STANDARD); 24226 if (rval != 0) { 24227 kmem_free(buffer, 16); 24228 kmem_free(com, sizeof (*com)); 24229 return (rval); 24230 } 24231 24232 /* Process the returned Q sub-channel data */ 24233 subchnl->cdsc_audiostatus = buffer[1]; 24234 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24235 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24236 subchnl->cdsc_trk = buffer[6]; 24237 subchnl->cdsc_ind = buffer[7]; 24238 if (subchnl->cdsc_format & CDROM_LBA) { 24239 subchnl->cdsc_absaddr.lba = 24240 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24241 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24242 subchnl->cdsc_reladdr.lba = 24243 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24244 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24245 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24246 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24247 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24248 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24249 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24250 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24251 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24252 } else { 24253 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24254 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24255 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24256 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24257 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24258 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24259 } 24260 kmem_free(buffer, 16); 24261 kmem_free(com, sizeof (*com)); 24262 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24263 != 0) { 24264 return (EFAULT); 24265 } 24266 return (rval); 24267 } 24268 24269 24270 /* 24271 * Function: sr_read_tocentry() 24272 * 
24273 * Description: This routine is the driver entry point for handling CD-ROM 24274 * ioctl requests to read from the Table of Contents (TOC) 24275 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 24276 * fields, the starting address (LBA or MSF format per the user) 24277 * and the data mode if the user specified track is a data track. 24278 * 24279 * Note: The READ HEADER (0x44) command used in this routine is 24280 * obsolete per the SCSI MMC spec but still supported in the 24281 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 24282 * therefore the command is still implemented in this routine. 24283 * 24284 * Arguments: dev - the device 'dev_t' 24285 * data - pointer to user provided toc entry structure, 24286 * specifying the track # and the address format 24287 * (LBA or MSF). 24288 * flag - this argument is a pass through to ddi_copyxxx() 24289 * directly from the mode argument of ioctl(). 24290 * 24291 * Return Code: the code returned by sd_send_scsi_cmd() 24292 * EFAULT if ddi_copyxxx() fails 24293 * ENXIO if fail ddi_get_soft_state 24294 * EINVAL if data pointer is NULL 24295 */ 24296 24297 static int 24298 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 24299 { 24300 struct sd_lun *un = NULL; 24301 struct uscsi_cmd *com; 24302 struct cdrom_tocentry toc_entry; 24303 struct cdrom_tocentry *entry = &toc_entry; 24304 caddr_t buffer; 24305 int rval; 24306 char cdb[CDB_GROUP1]; 24307 24308 if (data == NULL) { 24309 return (EINVAL); 24310 } 24311 24312 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24313 (un->un_state == SD_STATE_OFFLINE)) { 24314 return (ENXIO); 24315 } 24316 24317 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 24318 return (EFAULT); 24319 } 24320 24321 /* Validate the requested track and address format */ 24322 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 24323 return (EINVAL); 24324 } 24325 24326 if (entry->cdte_track == 0) { 24327 return (EINVAL); 24328 } 24329 24330 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 24331 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24332 bzero(cdb, CDB_GROUP1); 24333 24334 cdb[0] = SCMD_READ_TOC; 24335 /* Set the MSF bit based on the user requested address format */ 24336 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 24337 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24338 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 24339 } else { 24340 cdb[6] = entry->cdte_track; 24341 } 24342 24343 /* 24344 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24345 * (4 byte TOC response header + 8 byte track descriptor) 24346 */ 24347 cdb[8] = 12; 24348 com->uscsi_cdb = cdb; 24349 com->uscsi_cdblen = CDB_GROUP1; 24350 com->uscsi_bufaddr = buffer; 24351 com->uscsi_buflen = 0x0C; 24352 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 24353 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24354 SD_PATH_STANDARD); 24355 if (rval != 0) { 24356 kmem_free(buffer, 12); 24357 kmem_free(com, sizeof (*com)); 24358 return (rval); 24359 } 24360 24361 /* Process the toc entry */ 24362 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 24363 entry->cdte_ctrl = (buffer[5] & 0x0F); 24364 if (entry->cdte_format & CDROM_LBA) { 24365 entry->cdte_addr.lba = 24366 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24367 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24368 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 24369 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 24370 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 24371 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 24372 /* 24373 * Send a READ TOC command using the LBA address format to get 24374 * the LBA for the track requested so it can be used in the 24375 * READ HEADER request 24376 * 24377 * Note: The MSF bit of the READ HEADER command specifies the 24378 * output format. The block address specified in that command 24379 * must be in LBA format. 24380 */ 24381 cdb[1] = 0; 24382 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24383 SD_PATH_STANDARD); 24384 if (rval != 0) { 24385 kmem_free(buffer, 12); 24386 kmem_free(com, sizeof (*com)); 24387 return (rval); 24388 } 24389 } else { 24390 entry->cdte_addr.msf.minute = buffer[9]; 24391 entry->cdte_addr.msf.second = buffer[10]; 24392 entry->cdte_addr.msf.frame = buffer[11]; 24393 /* 24394 * Send a READ TOC command using the LBA address format to get 24395 * the LBA for the track requested so it can be used in the 24396 * READ HEADER request 24397 * 24398 * Note: The MSF bit of the READ HEADER command specifies the 24399 * output format. The block address specified in that command 24400 * must be in LBA format. 24401 */ 24402 cdb[1] = 0; 24403 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24404 SD_PATH_STANDARD); 24405 if (rval != 0) { 24406 kmem_free(buffer, 12); 24407 kmem_free(com, sizeof (*com)); 24408 return (rval); 24409 } 24410 } 24411 24412 /* 24413 * Build and send the READ HEADER command to determine the data mode of 24414 * the user specified track. 24415 */ 24416 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 24417 (entry->cdte_track != CDROM_LEADOUT)) { 24418 bzero(cdb, CDB_GROUP1); 24419 cdb[0] = SCMD_READ_HEADER; 24420 cdb[2] = buffer[8]; 24421 cdb[3] = buffer[9]; 24422 cdb[4] = buffer[10]; 24423 cdb[5] = buffer[11]; 24424 cdb[8] = 0x08; 24425 com->uscsi_buflen = 0x08; 24426 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24427 SD_PATH_STANDARD); 24428 if (rval == 0) { 24429 entry->cdte_datamode = buffer[0]; 24430 } else { 24431 /* 24432 * The READ HEADER command failed; since this command is 24433 * obsolete in one spec, it's better to return 24434 * -1 for an invalid track so that we can still 24435 * receive the rest of the TOC data.
24436 */ 24437 entry->cdte_datamode = (uchar_t)-1; 24438 } 24439 } else { 24440 entry->cdte_datamode = (uchar_t)-1; 24441 } 24442 24443 kmem_free(buffer, 12); 24444 kmem_free(com, sizeof (*com)); 24445 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 24446 return (EFAULT); 24447 24448 return (rval); 24449 } 24450 24451 24452 /* 24453 * Function: sr_read_tochdr() 24454 * 24455 * Description: This routine is the driver entry point for handling CD-ROM 24456 * ioctl requests to read the Table of Contents (TOC) header 24457 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 24458 * and ending track numbers. 24459 * 24460 * Arguments: dev - the device 'dev_t' 24461 * data - pointer to user provided toc header structure, 24462 * specifying the starting and ending track numbers. 24463 * flag - this argument is a pass through to ddi_copyxxx() 24464 * directly from the mode argument of ioctl(). 24465 * 24466 * Return Code: the code returned by sd_send_scsi_cmd() 24467 * EFAULT if ddi_copyxxx() fails 24468 * ENXIO if fail ddi_get_soft_state 24469 * EINVAL if data pointer is NULL 24470 */ 24471 24472 static int 24473 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 24474 { 24475 struct sd_lun *un; 24476 struct uscsi_cmd *com; 24477 struct cdrom_tochdr toc_header; 24478 struct cdrom_tochdr *hdr = &toc_header; 24479 char cdb[CDB_GROUP1]; 24480 int rval; 24481 caddr_t buffer; 24482 24483 if (data == NULL) { 24484 return (EINVAL); 24485 } 24486 24487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24488 (un->un_state == SD_STATE_OFFLINE)) { 24489 return (ENXIO); 24490 } 24491 24492 buffer = kmem_zalloc(4, KM_SLEEP); 24493 bzero(cdb, CDB_GROUP1); 24494 cdb[0] = SCMD_READ_TOC; 24495 /* 24496 * Specifying a track number of 0x00 in the READ TOC command indicates 24497 * that the TOC header should be returned 24498 */ 24499 cdb[6] = 0x00; 24500 /* 24501 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 24502 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 24503 */ 24504 cdb[8] = 0x04; 24505 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24506 com->uscsi_cdb = cdb; 24507 com->uscsi_cdblen = CDB_GROUP1; 24508 com->uscsi_bufaddr = buffer; 24509 com->uscsi_buflen = 0x04; 24510 com->uscsi_timeout = 300; 24511 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24512 24513 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24514 SD_PATH_STANDARD); 24515 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 24516 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 24517 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 24518 } else { 24519 hdr->cdth_trk0 = buffer[2]; 24520 hdr->cdth_trk1 = buffer[3]; 24521 } 24522 kmem_free(buffer, 4); 24523 kmem_free(com, sizeof (*com)); 24524 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 24525 return (EFAULT); 24526 } 24527 return (rval); 24528 } 24529 24530 24531 /* 24532 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 24533 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 24534 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 24535 * digital audio and extended architecture digital audio. These modes are 24536 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 24537 * MMC specs.
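 *
 * For reference (a summary, not part of the original comment): mode 1
 * user data is 2048 bytes per sector, mode 2 user data is 2336 bytes,
 * and raw/CD-DA frames are 2352 bytes (2368/2448 with Q or all
 * subcode attached, as used by sr_read_cdda() below). A transfer
 * length is therefore sized as blocks * block size; e.g. reading 16
 * mode 2 sectors needs a 16 * 2336 = 37376 byte buffer.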
24538 * 24539 * In addition to support for the various data formats these routines also 24540 * include support for devices that implement only the direct access READ 24541 * commands (0x08, 0x28), devices that implement the READ_CD commands 24542 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 24543 * READ CDXA commands (0xD8, 0xDB) 24544 */ 24545 24546 /* 24547 * Function: sr_read_mode1() 24548 * 24549 * Description: This routine is the driver entry point for handling CD-ROM 24550 * ioctl read mode1 requests (CDROMREADMODE1). 24551 * 24552 * Arguments: dev - the device 'dev_t' 24553 * data - pointer to user provided cd read structure specifying 24554 * the lba buffer address and length. 24555 * flag - this argument is a pass through to ddi_copyxxx() 24556 * directly from the mode argument of ioctl(). 24557 * 24558 * Return Code: the code returned by sd_send_scsi_cmd() 24559 * EFAULT if ddi_copyxxx() fails 24560 * ENXIO if fail ddi_get_soft_state 24561 * EINVAL if data pointer is NULL 24562 */ 24563 24564 static int 24565 sr_read_mode1(dev_t dev, caddr_t data, int flag) 24566 { 24567 struct sd_lun *un; 24568 struct cdrom_read mode1_struct; 24569 struct cdrom_read *mode1 = &mode1_struct; 24570 int rval; 24571 #ifdef _MULTI_DATAMODEL 24572 /* To support ILP32 applications in an LP64 world */ 24573 struct cdrom_read32 cdrom_read32; 24574 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24575 #endif /* _MULTI_DATAMODEL */ 24576 24577 if (data == NULL) { 24578 return (EINVAL); 24579 } 24580 24581 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24582 (un->un_state == SD_STATE_OFFLINE)) { 24583 return (ENXIO); 24584 } 24585 24586 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24587 "sd_read_mode1: entry: un:0x%p\n", un); 24588 24589 #ifdef _MULTI_DATAMODEL 24590 switch (ddi_model_convert_from(flag & FMODELS)) { 24591 case DDI_MODEL_ILP32: 24592 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24593 return (EFAULT); 24594 } 24595 /* Convert the ILP32 uscsi data from the application to LP64 */ 24596 cdrom_read32tocdrom_read(cdrd32, mode1); 24597 break; 24598 case DDI_MODEL_NONE: 24599 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24600 return (EFAULT); 24601 } 24602 } 24603 #else /* ! _MULTI_DATAMODEL */ 24604 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 24605 return (EFAULT); 24606 } 24607 #endif /* _MULTI_DATAMODEL */ 24608 24609 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 24610 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 24611 24612 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24613 "sd_read_mode1: exit: un:0x%p\n", un); 24614 24615 return (rval); 24616 } 24617 24618 24619 /* 24620 * Function: sr_read_cd_mode2() 24621 * 24622 * Description: This routine is the driver entry point for handling CD-ROM 24623 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24624 * support the READ CD (0xBE) command or the 1st generation 24625 * READ CD (0xD4) command. 24626 * 24627 * Arguments: dev - the device 'dev_t' 24628 * data - pointer to user provided cd read structure specifying 24629 * the lba buffer address and length. 24630 * flag - this argument is a pass through to ddi_copyxxx() 24631 * directly from the mode argument of ioctl(). 
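 *
 * As an illustrative sketch (not part of the original driver), a
 * user-level caller reads eight 2336 byte mode 2 sectors starting
 * at an assumed 'lba':
 *
 *	struct cdrom_read cr;
 *	char buf[8 * 2336];
 *
 *	cr.cdread_lba = lba;
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = sizeof (buf);
 *	if (ioctl(fd, CDROMREADMODE2, &cr) < 0)
 *		perror("CDROMREADMODE2");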
24632 * 24633 * Return Code: the code returned by sd_send_scsi_cmd() 24634 * EFAULT if ddi_copyxxx() fails 24635 * ENXIO if fail ddi_get_soft_state 24636 * EINVAL if data pointer is NULL 24637 */ 24638 24639 static int 24640 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 24641 { 24642 struct sd_lun *un; 24643 struct uscsi_cmd *com; 24644 struct cdrom_read mode2_struct; 24645 struct cdrom_read *mode2 = &mode2_struct; 24646 uchar_t cdb[CDB_GROUP5]; 24647 int nblocks; 24648 int rval; 24649 #ifdef _MULTI_DATAMODEL 24650 /* To support ILP32 applications in an LP64 world */ 24651 struct cdrom_read32 cdrom_read32; 24652 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24653 #endif /* _MULTI_DATAMODEL */ 24654 24655 if (data == NULL) { 24656 return (EINVAL); 24657 } 24658 24659 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24660 (un->un_state == SD_STATE_OFFLINE)) { 24661 return (ENXIO); 24662 } 24663 24664 #ifdef _MULTI_DATAMODEL 24665 switch (ddi_model_convert_from(flag & FMODELS)) { 24666 case DDI_MODEL_ILP32: 24667 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24668 return (EFAULT); 24669 } 24670 /* Convert the ILP32 uscsi data from the application to LP64 */ 24671 cdrom_read32tocdrom_read(cdrd32, mode2); 24672 break; 24673 case DDI_MODEL_NONE: 24674 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24675 return (EFAULT); 24676 } 24677 break; 24678 } 24679 24680 #else /* ! _MULTI_DATAMODEL */ 24681 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24682 return (EFAULT); 24683 } 24684 #endif /* _MULTI_DATAMODEL */ 24685 24686 bzero(cdb, sizeof (cdb)); 24687 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 24688 /* Read command supported by 1st generation atapi drives */ 24689 cdb[0] = SCMD_READ_CDD4; 24690 } else { 24691 /* Universal CD Access Command */ 24692 cdb[0] = SCMD_READ_CD; 24693 } 24694 24695 /* 24696 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 24697 */ 24698 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 24699 24700 /* set the start address */ 24701 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 24702 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 24703 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24704 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 24705 24706 /* set the transfer length */ 24707 nblocks = mode2->cdread_buflen / 2336; 24708 cdb[6] = (uchar_t)(nblocks >> 16); 24709 cdb[7] = (uchar_t)(nblocks >> 8); 24710 cdb[8] = (uchar_t)nblocks; 24711 24712 /* set the filter bits */ 24713 cdb[9] = CDROM_READ_CD_USERDATA; 24714 24715 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24716 com->uscsi_cdb = (caddr_t)cdb; 24717 com->uscsi_cdblen = sizeof (cdb); 24718 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24719 com->uscsi_buflen = mode2->cdread_buflen; 24720 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24721 24722 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24723 SD_PATH_STANDARD); 24724 kmem_free(com, sizeof (*com)); 24725 return (rval); 24726 } 24727 24728 24729 /* 24730 * Function: sr_read_mode2() 24731 * 24732 * Description: This routine is the driver entry point for handling CD-ROM 24733 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 24734 * do not support the READ CD (0xBE) command. 24735 * 24736 * Arguments: dev - the device 'dev_t' 24737 * data - pointer to user provided cd read structure specifying 24738 * the lba buffer address and length. 24739 * flag - this argument is a pass through to ddi_copyxxx() 24740 * directly from the mode argument of ioctl().
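 *
 * Because this path temporarily changes the device block size, it
 * fails with EAGAIN while other commands are in progress (see the
 * un_ncmds_in_driver check below). An illustrative caller (not part
 * of the original driver) can simply retry:
 *
 *	while (ioctl(fd, CDROMREADMODE2, &cr) < 0 && errno == EAGAIN)
 *		continue;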
24741 * 24742 * Return Code: the code returned by sd_send_scsi_cmd() 24743 * EFAULT if ddi_copyxxx() fails 24744 * ENXIO if fail ddi_get_soft_state 24745 * EINVAL if data pointer is NULL 24746 * EIO if fail to reset block size 24747 * EAGAIN if commands are in progress in the driver 24748 */ 24749 24750 static int 24751 sr_read_mode2(dev_t dev, caddr_t data, int flag) 24752 { 24753 struct sd_lun *un; 24754 struct cdrom_read mode2_struct; 24755 struct cdrom_read *mode2 = &mode2_struct; 24756 int rval; 24757 uint32_t restore_blksize; 24758 struct uscsi_cmd *com; 24759 uchar_t cdb[CDB_GROUP0]; 24760 int nblocks; 24761 24762 #ifdef _MULTI_DATAMODEL 24763 /* To support ILP32 applications in an LP64 world */ 24764 struct cdrom_read32 cdrom_read32; 24765 struct cdrom_read32 *cdrd32 = &cdrom_read32; 24766 #endif /* _MULTI_DATAMODEL */ 24767 24768 if (data == NULL) { 24769 return (EINVAL); 24770 } 24771 24772 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24773 (un->un_state == SD_STATE_OFFLINE)) { 24774 return (ENXIO); 24775 } 24776 24777 /* 24778 * Because this routine will update the device and driver block size 24779 * being used we want to make sure there are no commands in progress. 24780 * If commands are in progress the user will have to try again. 24781 * 24782 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 24783 * in sdioctl to protect commands from sdioctl through to the top of 24784 * sd_uscsi_strategy. See sdioctl for details. 24785 */ 24786 mutex_enter(SD_MUTEX(un)); 24787 if (un->un_ncmds_in_driver != 1) { 24788 mutex_exit(SD_MUTEX(un)); 24789 return (EAGAIN); 24790 } 24791 mutex_exit(SD_MUTEX(un)); 24792 24793 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24794 "sd_read_mode2: entry: un:0x%p\n", un); 24795 24796 #ifdef _MULTI_DATAMODEL 24797 switch (ddi_model_convert_from(flag & FMODELS)) { 24798 case DDI_MODEL_ILP32: 24799 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 24800 return (EFAULT); 24801 } 24802 /* Convert the ILP32 uscsi data from the application to LP64 */ 24803 cdrom_read32tocdrom_read(cdrd32, mode2); 24804 break; 24805 case DDI_MODEL_NONE: 24806 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 24807 return (EFAULT); 24808 } 24809 break; 24810 } 24811 #else /* ! 
_MULTI_DATAMODEL */ 24812 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 24813 return (EFAULT); 24814 } 24815 #endif /* _MULTI_DATAMODEL */ 24816 24817 /* Store the current target block size for restoration later */ 24818 restore_blksize = un->un_tgt_blocksize; 24819 24820 /* Change the device and soft state target block size to 2336 */ 24821 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 24822 rval = EIO; 24823 goto done; 24824 } 24825 24826 24827 bzero(cdb, sizeof (cdb)); 24828 24829 /* set READ operation */ 24830 cdb[0] = SCMD_READ; 24831 24832 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 24833 mode2->cdread_lba >>= 2; 24834 24835 /* set the start address */ 24836 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 24837 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 24838 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 24839 24840 /* set the transfer length */ 24841 nblocks = mode2->cdread_buflen / 2336; 24842 cdb[4] = (uchar_t)nblocks & 0xFF; 24843 24844 /* build command */ 24845 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24846 com->uscsi_cdb = (caddr_t)cdb; 24847 com->uscsi_cdblen = sizeof (cdb); 24848 com->uscsi_bufaddr = mode2->cdread_bufaddr; 24849 com->uscsi_buflen = mode2->cdread_buflen; 24850 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24851 24852 /* 24853 * Issue SCSI command with user space address for read buffer. 24854 * 24855 * This sends the command through main channel in the driver. 24856 * 24857 * Since this is accessed via an IOCTL call, we go through the 24858 * standard path, so that if the device was powered down, then 24859 * it would be 'awakened' to handle the command. 24860 */ 24861 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 24862 SD_PATH_STANDARD); 24863 24864 kmem_free(com, sizeof (*com)); 24865 24866 /* Restore the device and soft state target block size */ 24867 if (sr_sector_mode(dev, restore_blksize) != 0) { 24868 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24869 "can't do switch back to mode 1\n"); 24870 /* 24871 * If sd_send_scsi_READ succeeded we still need to report 24872 * an error because we failed to reset the block size 24873 */ 24874 if (rval == 0) { 24875 rval = EIO; 24876 } 24877 } 24878 24879 done: 24880 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 24881 "sd_read_mode2: exit: un:0x%p\n", un); 24882 24883 return (rval); 24884 } 24885 24886 24887 /* 24888 * Function: sr_sector_mode() 24889 * 24890 * Description: This utility function is used by sr_read_mode2 to set the target 24891 * block size based on the user specified size. This is a legacy 24892 * implementation based upon a vendor specific mode page. 24893 * 24894 * Arguments: dev - the device 'dev_t' 24895 * blksize - flag indicating if block size is being set to 2336 or 24896 * 512.
24897 * 24898 * Return Code: the code returned by sd_send_scsi_cmd() 24899 * EFAULT if ddi_copyxxx() fails 24900 * ENXIO if fail ddi_get_soft_state 24901 * EINVAL if data pointer is NULL 24902 */ 24903 24904 static int 24905 sr_sector_mode(dev_t dev, uint32_t blksize) 24906 { 24907 struct sd_lun *un; 24908 uchar_t *sense; 24909 uchar_t *select; 24910 int rval; 24911 24912 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24913 (un->un_state == SD_STATE_OFFLINE)) { 24914 return (ENXIO); 24915 } 24916 24917 sense = kmem_zalloc(20, KM_SLEEP); 24918 24919 /* Note: This is a vendor specific mode page (0x81) */ 24920 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 24921 SD_PATH_STANDARD)) != 0) { 24922 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24923 "sr_sector_mode: Mode Sense failed\n"); 24924 kmem_free(sense, 20); 24925 return (rval); 24926 } 24927 select = kmem_zalloc(20, KM_SLEEP); 24928 select[3] = 0x08; 24929 select[10] = ((blksize >> 8) & 0xff); 24930 select[11] = (blksize & 0xff); 24931 select[12] = 0x01; 24932 select[13] = 0x06; 24933 select[14] = sense[14]; 24934 select[15] = sense[15]; 24935 if (blksize == SD_MODE2_BLKSIZE) { 24936 select[14] |= 0x01; 24937 } 24938 24939 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 24940 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24941 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 24942 "sr_sector_mode: Mode Select failed\n"); 24943 } else { 24944 /* 24945 * Only update the softstate block size if we successfully 24946 * changed the device block mode. 24947 */ 24948 mutex_enter(SD_MUTEX(un)); 24949 sd_update_block_info(un, blksize, 0); 24950 mutex_exit(SD_MUTEX(un)); 24951 } 24952 kmem_free(sense, 20); 24953 kmem_free(select, 20); 24954 return (rval); 24955 } 24956 24957 24958 /* 24959 * Function: sr_read_cdda() 24960 * 24961 * Description: This routine is the driver entry point for handling CD-ROM 24962 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 24963 * the target supports CDDA these requests are handled via a vendor 24964 * specific command (0xD8) If the target does not support CDDA 24965 * these requests are handled via the READ CD command (0xBE). 24966 * 24967 * Arguments: dev - the device 'dev_t' 24968 * data - pointer to user provided CD-DA structure specifying 24969 * the track starting address, transfer length, and 24970 * subcode options. 24971 * flag - this argument is a pass through to ddi_copyxxx() 24972 * directly from the mode argument of ioctl(). 
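 *
 * The required transfer buffer is cdda_length blocks times the block
 * size implied by the subcode option: 2352 bytes (no subcode), 2368
 * (Q subcode), 2448 (all subcode), or 96 (subcode only); see the
 * buflen computation below. As an illustrative sketch (not part of
 * the original driver), a user-level request for one raw audio block:
 *
 *	struct cdrom_cdda cdda;
 *	uchar_t buf[2352];
 *
 *	cdda.cdda_addr = lba;
 *	cdda.cdda_length = 1;
 *	cdda.cdda_data = (caddr_t)buf;
 *	cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *	if (ioctl(fd, CDROMCDDA, &cdda) < 0)
 *		perror("CDROMCDDA");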
24973 * 24974 * Return Code: the code returned by sd_send_scsi_cmd() 24975 * EFAULT if ddi_copyxxx() fails 24976 * ENXIO if fail ddi_get_soft_state 24977 * EINVAL if invalid arguments are provided 24978 * ENOTTY 24979 */ 24980 24981 static int 24982 sr_read_cdda(dev_t dev, caddr_t data, int flag) 24983 { 24984 struct sd_lun *un; 24985 struct uscsi_cmd *com; 24986 struct cdrom_cdda *cdda; 24987 int rval; 24988 size_t buflen; 24989 char cdb[CDB_GROUP5]; 24990 24991 #ifdef _MULTI_DATAMODEL 24992 /* To support ILP32 applications in an LP64 world */ 24993 struct cdrom_cdda32 cdrom_cdda32; 24994 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 24995 #endif /* _MULTI_DATAMODEL */ 24996 24997 if (data == NULL) { 24998 return (EINVAL); 24999 } 25000 25001 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25002 return (ENXIO); 25003 } 25004 25005 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25006 25007 #ifdef _MULTI_DATAMODEL 25008 switch (ddi_model_convert_from(flag & FMODELS)) { 25009 case DDI_MODEL_ILP32: 25010 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25011 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25012 "sr_read_cdda: ddi_copyin Failed\n"); 25013 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25014 return (EFAULT); 25015 } 25016 /* Convert the ILP32 uscsi data from the application to LP64 */ 25017 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25018 break; 25019 case DDI_MODEL_NONE: 25020 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25021 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25022 "sr_read_cdda: ddi_copyin Failed\n"); 25023 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25024 return (EFAULT); 25025 } 25026 break; 25027 } 25028 #else /* ! _MULTI_DATAMODEL */ 25029 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25030 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25031 "sr_read_cdda: ddi_copyin Failed\n"); 25032 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25033 return (EFAULT); 25034 } 25035 #endif /* _MULTI_DATAMODEL */ 25036 25037 /* 25038 * Since MMC-2 expects max 3 bytes for length, check if the 25039 * length input is greater than 3 bytes 25040 */ 25041 if ((cdda->cdda_length & 0xFF000000) != 0) { 25042 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25043 "cdrom transfer length too large: %d (limit %d)\n", 25044 cdda->cdda_length, 0xFFFFFF); 25045 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25046 return (EINVAL); 25047 } 25048 25049 switch (cdda->cdda_subcode) { 25050 case CDROM_DA_NO_SUBCODE: 25051 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25052 break; 25053 case CDROM_DA_SUBQ: 25054 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25055 break; 25056 case CDROM_DA_ALL_SUBCODE: 25057 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25058 break; 25059 case CDROM_DA_SUBCODE_ONLY: 25060 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25061 break; 25062 default: 25063 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25064 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25065 cdda->cdda_subcode); 25066 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25067 return (EINVAL); 25068 } 25069 25070 /* Build and send the command */ 25071 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25072 bzero(cdb, CDB_GROUP5); 25073 25074 if (un->un_f_cfg_cdda == TRUE) { 25075 cdb[0] = (char)SCMD_READ_CD; 25076 cdb[1] = 0x04; 25077 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25078 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25079 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25080 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25081 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25082 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25083 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25084 cdb[9] = 0x10; 25085 switch (cdda->cdda_subcode) { 25086 case CDROM_DA_NO_SUBCODE : 25087 cdb[10] = 0x0; 25088 break; 25089 case CDROM_DA_SUBQ : 25090 cdb[10] = 0x2; 25091 break; 25092 case CDROM_DA_ALL_SUBCODE : 25093 cdb[10] = 0x1; 25094 break; 25095 case CDROM_DA_SUBCODE_ONLY : 25096 /* FALLTHROUGH */ 25097 default : 25098 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25099 kmem_free(com, sizeof (*com)); 25100 return (ENOTTY); 25101 } 25102 } else { 25103 cdb[0] = (char)SCMD_READ_CDDA; 25104 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25105 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25106 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25107 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25108 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25109 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25110 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25111 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25112 cdb[10] = cdda->cdda_subcode; 25113 } 25114 25115 com->uscsi_cdb = cdb; 25116 com->uscsi_cdblen = CDB_GROUP5; 25117 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25118 com->uscsi_buflen = buflen; 25119 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25120 25121 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25122 SD_PATH_STANDARD); 25123 25124 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25125 kmem_free(com, sizeof (*com)); 25126 return (rval); 25127 } 25128 25129 25130 /* 25131 * Function: sr_read_cdxa() 25132 * 25133 * Description: This routine is the driver entry point for handling CD-ROM 25134 * ioctl requests to return CD-XA (Extended Architecture) data. 25135 * (CDROMCDXA). 25136 * 25137 * Arguments: dev - the device 'dev_t' 25138 * data - pointer to user provided CD-XA structure specifying 25139 * the data starting address, transfer length, and format 25140 * flag - this argument is a pass through to ddi_copyxxx() 25141 * directly from the mode argument of ioctl(). 25142 * 25143 * Return Code: the code returned by sd_send_scsi_cmd() 25144 * EFAULT if ddi_copyxxx() fails 25145 * ENXIO if fail ddi_get_soft_state 25146 * EINVAL if data pointer is NULL 25147 */ 25148 25149 static int 25150 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25151 { 25152 struct sd_lun *un; 25153 struct uscsi_cmd *com; 25154 struct cdrom_cdxa *cdxa; 25155 int rval; 25156 size_t buflen; 25157 char cdb[CDB_GROUP5]; 25158 uchar_t read_flags; 25159 25160 #ifdef _MULTI_DATAMODEL 25161 /* To support ILP32 applications in an LP64 world */ 25162 struct cdrom_cdxa32 cdrom_cdxa32; 25163 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25164 #endif /* _MULTI_DATAMODEL */ 25165 25166 if (data == NULL) { 25167 return (EINVAL); 25168 } 25169 25170 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25171 return (ENXIO); 25172 } 25173 25174 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25175 25176 #ifdef _MULTI_DATAMODEL 25177 switch (ddi_model_convert_from(flag & FMODELS)) { 25178 case DDI_MODEL_ILP32: 25179 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25180 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25181 return (EFAULT); 25182 } 25183 /* 25184 * Convert the ILP32 uscsi data from the 25185 * application to LP64 for internal use. 
25186 */ 25187 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 25188 break; 25189 case DDI_MODEL_NONE: 25190 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25191 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25192 return (EFAULT); 25193 } 25194 break; 25195 } 25196 #else /* ! _MULTI_DATAMODEL */ 25197 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 25198 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25199 return (EFAULT); 25200 } 25201 #endif /* _MULTI_DATAMODEL */ 25202 25203 /* 25204 * Since MMC-2 expects max 3 bytes for length, check if the 25205 * length input is greater than 3 bytes 25206 */ 25207 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 25208 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 25209 "cdrom transfer length too large: %d (limit %d)\n", 25210 cdxa->cdxa_length, 0xFFFFFF); 25211 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25212 return (EINVAL); 25213 } 25214 25215 switch (cdxa->cdxa_format) { 25216 case CDROM_XA_DATA: 25217 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 25218 read_flags = 0x10; 25219 break; 25220 case CDROM_XA_SECTOR_DATA: 25221 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 25222 read_flags = 0xf8; 25223 break; 25224 case CDROM_XA_DATA_W_ERROR: 25225 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 25226 read_flags = 0xfc; 25227 break; 25228 default: 25229 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25230 "sr_read_cdxa: Format '0x%x' Not Supported\n", 25231 cdxa->cdxa_format); 25232 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25233 return (EINVAL); 25234 } 25235 25236 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25237 bzero(cdb, CDB_GROUP5); 25238 if (un->un_f_mmc_cap == TRUE) { 25239 cdb[0] = (char)SCMD_READ_CD; 25240 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25241 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25242 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25243 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25244 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25245 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25246 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 25247 cdb[9] = (char)read_flags; 25248 } else { 25249 /* 25250 * Note: A vendor specific command (0xDB) is being used here to 25251 * request a read of all subcodes.
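 * (For comparison: the MMC READ CD form above packs a three byte
 * transfer length into CDB bytes 6-8 with the read flags in byte 9,
 * while this vendor unique CDB carries a four byte length in bytes
 * 6-9 and the format code in byte 10, as built below.)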
25252 */ 25253 cdb[0] = (char)SCMD_READ_CDXA; 25254 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25255 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25256 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25257 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25258 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25259 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25260 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25261 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25262 cdb[10] = cdxa->cdxa_format; 25263 } 25264 com->uscsi_cdb = cdb; 25265 com->uscsi_cdblen = CDB_GROUP5; 25266 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25267 com->uscsi_buflen = buflen; 25268 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25269 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25270 SD_PATH_STANDARD); 25271 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25272 kmem_free(com, sizeof (*com)); 25273 return (rval); 25274 } 25275 25276 25277 /* 25278 * Function: sr_eject() 25279 * 25280 * Description: This routine is the driver entry point for handling CD-ROM 25281 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25282 * 25283 * Arguments: dev - the device 'dev_t' 25284 * 25285 * Return Code: the code returned by sd_send_scsi_cmd() 25286 */ 25287 25288 static int 25289 sr_eject(dev_t dev) 25290 { 25291 struct sd_lun *un; 25292 int rval; 25293 25294 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25295 (un->un_state == SD_STATE_OFFLINE)) { 25296 return (ENXIO); 25297 } 25298 25299 /* 25300 * To prevent race conditions with the eject 25301 * command, keep track of an eject command as 25302 * it progresses. If we are already handling 25303 * an eject command in the driver for the given 25304 * unit and another request to eject is received 25305 * immediately return EAGAIN so we don't lose 25306 * the command if the current eject command fails. 25307 */ 25308 mutex_enter(SD_MUTEX(un)); 25309 if (un->un_f_ejecting == TRUE) { 25310 mutex_exit(SD_MUTEX(un)); 25311 return (EAGAIN); 25312 } 25313 un->un_f_ejecting = TRUE; 25314 mutex_exit(SD_MUTEX(un)); 25315 25316 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25317 SD_PATH_STANDARD)) != 0) { 25318 mutex_enter(SD_MUTEX(un)); 25319 un->un_f_ejecting = FALSE; 25320 mutex_exit(SD_MUTEX(un)); 25321 return (rval); 25322 } 25323 25324 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25325 SD_PATH_STANDARD); 25326 25327 if (rval == 0) { 25328 mutex_enter(SD_MUTEX(un)); 25329 sr_ejected(un); 25330 un->un_mediastate = DKIO_EJECTED; 25331 un->un_f_ejecting = FALSE; 25332 cv_broadcast(&un->un_state_cv); 25333 mutex_exit(SD_MUTEX(un)); 25334 } else { 25335 mutex_enter(SD_MUTEX(un)); 25336 un->un_f_ejecting = FALSE; 25337 mutex_exit(SD_MUTEX(un)); 25338 } 25339 return (rval); 25340 } 25341 25342 25343 /* 25344 * Function: sr_ejected() 25345 * 25346 * Description: This routine updates the soft state structure to invalidate the 25347 * geometry information after the media has been ejected or a 25348 * media eject has been detected. 
25349 * 25350 * Arguments: un - driver soft state (unit) structure 25351 */ 25352 25353 static void 25354 sr_ejected(struct sd_lun *un) 25355 { 25356 struct sd_errstats *stp; 25357 25358 ASSERT(un != NULL); 25359 ASSERT(mutex_owned(SD_MUTEX(un))); 25360 25361 un->un_f_blockcount_is_valid = FALSE; 25362 un->un_f_tgt_blocksize_is_valid = FALSE; 25363 mutex_exit(SD_MUTEX(un)); 25364 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 25365 mutex_enter(SD_MUTEX(un)); 25366 25367 if (un->un_errstats != NULL) { 25368 stp = (struct sd_errstats *)un->un_errstats->ks_data; 25369 stp->sd_capacity.value.ui64 = 0; 25370 } 25371 } 25372 25373 25374 /* 25375 * Function: sr_check_wp() 25376 * 25377 * Description: This routine checks the write protection of removable media 25378 * disks and hotpluggable devices via the write protect bit of 25379 * the Mode Page Header device specific field. Some devices choke 25380 * on an unsupported mode page. To work around this issue, 25381 * this routine has been implemented to use the 0x3f mode page 25382 * (request for all pages) for all device types. 25383 * 25384 * Arguments: dev - the device 'dev_t' 25385 * 25386 * Return Code: int indicating if the device is write protected (1) or not (0) 25387 * 25388 * Context: Kernel thread. 25389 * 25390 */ 25391 25392 static int 25393 sr_check_wp(dev_t dev) 25394 { 25395 struct sd_lun *un; 25396 uchar_t device_specific; 25397 uchar_t *sense; 25398 int hdrlen; 25399 int rval = FALSE; 25400 25401 /* 25402 * Note: The return codes for this routine should be reworked to 25403 * properly handle the case of a NULL softstate. 25404 */ 25405 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25406 return (FALSE); 25407 } 25408 25409 if (un->un_f_cfg_is_atapi == TRUE) { 25410 /* 25411 * The mode page contents are not required; set the allocation 25412 * length for the mode page header only 25413 */ 25414 hdrlen = MODE_HEADER_LENGTH_GRP2; 25415 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25416 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 25417 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25418 goto err_exit; 25419 device_specific = 25420 ((struct mode_header_grp2 *)sense)->device_specific; 25421 } else { 25422 hdrlen = MODE_HEADER_LENGTH; 25423 sense = kmem_zalloc(hdrlen, KM_SLEEP); 25424 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 25425 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0) 25426 goto err_exit; 25427 device_specific = 25428 ((struct mode_header *)sense)->device_specific; 25429 } 25430 25431 /* 25432 * The device is write protected if the WRITE_PROTECT bit is set in 25433 * the device specific field. (If the mode sense above failed, we 25434 * return FALSE: not all disks understand the query, and such 25435 * devices are assumed not writable.) */ 25436 if (device_specific & WRITE_PROTECT) { 25437 rval = TRUE; 25438 } 25439 25440 err_exit: 25441 kmem_free(sense, hdrlen); 25442 return (rval); 25443 } 25444 25445 /* 25446 * Function: sr_volume_ctrl() 25447 * 25448 * Description: This routine is the driver entry point for handling CD-ROM 25449 * audio output volume ioctl requests (CDROMVOLCTRL). 25450 * 25451 * Arguments: dev - the device 'dev_t' 25452 * data - pointer to user audio volume control structure 25453 * flag - this argument is a pass through to ddi_copyxxx() 25454 * directly from the mode argument of ioctl().
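 *
 * As an illustrative sketch (not part of the original driver), a
 * user-level caller sets both supported channels to full volume:
 *
 *	struct cdrom_volctrl v;
 *
 *	v.channel0 = 255;
 *	v.channel1 = 255;
 *	if (ioctl(fd, CDROMVOLCTRL, &v) < 0)
 *		perror("CDROMVOLCTRL");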
25455 * 25456 * Return Code: the code returned by sd_send_scsi_cmd() 25457 * EFAULT if ddi_copyxxx() fails 25458 * ENXIO if fail ddi_get_soft_state 25459 * EINVAL if data pointer is NULL 25460 * 25461 */ 25462 25463 static int 25464 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25465 { 25466 struct sd_lun *un; 25467 struct cdrom_volctrl volume; 25468 struct cdrom_volctrl *vol = &volume; 25469 uchar_t *sense_page; 25470 uchar_t *select_page; 25471 uchar_t *sense; 25472 uchar_t *select; 25473 int sense_buflen; 25474 int select_buflen; 25475 int rval; 25476 25477 if (data == NULL) { 25478 return (EINVAL); 25479 } 25480 25481 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25482 (un->un_state == SD_STATE_OFFLINE)) { 25483 return (ENXIO); 25484 } 25485 25486 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 25487 return (EFAULT); 25488 } 25489 25490 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 25491 struct mode_header_grp2 *sense_mhp; 25492 struct mode_header_grp2 *select_mhp; 25493 int bd_len; 25494 25495 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 25496 select_buflen = MODE_HEADER_LENGTH_GRP2 + 25497 MODEPAGE_AUDIO_CTRL_LEN; 25498 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25499 select = kmem_zalloc(select_buflen, KM_SLEEP); 25500 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 25501 sense_buflen, MODEPAGE_AUDIO_CTRL, 25502 SD_PATH_STANDARD)) != 0) { 25503 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 25504 "sr_volume_ctrl: Mode Sense Failed\n"); 25505 kmem_free(sense, sense_buflen); 25506 kmem_free(select, select_buflen); 25507 return (rval); 25508 } 25509 sense_mhp = (struct mode_header_grp2 *)sense; 25510 select_mhp = (struct mode_header_grp2 *)select; 25511 bd_len = (sense_mhp->bdesc_length_hi << 8) | 25512 sense_mhp->bdesc_length_lo; 25513 if (bd_len > MODE_BLK_DESC_LENGTH) { 25514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25515 "sr_volume_ctrl: Mode Sense returned invalid " 25516 "block descriptor length\n"); 25517 kmem_free(sense, sense_buflen); 25518 kmem_free(select, select_buflen); 25519 return (EIO); 25520 } 25521 sense_page = (uchar_t *) 25522 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25523 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 25524 select_mhp->length_msb = 0; 25525 select_mhp->length_lsb = 0; 25526 select_mhp->bdesc_length_hi = 0; 25527 select_mhp->bdesc_length_lo = 0; 25528 } else { 25529 struct mode_header *sense_mhp, *select_mhp; 25530 25531 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25532 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 25533 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 25534 select = kmem_zalloc(select_buflen, KM_SLEEP); 25535 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 25536 sense_buflen, MODEPAGE_AUDIO_CTRL, 25537 SD_PATH_STANDARD)) != 0) { 25538 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25539 "sr_volume_ctrl: Mode Sense Failed\n"); 25540 kmem_free(sense, sense_buflen); 25541 kmem_free(select, select_buflen); 25542 return (rval); 25543 } 25544 sense_mhp = (struct mode_header *)sense; 25545 select_mhp = (struct mode_header *)select; 25546 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 25547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25548 "sr_volume_ctrl: Mode Sense returned invalid " 25549 "block descriptor length\n"); 25550 kmem_free(sense, sense_buflen); 25551 kmem_free(select, select_buflen); 25552 return (EIO); 25553 } 25554 sense_page = (uchar_t *) 25555 (sense + MODE_HEADER_LENGTH + 
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and
	 * overlaid on the following in place of the array indexing method
	 * implemented here (see the sketch following this function).
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channels 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channels 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}
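
/*
 * As noted in sr_volume_ctrl() above, the audio control page could be
 * described by a structure overlay instead of array indexing. A
 * hypothetical sketch of such an overlay follows; the field layout is
 * inferred from the byte offsets used above, not taken from an existing
 * header:
 *
 *	struct audio_ctrl_page {
 *		uchar_t page_code;	(byte 0: MODEPAGE_AUDIO_CTRL)
 *		uchar_t page_length;	(byte 1: 0xE)
 *		uchar_t flags;		(byte 2: immediate bit = 0x04)
 *		uchar_t reserved[2];	(bytes 3-4: zeroed)
 *		uchar_t preserved[3];	(bytes 5-7: copied from sense)
 *		uchar_t port0_select;	(byte 8: 0x01 = channel 0)
 *		uchar_t port0_volume;	(byte 9: vol->channel0)
 *		uchar_t port1_select;	(byte 10: 0x02 = channel 1)
 *		uchar_t port1_volume;	(byte 11: vol->channel1)
 *		uchar_t port23[4];	(bytes 12-15: copied from sense)
 *	};
 */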
/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *	ioctl requests for session offset information. (CDROMREADOFFSET)
 *	The address of the first track in the last session of a
 *	multi-session CD-ROM is returned.
 *
 *	Note: This routine uses a vendor specific key value in the
 *	command control field without implementing any vendor check here
 *	or in the ioctl routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	data - pointer to an int to hold the requested address
 *	flag - this argument is a pass through to ddi_copyxxx()
 *	       directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *	EFAULT if ddi_copyxxx() fails
 *	ENXIO if ddi_get_soft_state fails
 *	EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current lbasize.
		 * Convert to 2K blocks before returning it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache constructor for the wmap cache for the
 *	read/modify/write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be initialized.
 *	un - sd_lun structure for the device.
 *	flags - the km flags passed to the constructor
 *
 * Return Code: 0 on success.
 *	-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the
 *	read/modify/write devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *	un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}
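
/*
 * The two routines above are kmem cache callbacks. A minimal sketch of
 * how such a cache would be created during attach (the actual call site
 * is elsewhere in this driver; the name argument and the flags shown
 * are illustrative only):
 *
 *	un->un_wm_cache = kmem_cache_create(name,
 *	    sizeof (struct sd_w_map), 8, sd_wm_cache_constructor,
 *	    sd_wm_cache_destructor, NULL, (void *)un, NULL, 0);
 *
 * Passing un as the private argument is what delivers it to the
 * constructor/destructor as their second parameter.
 */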
/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *	read-modify-write is atomic and that no other I/O writes to the
 *	same location. The range is specified in terms of start and end
 *	blocks. Block numbers are the actual media block numbers, not
 *	system blocks.
 *
 * Arguments: un - sd_lun structure for the device.
 *	startb - The starting block number.
 *	endb - The end block number.
 *	typ - type of i/o - simple/read_modify_write.
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep an on-list
						 * wmp while waiting; this
						 * macro also sets wmp = NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which the
					 * wait is done. Since tmp_wmp points
					 * to the in-use wmap, set sl_wmp to
					 * tmp_wmp and change the state to
					 * sleep.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked; try to get a wmap.
			 * First attempt it with KM_NOSLEEP: we want to avoid
			 * sleeping if possible, since we would have to
			 * release the sd mutex in order to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by
				 * going back to the check-list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit the state machine since we have
				 * the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not already there, then set the state to
				 * done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one
			 * is waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability
			 * of the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find whether there is any I/O overlapping the given range.
 *	Returns the write map of the first such I/O, NULL otherwise.
 *
 * Arguments: un - sd_lun structure for the device.
 *	startb - The starting block number.
 *	endb - The end block number.
 *
 * Return Code: wm - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un - sd_lun structure for the device.
 *	wmp - sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *	Free the write map if nobody else is waiting on it.
 *
 * Arguments: un - sd_lun structure for the device.
 *	wm - sd_w_map to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}
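
/*
 * Typical usage sketch for the range lock pair above (the real callers
 * are in the RMW I/O path elsewhere in this driver; the variable names
 * are illustrative):
 *
 *	wmp = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... read the target blocks, merge in the caller's data,
 *	    and issue the write ...
 *	sd_range_unlock(un, wmp);
 *
 * sd_range_lock() may sleep, so it must not be called with SD_MUTEX
 * held (see the ASSERT at its entry).
 */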
/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *	a read-modify-write request. This is used for targets where
 *	un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the read
	 * portion of the rmw request completing under interrupt context.
	 * The write request must be sent from here down the iostart chain
	 * as if it were being sent from sd_mapblocksize_iostart(), so we
	 * use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}
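
/*
 * Sketch of how the read-completion path would hand off to the task
 * above (the real dispatch site is elsewhere in this driver; the taskq
 * chosen and the flag are illustrative assumptions):
 *
 *	if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
 *	    KM_NOSLEEP) == 0) {
 *		... dispatch failed: fail or retry the bp ...
 *	}
 */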
/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump(). If sddump() is
 *	called with an I/O that is not aligned on a device blocksize
 *	boundary, then the write has to be converted to a
 *	read-modify-write. The read part is done here in order to keep
 *	sddump() simple.
 *	Note that the sd_mutex is held across the call to this routine.
 *
 * Arguments: un - sd_lun
 *	blkno - block number in terms of media block size.
 *	nblk - number of blocks.
 *	bpp - pointer to pointer to the buf structure. On return
 *	      from this function, *bpp points to the valid buffer
 *	      to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * scsi_poll() returns 0 (success) if the command completes
		 * and the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE first; give up if the device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un),
				    RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}
/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *	in b_flags and move them onto the failfast queue, then kick
 *	off a thread to return all bp's on the failfast queue to
 *	their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state,
	 * or just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Update the kstat since the bp is moved out of
			 * the waitq.
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *	flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible error return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
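
/*
 * Sketch of how a completion path would resume a DMA_PARTIAL transfer
 * with sd_setup_next_xfer() above (illustrative; the real caller is the
 * interrupt path elsewhere in this driver):
 *
 *	if (pkt->pkt_resid == 0 && xp->xb_dma_resid != 0) {
 *		(part of the buf has not been mapped/sent yet)
 *		if (sd_setup_next_xfer(un, bp, pkt, xp) != 0) {
 *			... re-issue pkt via scsi_transport() ...
 *		}
 *	}
 */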
/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation
 *	Conflict" and a human readable identifier indicating the SD
 *	instance that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * injecting faults into multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *	fault-injection ioctls, used to inject errors into the
 *	layer model.
 *
 * Arguments: cmd - the ioctl cmd received
 *	arg - the argument from the user, also used for returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;
	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;
	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push the stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the buffer of the log from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *	for retrieval via sd_faultinjection_ioctl() for use in fault
 *	detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
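
/*
 * Expected userland sequence for a fault-injection session, pieced
 * together from the ioctl cases above (pseudocode; fd is an open sd
 * device node and the staged structs come from the test harness):
 *
 *	ioctl(fd, SDIOCSTART);			(reset the fifo and log)
 *	ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	(stage fault data)
 *	ioctl(fd, SDIOCPUSH, &n);		(push n staged sets)
 *	ioctl(fd, SDIOCRUN);			(arm injection)
 *	... run I/O against the device ...
 *	ioctl(fd, SDIOCRETRIEVE, logbuf);	(collect injection log)
 *	ioctl(fd, SDIOCSTOP);			(tear down the session)
 */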
/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *	content based on the error injection scenario.
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* Pull the bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* If injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* Take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* Set the variables accordingly */
	/* Set pkt if it was on the fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* Set xb if it was on the fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* Copy in the block data from the sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* Copy in the extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* Set un if it was on the fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* Copy in the auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}
	/* Free the structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set, explicitly reset them to NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes 3 different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify it. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device which can provide 0x80 or 0x83
 *    VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
 *    device ID is created to identify this device. For other non-removable
 *    media devices, a default device ID is created only if this device has
 *    at least two alternate cylinders. Otherwise, this device has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *        false             false     |        Yes
 *        false             true      |        Yes
 *        true              x         |        No
 *    -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses, which are needed to access storage spaces
 *    beyond 2TB. Other commands have no such capability. Without group 4
 *    support, it is impossible to make full use of the storage space of a
 *    disk with a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *        false             false      false |    1
 *        false             false      true  |    4
 *        false             true       false |    1
 *        false             true       true  |    4
 *        true              x          x     |    5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
 *
 *    --------------------------------------------------------------
 *    Direct-Access   removable media   hotpluggable  | Check Label
 *    --------------------------------------------------------------
 *        false            false            false     |     No
 *        false            false            true      |     No
 *        false            true             false     |     Yes
 *        false            true             true      |     Yes
 *        true             x                x         |     Yes
 *    --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a
 *    VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *    attempt to create a default VTOC label for them. Currently sd
 *    creates a default VTOC label for all devices on the x86 platform
 *    (VTOC_16), but only for removable media or hotpluggable devices on
 *    SPARC (VTOC_8).
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable   platform  | Default Label
 *    -----------------------------------------------------------
 *        false             false        x86     |     Yes
 *        false             false        sparc   |     No
 *        false             true         x86     |     Yes
 *        false             true         sparc   |     Yes
 *        true              x            x       |     Yes
 *    -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *    sd supports non-512-byte blocksizes for removable media devices
 *    only. For other devices, only a 512-byte blocksize is supported.
 *    This may change in the near future because some RAID devices
 *    require non-512-byte blocksizes.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable | non-512-byte blocksize
 *    -----------------------------------------------------------
 *        false             false    |        No
 *        false             true     |        No
 *        true              x        |        Yes
 *    -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is
 *    used to query whether a device is a removable media device. It
 *    returns 1 for removable media devices, and 0 for others.
 *
 *    The automatic mounting subsystem should distinguish between the types
 *    of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *    Fdisk is the traditional partitioning method on the x86 platform.
 *    The sd(7d) driver supports fdisk partitions only on x86; on the
 *    SPARC platform, sd doesn't support fdisk partitions at all. Note:
 *    pcfs(7fs) can recognize fdisk partitions on both x86 and SPARC
 *    platforms.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | fdisk supported
 *    -----------------------------------------------------------
 *    x86             X                X    |      true
 *    -----------------------------------------------------------
 *    sparc           X                X    |      false
 *    -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *    support reading/writing the mboot for removable media and
 *    hotpluggable devices on SPARC.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | mboot supported
 *    -----------------------------------------------------------
 *    x86             X                X    |      true
 *    -----------------------------------------------------------
 *    sparc          false            false |      false
 *    sparc          false            true  |      true
 *    sparc          true             false |      true
 *    sparc          true             true  |      true
 *    -----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 *    If opening a disk device fails, an errno is returned. For some kinds
 *    of errors, a different errno is returned depending on whether this
 *    device is a removable media device. This brings USB/1394 hard disks
 *    in line with expected hard disk behavior. It is not expected that
 *    this breaks any application.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable | errno
 *    ------------------------------------------------------
 *        false             false    |  EIO
 *        false             true     |  EIO
 *        true              x        |  ENXIO
 *    ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These IOCTLs are applicable only to removable media devices.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable | DKIOCEJECT, CDROMEJECT
 *    -----------------------------------------------------------
 *        false             false    |        No
 *        false             true     |        No
 *        true              x        |        Yes
 *    -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB and
 *    Firewire hard disks now have partition kstats.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable | kstat
 *    ------------------------------------------------------
 *        false             false    |  Yes
 *        false             true     |  Yes
 *        true              x        |  No
 *    ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *    The sd driver creates a "removable-media" property for removable
 *    media devices. Parent nexus drivers create a "hotpluggable"
 *    property if they support hotplugging.
 *
 *    ---------------------------------------------------------------------
 *    removable media   hotpluggable | "removable-media"  "hotpluggable"
 *    ---------------------------------------------------------------------
 *        false             false    |        No                No
 *        false             true     |        No                Yes
 *        true              false    |        Yes               No
 *        true              true     |        Yes               Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *    sd only power-manages removable media devices or devices that
 *    support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *    A parent nexus that supports hotplugging can also set "pm-capable"
 *    if the disk can be power managed.
 *
 *    ------------------------------------------------------------
 *    removable media   hotpluggable   pm-capable | power manage
 *    ------------------------------------------------------------
 *        false             false        false    |      No
 *        false             false        true     |      Yes
 *        false             true         false    |      No
 *        false             true         true     |      Yes
 *        true              x            x        |      Yes
 *    ------------------------------------------------------------
 *
 *    USB and firewire hard disks can now be power managed independently
 *    of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *    Currently, sd doesn't permit a fixed disk device with a capacity
 *    larger than 1TB to be used in a 32-bit operating system environment.
 *    However, sd doesn't do that for removable media devices; instead,
 *    it assumes that removable media devices cannot have a capacity
 *    larger than 1TB. Therefore, using those devices on a 32-bit system
 *    is partially supported, which can cause some unexpected results.
 *
 *    ---------------------------------------------------------------------
 *    removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
 *    ---------------------------------------------------------------------
 *        false            false |      true      |        No
 *        false            true  |      true      |        No
 *        true             false |      true      |        Yes
 *        true             true  |      true      |        Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *    When a removable media device is opened for writing without the
 *    NDELAY flag, sd will check whether this device is writable. An
 *    attempt to open a write-protected device for writing without the
 *    NDELAY flag will fail.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394 | WP Check
 *    ------------------------------------------------------------
 *        false            false |    No
 *        false            true  |    No
 *        true             false |    Yes
 *        true             true  |    Yes
 *    ------------------------------------------------------------
 *
 *
 * 16. syslog when corrupted VTOC is encountered
 *
 *    Currently, if an invalid VTOC is encountered, sd prints a syslog
 *    message only for fixed SCSI disks.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394 | print syslog
 *    ------------------------------------------------------------
 *        false            false |    Yes
 *        false            true  |    No
 *        true             false |    No
 *        true             true  |    No
 *    ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we must support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes on removable media devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. Without the
		 * NDELAY flag, however, we need to check whether the media
		 * is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command,
		 * so we should check whether a device supports it before
		 * sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * "removable-media" for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));
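
		/*
		 * For example, a hypothetical sd.conf entry that would
		 * disable partition kstats (standard driver.conf(4)
		 * name=value syntax):
		 *
		 *	enable-partition-kstats=0;
		 */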

                /*
                 * Check whether the HBA has set the "pm-capable" property.
                 * If "pm-capable" exists and is non-zero, the device can be
                 * power managed without checking the start/stop cycle count
                 * log sense page.
                 *
                 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
                 * the device should not be power managed.
                 *
                 * If "pm-capable" does not exist, pm_capable_prop is set to
                 * SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd checks the
                 * start/stop cycle count log sense page and power manages
                 * the device if the cycle count limit has not been exceeded.
                 */
                pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
                    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
                if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
                        un->un_f_log_sense_supported = TRUE;
                } else {
                        /*
                         * The pm-capable property exists.
                         *
                         * Treat any value other than SD_PM_CAPABLE_FALSE (0)
                         * and SD_PM_CAPABLE_UNDEFINED (-1) as "TRUE"
                         * (SD_PM_CAPABLE_TRUE, 1) to make later checks
                         * easier.
                         */
                        if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
                                un->un_f_log_sense_supported = FALSE;
                        } else {
                                un->un_f_pm_supported = TRUE;
                        }

                        SD_INFO(SD_LOG_ATTACH_DETACH, un,
                            "sd_unit_attach: un:0x%p pm-capable "
                            "property set to %d.\n", un,
                            un->un_f_pm_supported);
                }
        }

        if (un->un_f_is_hotpluggable) {

                /*
                 * Hotpluggable devices must be watched as well, since that
                 * is the only way for userland applications to detect hot
                 * removal while the device is busy or mounted.
                 */
                un->un_f_monitor_media_state = TRUE;

                un->un_f_check_start_stop = TRUE;

        }
}

/*
 * sd_tg_rdwr:
 *      Provides read/write access for cmlb via sd_tgops. start_block is
 *      in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
        struct sd_lun *un;
        int path_flag = (int)(uintptr_t)tg_cookie;
        char *dkl = NULL;
        diskaddr_t real_addr = start_block;
        diskaddr_t first_byte, end_block;

        size_t buffer_size = reqlength;
        int rval;
        diskaddr_t cap;
        uint32_t lbasize;

        un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
        if (un == NULL)
                return (ENXIO);

        if (cmd != TG_READ && cmd != TG_WRITE)
                return (EINVAL);

        mutex_enter(SD_MUTEX(un));
        if (un->un_f_tgt_blocksize_is_valid == FALSE) {
                mutex_exit(SD_MUTEX(un));
                rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                    &lbasize, path_flag);
                if (rval != 0)
                        return (rval);
                mutex_enter(SD_MUTEX(un));
                sd_update_block_info(un, lbasize, cap);
                if (un->un_f_tgt_blocksize_is_valid == FALSE) {
                        mutex_exit(SD_MUTEX(un));
                        return (EIO);
                }
        }

        if (NOT_DEVBSIZE(un)) {
                /*
                 * sys_blocksize != tgt_blocksize: re-adjust the block
                 * number and record the offset to the beginning of the
                 * dk_label within the target-sized buffer.
                 */
                first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
                real_addr = first_byte / un->un_tgt_blocksize;

                end_block = (first_byte + reqlength +
                    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

                /* round up buffer size to a multiple of the target block size */
                buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

                SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
                    "label_addr: 0x%x allocation size: 0x%x\n",
                    real_addr, buffer_size);

                if (((first_byte % un->un_tgt_blocksize) != 0) ||
                    (reqlength % un->un_tgt_blocksize) != 0)
                        /* the request is not aligned */
                        dkl = kmem_zalloc(buffer_size, KM_SLEEP);
        }
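        /*
         * Worked example of the re-alignment above (values illustrative):
         * with un_sys_blocksize 512 and un_tgt_blocksize 2048, a 512-byte
         * read at system block 3 gives first_byte = 1536, real_addr = 0,
         * end_block = (1536 + 512 + 2047) / 2048 = 1, and buffer_size =
         * 2048. Since 1536 % 2048 != 0, the request is unaligned, so a
         * bounce buffer (dkl) is allocated and the caller's data is later
         * copied at offset SD_TGTBYTEOFFSET(un, 3, 0) = 1536 within it.
         */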

        /*
         * The MMC standard allows READ CAPACITY to be inaccurate by a
         * bounded amount (in the interest of response latency). As a
         * result, failed READs are commonplace (due to the reading of
         * metadata rather than data). Depending on the per-vendor/drive
         * sense data, a failed READ can cause many (unnecessary) retries.
         */
        if (ISCD(un) && (cmd == TG_READ) &&
            (un->un_f_blockcount_is_valid == TRUE) &&
            ((start_block == (un->un_blockcount - 1)) ||
            (start_block == (un->un_blockcount - 2)))) {
                path_flag = SD_PATH_DIRECT_PRIORITY;
        }

        mutex_exit(SD_MUTEX(un));
        if (cmd == TG_READ) {
                rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
                    buffer_size, real_addr, path_flag);
                if (dkl != NULL)
                        bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                            real_addr), bufaddr, reqlength);
        } else {
                if (dkl) {
                        rval = sd_send_scsi_READ(un, dkl, buffer_size,
                            real_addr, path_flag);
                        if (rval) {
                                kmem_free(dkl, buffer_size);
                                return (rval);
                        }
                        bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                            real_addr), reqlength);
                }
                rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
                    buffer_size, real_addr, path_flag);
        }

        if (dkl != NULL)
                kmem_free(dkl, buffer_size);

        return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
        struct sd_lun *un;
        diskaddr_t cap;
        uint32_t lbasize;
        int path_flag = (int)(uintptr_t)tg_cookie;
        int ret = 0;

        un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
        if (un == NULL)
                return (ENXIO);

        switch (cmd) {
        case TG_GETPHYGEOM:
        case TG_GETVIRTGEOM:
        case TG_GETCAPACITY:
        case TG_GETBLOCKSIZE:
                mutex_enter(SD_MUTEX(un));

                if ((un->un_f_blockcount_is_valid == TRUE) &&
                    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
                        cap = un->un_blockcount;
                        lbasize = un->un_tgt_blocksize;
                        mutex_exit(SD_MUTEX(un));
                } else {
                        mutex_exit(SD_MUTEX(un));
                        ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
                            &lbasize, path_flag);
                        if (ret != 0)
                                return (ret);
                        mutex_enter(SD_MUTEX(un));
                        sd_update_block_info(un, lbasize, cap);
                        if ((un->un_f_blockcount_is_valid == FALSE) ||
                            (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                                mutex_exit(SD_MUTEX(un));
                                return (EIO);
                        }
                        mutex_exit(SD_MUTEX(un));
                }

                if (cmd == TG_GETCAPACITY) {
                        *(diskaddr_t *)arg = cap;
                        return (0);
                }

                if (cmd == TG_GETBLOCKSIZE) {
                        *(uint32_t *)arg = lbasize;
                        return (0);
                }

                if (cmd == TG_GETPHYGEOM)
                        ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                            cap, lbasize, path_flag);
                else
                        /* TG_GETVIRTGEOM */
                        ret = sd_get_virtual_geometry(un,
                            (cmlb_geom_t *)arg, cap, lbasize);

                return (ret);

        case TG_GETATTR:
                mutex_enter(SD_MUTEX(un));
                ((tg_attribute_t *)arg)->media_is_writable =
                    un->un_f_mmc_writable_media;
                mutex_exit(SD_MUTEX(un));
                return (0);
        default:
                return (ENOTTY);
        }
}
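/*
 * Illustrative sketch of how these entry points are reached: cmlb calls
 * back through the sd_tgops vector, so a capacity query during label
 * validation conceptually reduces to the following (hypothetical direct
 * caller; the path flag is smuggled through tg_cookie, as the casts of
 * tg_cookie above assume):
 *
 *      diskaddr_t cap;
 *
 *      if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
 *          (void *)SD_PATH_DIRECT) == 0) {
 *              ... use cap, the capacity in target blocks ...
 *      }
 */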