/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
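
/*
 * Illustrative sketch only (kept out of the build): one way attach-time
 * code might resolve the interconnect type described above, using
 * scsi_ifgetcap(9F) and falling back to the compile-time default when
 * the HBA does not report the "interconnect-type" capability.  The
 * helper name is hypothetical and the mapping is simplified.
 */
#if 0
static int
sd_example_get_interconnect_type(struct sd_lun *un)
{
	/* -1 means the HBA does not support this capability. */
	int itype = scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1);

	switch (itype) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FIBRE);	/* ssd behaviors */
	case -1:
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	default:
		/* 1394, USB, etc. get parallel SCSI behaviors. */
		return (SD_INTERCONNECT_PARALLEL);
	}
}
#endif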

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
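
/*
 * Illustrative sketch only (kept out of the build): the essence of the
 * "smart" probe described above.  A cached no-response result
 * short-circuits the slow call to scsi_probe(9F); anything else falls
 * through to a real probe whose result is recorded for next time.
 * The helper name and the locking-free form are simplifications of the
 * driver's sd_scsi_probe_with_cache().
 */
#if 0
static int
sd_example_probe_with_cache(struct sd_scsi_probe_cache *cp,
    struct scsi_device *devp, int tgt, int (*fn)())
{
	if (cp->cache[tgt] == SCSIPROBE_NORESP) {
		/* Target did not answer last time; skip the re-probe. */
		return (SCSIPROBE_NORESP);
	}
	cp->cache[tgt] = scsi_probe(devp, fn);
	return (cp->cache[tgt]);
}
#endif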

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};
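
/*
 * Illustrative sketch only (kept out of the build): the blank-collapsing
 * match described in the table comment above, where a run of blanks in
 * the table entry matches at most one blank in the device's id string.
 * This is a simplified stand-in for sd_blank_cmp(); the helper name is
 * hypothetical.
 */
#if 0
static int
sd_example_blank_cmp(const char *tbl, const char *id)
{
	while (*tbl != '\0') {
		if (*tbl == ' ') {
			/* Collapse consecutive blanks in the table entry. */
			while (*tbl == ' ')
				tbl++;
			/* They match a single blank (or none) in the id. */
			if (*id == ' ')
				id++;
			continue;
		}
		if (SD_TOUPPER(*tbl) != SD_TOUPPER(*id))
			return (0);
		tbl++;
		id++;
	}
	return (1);	/* matched as far as the table entry is defined */
}
#endif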
"*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 716 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 718 &symbios_properties }, 719 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 720 &lsi_properties_scsi }, 721 #if defined(__i386) || defined(__amd64) 722 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 723 | SD_CONF_BSET_READSUB_BCD 724 | SD_CONF_BSET_READ_TOC_ADDR_BCD 725 | SD_CONF_BSET_NO_READ_HEADER 726 | SD_CONF_BSET_READ_CD_XD4), NULL }, 727 728 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 729 | SD_CONF_BSET_READSUB_BCD 730 | SD_CONF_BSET_READ_TOC_ADDR_BCD 731 | SD_CONF_BSET_NO_READ_HEADER 732 | SD_CONF_BSET_READ_CD_XD4), NULL }, 733 #endif /* __i386 || __amd64 */ 734 #endif /* sparc NON-fibre or NON-sparc platforms */ 735 736 #if (defined(SD_PROP_TST)) 737 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 738 | SD_CONF_BSET_CTYPE 739 | SD_CONF_BSET_NRR_COUNT 740 | SD_CONF_BSET_FAB_DEVID 741 | SD_CONF_BSET_NOCACHE 742 | SD_CONF_BSET_BSY_RETRY_COUNT 743 | SD_CONF_BSET_PLAYMSF_BCD 744 | SD_CONF_BSET_READSUB_BCD 745 | SD_CONF_BSET_READ_TOC_TRK_BCD 746 | SD_CONF_BSET_READ_TOC_ADDR_BCD 747 | SD_CONF_BSET_NO_READ_HEADER 748 | SD_CONF_BSET_READ_CD_XD4 749 | SD_CONF_BSET_RST_RETRIES 750 | SD_CONF_BSET_RSV_REL_TIME 751 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 752 #endif 753 }; 754 755 static const int sd_disk_table_size = 756 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 757 758 759 760 #define SD_INTERCONNECT_PARALLEL 0 761 #define SD_INTERCONNECT_FABRIC 1 762 #define SD_INTERCONNECT_FIBRE 2 763 #define SD_INTERCONNECT_SSA 3 764 #define SD_INTERCONNECT_SATA 4 765 #define SD_IS_PARALLEL_SCSI(un) \ 766 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 767 #define SD_IS_SERIAL(un) \ 768 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 769 770 /* 771 * Definitions used by device id registration routines 772 */ 773 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 774 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 775 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 776 777 static kmutex_t sd_sense_mutex = {0}; 778 779 /* 780 * Macros for updates of the driver state 781 */ 782 #define New_state(un, s) \ 783 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 784 #define Restore_state(un) \ 785 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 786 787 static struct sd_cdbinfo sd_cdbtab[] = { 788 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 789 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 790 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 791 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 792 }; 793 794 /* 795 * Specifies the number of seconds that must have elapsed since the last 796 * cmd. has completed for a device to be declared idle to the PM framework. 797 */ 798 static int sd_pm_idletime = 1; 799 800 /* 801 * Internal function prototypes 802 */ 803 804 #if (defined(__fibre)) 805 /* 806 * These #defines are to avoid namespace collisions that occur because this 807 * code is currently used to compile two separate driver modules: sd and ssd. 808 * All function names need to be treated this way (even if declared static) 809 * in order to allow the debugger to resolve the names properly. 810 * It is anticipated that in the near future the ssd module will be obsoleted, 811 * at which time this ugliness should go away. 

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int	sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void	sd_get_nv_sup(struct sd_lun *un);
static dev_t	sd_make_device(dev_info_t *devi);
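
/*
 * Illustrative usage (not actual driver code): the rcd_flag/wce_flag
 * arguments to sd_cache_control() take the SD_CACHE_* values above, so
 * enabling the write cache while leaving the read cache setting
 * untouched would look like:
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */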

static void	sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int	sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int	sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int	sd_ready_and_valid(struct sd_lun *un);

static void	sdmin(struct buf *bp);
static int	sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int	sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int	sdstrategy(struct buf *bp);
static int	sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void	sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void	sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void	sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void	sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void	sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void	sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void	sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void	sdintr(struct scsi_pkt *pktp);
static void	sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int	sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void	sd_bioclone_free(struct buf *bp);
static void	sd_shadow_buf_free(struct buf *bp);

static void	sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void	sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void	sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void	sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void	sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void	sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void	sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void	sd_start_retry_command(void *arg);
static void	sd_start_direct_priority_command(void *arg);
static void	sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void	sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void	sd_return_command(struct sd_lun *un, struct buf *bp);
static void	sd_sync_with_callback(struct sd_lun *un);
static int	sdrunout(caddr_t arg);

static void	sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void	sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void	sd_restore_throttle(void *arg);

static void	sd_init_cdb_limits(struct sd_lun *un);

static void	sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void	sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int	sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void	sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void	sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void	sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void	sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void	sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void	sd_start_stop_unit_callback(void *arg);
static void	sd_start_stop_unit_task(void *arg);

static void	sd_taskq_create(void);
static void	sd_taskq_delete(void);
static void	sd_media_change_task(void *arg);

static int	sd_handle_mchange(struct sd_lun *un);
static int	sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int	sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int	sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int	sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int	sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int	sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int	sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int	sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int	sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int	sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int	sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int	sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int	sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int	sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int	sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int	sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int	sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void	sd_free_rqs(struct sd_lun *un);

static void	sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void	sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int	sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int	sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int	sd_check_mhd(dev_t dev, int interval);
static int	sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void	sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char	*sd_sname(uchar_t status);
static void	sd_mhd_resvd_recover(void *arg);
static void	sd_resv_reclaim_thread();
static int	sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int	sd_reserve_release(dev_t dev, int cmd);
static void	sd_rmv_resv_reclaim_req(dev_t dev);
static void	sd_mhd_reset_notify_cb(caddr_t arg);
static int	sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int	sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int	sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int	sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
caddr_t arg, int flag); 1471 static int sd_mhdioc_release(dev_t dev); 1472 static int sd_mhdioc_register_devid(dev_t dev); 1473 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1474 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1475 1476 /* 1477 * SCSI removable prototypes 1478 */ 1479 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1480 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1481 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1482 static int sr_pause_resume(dev_t dev, int mode); 1483 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1484 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1485 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1486 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1487 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1488 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1489 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1491 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1492 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1493 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1494 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1495 static int sr_eject(dev_t dev); 1496 static void sr_ejected(register struct sd_lun *un); 1497 static int sr_check_wp(dev_t dev); 1498 static int sd_check_media(dev_t dev, enum dkio_state state); 1499 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1500 static void sd_delayed_cv_broadcast(void *arg); 1501 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1503 1504 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1505 1506 /* 1507 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1508 */ 1509 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1510 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1511 static void sd_wm_cache_destructor(void *wm, void *un); 1512 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1513 daddr_t endb, ushort_t typ); 1514 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1515 daddr_t endb); 1516 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1517 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1518 static void sd_read_modify_write_task(void * arg); 1519 static int 1520 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1521 struct buf **bpp); 1522 1523 1524 /* 1525 * Function prototypes for failfast support. 
1526 */ 1527 static void sd_failfast_flushq(struct sd_lun *un); 1528 static int sd_failfast_flushq_callback(struct buf *bp); 1529 1530 /* 1531 * Function prototypes to check for lsi devices 1532 */ 1533 static void sd_is_lsi(struct sd_lun *un); 1534 1535 /* 1536 * Function prototypes for partial DMA support 1537 */ 1538 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1539 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1540 1541 1542 /* Function prototypes for cmlb */ 1543 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1544 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1545 1546 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1547 1548 /* 1549 * Constants for failfast support: 1550 * 1551 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1552 * failfast processing being performed. 1553 * 1554 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1555 * failfast processing on all bufs with B_FAILFAST set. 1556 */ 1557 1558 #define SD_FAILFAST_INACTIVE 0 1559 #define SD_FAILFAST_ACTIVE 1 1560 1561 /* 1562 * Bitmask to control behavior of buf(9S) flushes when a transition to 1563 * the failfast state occurs. Optional bits include: 1564 * 1565 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1566 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1567 * be flushed. 1568 * 1569 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1570 * driver, in addition to the regular wait queue. This includes the xbuf 1571 * queues. When clear, only the driver's wait queue will be flushed. 1572 */ 1573 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1574 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1575 1576 /* 1577 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1578 * to flush all queues within the driver. 1579 */ 1580 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1581 1582 1583 /* 1584 * SD Testing Fault Injection 1585 */ 1586 #ifdef SD_FAULT_INJECTION 1587 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1588 static void sd_faultinjection(struct scsi_pkt *pktp); 1589 static void sd_injection_log(char *buf, struct sd_lun *un); 1590 #endif 1591 1592 /* 1593 * Device driver ops vector 1594 */ 1595 static struct cb_ops sd_cb_ops = { 1596 sdopen, /* open */ 1597 sdclose, /* close */ 1598 sdstrategy, /* strategy */ 1599 nodev, /* print */ 1600 sddump, /* dump */ 1601 sdread, /* read */ 1602 sdwrite, /* write */ 1603 sdioctl, /* ioctl */ 1604 nodev, /* devmap */ 1605 nodev, /* mmap */ 1606 nodev, /* segmap */ 1607 nochpoll, /* poll */ 1608 sd_prop_op, /* cb_prop_op */ 1609 0, /* streamtab */ 1610 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1611 CB_REV, /* cb_rev */ 1612 sdaread, /* async I/O read entry point */ 1613 sdawrite /* async I/O write entry point */ 1614 }; 1615 1616 static struct dev_ops sd_ops = { 1617 DEVO_REV, /* devo_rev, */ 1618 0, /* refcnt */ 1619 sdinfo, /* info */ 1620 nulldev, /* identify */ 1621 sdprobe, /* probe */ 1622 sdattach, /* attach */ 1623 sddetach, /* detach */ 1624 nodev, /* reset */ 1625 &sd_cb_ops, /* driver operations */ 1626 NULL, /* bus operations */ 1627 sdpower /* power */ 1628 }; 1629 1630 1631 /* 1632 * This is the loadable module wrapper. 1633 */ 1634 #include <sys/modctl.h> 1635 1636 static struct modldrv modldrv = { 1637 &mod_driverops, /* Type of module. 
This one is a driver */
1638 SD_MODULE_NAME, /* Module name. */
1639 &sd_ops /* driver ops */
1640 };
1641
1642
1643 static struct modlinkage modlinkage = {
1644 MODREV_1,
1645 &modldrv,
1646 NULL
1647 };
1648
1649 static cmlb_tg_ops_t sd_tgops = {
1650 TG_DK_OPS_VERSION_1,
1651 sd_tg_rdwr,
1652 sd_tg_getinfo
1653 };
1654
1655 static struct scsi_asq_key_strings sd_additional_codes[] = {
1656 0x81, 0, "Logical Unit is Reserved",
1657 0x85, 0, "Audio Address Not Valid",
1658 0xb6, 0, "Media Load Mechanism Failed",
1659 0xB9, 0, "Audio Play Operation Aborted",
1660 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1661 0x53, 2, "Medium removal prevented",
1662 0x6f, 0, "Authentication failed during key exchange",
1663 0x6f, 1, "Key not present",
1664 0x6f, 2, "Key not established",
1665 0x6f, 3, "Read without proper authentication",
1666 0x6f, 4, "Mismatched region to this logical unit",
1667 0x6f, 5, "Region reset count error",
1668 0xffff, 0x0, NULL
1669 };
1670
1671
1672 /*
1673 * Struct for passing printing information for sense data messages
1674 */
1675 struct sd_sense_info {
1676 int ssi_severity;
1677 int ssi_pfa_flag;
1678 };
1679
1680 /*
1681 * Table of function pointers for iostart-side routines. Separate "chains"
1682 * of layered function calls are formed by placing the function pointers
1683 * sequentially in the desired order. Functions are called according to an
1684 * incrementing table index ordering. The last function in each chain must
1685 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1686 * in the sd_iodone_chain[] array.
1687 *
1688 * Note: It may seem more natural to organize both the iostart and iodone
1689 * functions together, into an array of structures (or some similar
1690 * organization) with a common index, rather than two separate arrays which
1691 * must be maintained in synchronization. The purpose of this division is
1692 * to achieve improved performance: individual arrays allow for more
1693 * effective cache line utilization on certain platforms.
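*
* As a sketch of the dispatch (an illustration only, using the
* SD_BEGIN_IOSTART()/SD_NEXT_IOSTART() macros defined further below):
* a disk-chain buf IO with PM enabled runs
*
*	SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp);
*	  -> sd_mapblockaddr_iostart(0, un, bp)
*	  -> SD_NEXT_IOSTART(0, un, bp), i.e. sd_pm_iostart(1, un, bp)
*	  -> SD_NEXT_IOSTART(1, un, bp), i.e. sd_core_iostart(2, un, bp)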
1694 */ 1695 1696 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1697 1698 1699 static sd_chain_t sd_iostart_chain[] = { 1700 1701 /* Chain for buf IO for disk drive targets (PM enabled) */ 1702 sd_mapblockaddr_iostart, /* Index: 0 */ 1703 sd_pm_iostart, /* Index: 1 */ 1704 sd_core_iostart, /* Index: 2 */ 1705 1706 /* Chain for buf IO for disk drive targets (PM disabled) */ 1707 sd_mapblockaddr_iostart, /* Index: 3 */ 1708 sd_core_iostart, /* Index: 4 */ 1709 1710 /* Chain for buf IO for removable-media targets (PM enabled) */ 1711 sd_mapblockaddr_iostart, /* Index: 5 */ 1712 sd_mapblocksize_iostart, /* Index: 6 */ 1713 sd_pm_iostart, /* Index: 7 */ 1714 sd_core_iostart, /* Index: 8 */ 1715 1716 /* Chain for buf IO for removable-media targets (PM disabled) */ 1717 sd_mapblockaddr_iostart, /* Index: 9 */ 1718 sd_mapblocksize_iostart, /* Index: 10 */ 1719 sd_core_iostart, /* Index: 11 */ 1720 1721 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1722 sd_mapblockaddr_iostart, /* Index: 12 */ 1723 sd_checksum_iostart, /* Index: 13 */ 1724 sd_pm_iostart, /* Index: 14 */ 1725 sd_core_iostart, /* Index: 15 */ 1726 1727 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1728 sd_mapblockaddr_iostart, /* Index: 16 */ 1729 sd_checksum_iostart, /* Index: 17 */ 1730 sd_core_iostart, /* Index: 18 */ 1731 1732 /* Chain for USCSI commands (all targets) */ 1733 sd_pm_iostart, /* Index: 19 */ 1734 sd_core_iostart, /* Index: 20 */ 1735 1736 /* Chain for checksumming USCSI commands (all targets) */ 1737 sd_checksum_uscsi_iostart, /* Index: 21 */ 1738 sd_pm_iostart, /* Index: 22 */ 1739 sd_core_iostart, /* Index: 23 */ 1740 1741 /* Chain for "direct" USCSI commands (all targets) */ 1742 sd_core_iostart, /* Index: 24 */ 1743 1744 /* Chain for "direct priority" USCSI commands (all targets) */ 1745 sd_core_iostart, /* Index: 25 */ 1746 }; 1747 1748 /* 1749 * Macros to locate the first function of each iostart chain in the 1750 * sd_iostart_chain[] array. These are located by the index in the array. 1751 */ 1752 #define SD_CHAIN_DISK_IOSTART 0 1753 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1754 #define SD_CHAIN_RMMEDIA_IOSTART 5 1755 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1756 #define SD_CHAIN_CHKSUM_IOSTART 12 1757 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1758 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1759 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1760 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1761 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1762 1763 1764 /* 1765 * Table of function pointers for the iodone-side routines for the driver- 1766 * internal layering mechanism. The calling sequence for iodone routines 1767 * uses a decrementing table index, so the last routine called in a chain 1768 * must be at the lowest array index location for that chain. The last 1769 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1770 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1771 * of the functions in an iodone side chain must correspond to the ordering 1772 * of the iostart routines for that chain. Note that there is no iodone 1773 * side routine that corresponds to sd_core_iostart(), so there is no 1774 * entry in the table for this. 
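*
* For example, reading the two tables together: a removable-media buf IO
* with PM enabled enters sd_iostart_chain[] at index 5 and works up to
* sd_core_iostart() at index 8; its completion processing then begins at
* sd_iodone_chain[] index 8 (sd_pm_iodone) and walks down to
* sd_buf_iodone() at index 5.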
1775 */
1776
1777 static sd_chain_t sd_iodone_chain[] = {
1778
1779 /* Chain for buf IO for disk drive targets (PM enabled) */
1780 sd_buf_iodone, /* Index: 0 */
1781 sd_mapblockaddr_iodone, /* Index: 1 */
1782 sd_pm_iodone, /* Index: 2 */
1783
1784 /* Chain for buf IO for disk drive targets (PM disabled) */
1785 sd_buf_iodone, /* Index: 3 */
1786 sd_mapblockaddr_iodone, /* Index: 4 */
1787
1788 /* Chain for buf IO for removable-media targets (PM enabled) */
1789 sd_buf_iodone, /* Index: 5 */
1790 sd_mapblockaddr_iodone, /* Index: 6 */
1791 sd_mapblocksize_iodone, /* Index: 7 */
1792 sd_pm_iodone, /* Index: 8 */
1793
1794 /* Chain for buf IO for removable-media targets (PM disabled) */
1795 sd_buf_iodone, /* Index: 9 */
1796 sd_mapblockaddr_iodone, /* Index: 10 */
1797 sd_mapblocksize_iodone, /* Index: 11 */
1798
1799 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1800 sd_buf_iodone, /* Index: 12 */
1801 sd_mapblockaddr_iodone, /* Index: 13 */
1802 sd_checksum_iodone, /* Index: 14 */
1803 sd_pm_iodone, /* Index: 15 */
1804
1805 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1806 sd_buf_iodone, /* Index: 16 */
1807 sd_mapblockaddr_iodone, /* Index: 17 */
1808 sd_checksum_iodone, /* Index: 18 */
1809
1810 /* Chain for USCSI commands (non-checksum targets) */
1811 sd_uscsi_iodone, /* Index: 19 */
1812 sd_pm_iodone, /* Index: 20 */
1813
1814 /* Chain for USCSI commands (checksum targets) */
1815 sd_uscsi_iodone, /* Index: 21 */
1816 sd_checksum_uscsi_iodone, /* Index: 22 */
1817 sd_pm_iodone, /* Index: 23 */
1818
1819 /* Chain for "direct" USCSI commands (all targets) */
1820 sd_uscsi_iodone, /* Index: 24 */
1821
1822 /* Chain for "direct priority" USCSI commands (all targets) */
1823 sd_uscsi_iodone, /* Index: 25 */
1824 };
1825
1826
1827 /*
1828 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1829 * each iodone-side chain. These are located by the array index, but as the
1830 * iodone side functions are called in a decrementing-index order, the
1831 * highest index number in each chain must be specified (as these correspond
1832 * to the first function in the iodone chain that will be called by the core
1833 * at IO completion time).
1834 */
1835
1836 #define SD_CHAIN_DISK_IODONE 2
1837 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1838 #define SD_CHAIN_RMMEDIA_IODONE 8
1839 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1840 #define SD_CHAIN_CHKSUM_IODONE 15
1841 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1842 #define SD_CHAIN_USCSI_CMD_IODONE 20
1843 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
1844 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1845 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1846
1847
1848
1849
1850 /*
1851 * Array to map a layering chain index to the appropriate initpkt routine.
1852 * The redundant entries are present so that the index used for accessing
1853 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1854 * with this table as well.
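*
* A hedged sketch of how the driver core consumes this map ("xp" is the
* IO's sd_xbuf; the real call site includes error handling):
*
*	sd_initpkt_t func = sd_initpkt_map[xp->xb_chain_iostart];
*	rval = (*func)(bp, &pktp);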
1855 */
1856 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1857
1858 static sd_initpkt_t sd_initpkt_map[] = {
1859
1860 /* Chain for buf IO for disk drive targets (PM enabled) */
1861 sd_initpkt_for_buf, /* Index: 0 */
1862 sd_initpkt_for_buf, /* Index: 1 */
1863 sd_initpkt_for_buf, /* Index: 2 */
1864
1865 /* Chain for buf IO for disk drive targets (PM disabled) */
1866 sd_initpkt_for_buf, /* Index: 3 */
1867 sd_initpkt_for_buf, /* Index: 4 */
1868
1869 /* Chain for buf IO for removable-media targets (PM enabled) */
1870 sd_initpkt_for_buf, /* Index: 5 */
1871 sd_initpkt_for_buf, /* Index: 6 */
1872 sd_initpkt_for_buf, /* Index: 7 */
1873 sd_initpkt_for_buf, /* Index: 8 */
1874
1875 /* Chain for buf IO for removable-media targets (PM disabled) */
1876 sd_initpkt_for_buf, /* Index: 9 */
1877 sd_initpkt_for_buf, /* Index: 10 */
1878 sd_initpkt_for_buf, /* Index: 11 */
1879
1880 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1881 sd_initpkt_for_buf, /* Index: 12 */
1882 sd_initpkt_for_buf, /* Index: 13 */
1883 sd_initpkt_for_buf, /* Index: 14 */
1884 sd_initpkt_for_buf, /* Index: 15 */
1885
1886 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1887 sd_initpkt_for_buf, /* Index: 16 */
1888 sd_initpkt_for_buf, /* Index: 17 */
1889 sd_initpkt_for_buf, /* Index: 18 */
1890
1891 /* Chain for USCSI commands (non-checksum targets) */
1892 sd_initpkt_for_uscsi, /* Index: 19 */
1893 sd_initpkt_for_uscsi, /* Index: 20 */
1894
1895 /* Chain for USCSI commands (checksum targets) */
1896 sd_initpkt_for_uscsi, /* Index: 21 */
1897 sd_initpkt_for_uscsi, /* Index: 22 */
1898 sd_initpkt_for_uscsi, /* Index: 23 */
1899
1900 /* Chain for "direct" USCSI commands (all targets) */
1901 sd_initpkt_for_uscsi, /* Index: 24 */
1902
1903 /* Chain for "direct priority" USCSI commands (all targets) */
1904 sd_initpkt_for_uscsi, /* Index: 25 */
1905
1906 };
1907
1908
1909 /*
1910 * Array to map a layering chain index to the appropriate destroypkt routine.
1911 * The redundant entries are present so that the index used for accessing
1912 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1913 * with this table as well.
1914 */
1915 typedef void (*sd_destroypkt_t)(struct buf *);
1916
1917 static sd_destroypkt_t sd_destroypkt_map[] = {
1918
1919 /* Chain for buf IO for disk drive targets (PM enabled) */
1920 sd_destroypkt_for_buf, /* Index: 0 */
1921 sd_destroypkt_for_buf, /* Index: 1 */
1922 sd_destroypkt_for_buf, /* Index: 2 */
1923
1924 /* Chain for buf IO for disk drive targets (PM disabled) */
1925 sd_destroypkt_for_buf, /* Index: 3 */
1926 sd_destroypkt_for_buf, /* Index: 4 */
1927
1928 /* Chain for buf IO for removable-media targets (PM enabled) */
1929 sd_destroypkt_for_buf, /* Index: 5 */
1930 sd_destroypkt_for_buf, /* Index: 6 */
1931 sd_destroypkt_for_buf, /* Index: 7 */
1932 sd_destroypkt_for_buf, /* Index: 8 */
1933
1934 /* Chain for buf IO for removable-media targets (PM disabled) */
1935 sd_destroypkt_for_buf, /* Index: 9 */
1936 sd_destroypkt_for_buf, /* Index: 10 */
1937 sd_destroypkt_for_buf, /* Index: 11 */
1938
1939 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1940 sd_destroypkt_for_buf, /* Index: 12 */
1941 sd_destroypkt_for_buf, /* Index: 13 */
1942 sd_destroypkt_for_buf, /* Index: 14 */
1943 sd_destroypkt_for_buf, /* Index: 15 */
1944
1945 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1946 sd_destroypkt_for_buf, /* Index: 16 */
1947 sd_destroypkt_for_buf, /* Index: 17 */
1948 sd_destroypkt_for_buf, /* Index: 18 */
1949
1950 /* Chain for USCSI commands (non-checksum targets) */
1951 sd_destroypkt_for_uscsi, /* Index: 19 */
1952 sd_destroypkt_for_uscsi, /* Index: 20 */
1953
1954 /* Chain for USCSI commands (checksum targets) */
1955 sd_destroypkt_for_uscsi, /* Index: 21 */
1956 sd_destroypkt_for_uscsi, /* Index: 22 */
1957 sd_destroypkt_for_uscsi, /* Index: 23 */
1958
1959 /* Chain for "direct" USCSI commands (all targets) */
1960 sd_destroypkt_for_uscsi, /* Index: 24 */
1961
1962 /* Chain for "direct priority" USCSI commands (all targets) */
1963 sd_destroypkt_for_uscsi, /* Index: 25 */
1964
1965 };
1966
1967
1968
1969 /*
1970 * Array to map a layering chain index to the appropriate chain "type".
1971 * The chain type indicates a specific property/usage of the chain.
1972 * The redundant entries are present so that the index used for accessing
1973 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1974 * with this table as well.
1975 */
1976
1977 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1978 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1979 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1980 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1981 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1982 /* (for error recovery) */
1983
1984 static int sd_chain_type_map[] = {
1985
1986 /* Chain for buf IO for disk drive targets (PM enabled) */
1987 SD_CHAIN_BUFIO, /* Index: 0 */
1988 SD_CHAIN_BUFIO, /* Index: 1 */
1989 SD_CHAIN_BUFIO, /* Index: 2 */
1990
1991 /* Chain for buf IO for disk drive targets (PM disabled) */
1992 SD_CHAIN_BUFIO, /* Index: 3 */
1993 SD_CHAIN_BUFIO, /* Index: 4 */
1994
1995 /* Chain for buf IO for removable-media targets (PM enabled) */
1996 SD_CHAIN_BUFIO, /* Index: 5 */
1997 SD_CHAIN_BUFIO, /* Index: 6 */
1998 SD_CHAIN_BUFIO, /* Index: 7 */
1999 SD_CHAIN_BUFIO, /* Index: 8 */
2000
2001 /* Chain for buf IO for removable-media targets (PM disabled) */
2002 SD_CHAIN_BUFIO, /* Index: 9 */
2003 SD_CHAIN_BUFIO, /* Index: 10 */
2004 SD_CHAIN_BUFIO, /* Index: 11 */
2005
2006 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2007 SD_CHAIN_BUFIO, /* Index: 12 */
2008 SD_CHAIN_BUFIO, /* Index: 13 */
2009 SD_CHAIN_BUFIO, /* Index: 14 */
2010 SD_CHAIN_BUFIO, /* Index: 15 */
2011
2012 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2013 SD_CHAIN_BUFIO, /* Index: 16 */
2014 SD_CHAIN_BUFIO, /* Index: 17 */
2015 SD_CHAIN_BUFIO, /* Index: 18 */
2016
2017 /* Chain for USCSI commands (non-checksum targets) */
2018 SD_CHAIN_USCSI, /* Index: 19 */
2019 SD_CHAIN_USCSI, /* Index: 20 */
2020
2021 /* Chain for USCSI commands (checksum targets) */
2022 SD_CHAIN_USCSI, /* Index: 21 */
2023 SD_CHAIN_USCSI, /* Index: 22 */
2024 SD_CHAIN_USCSI, /* Index: 23 */
2025
2026 /* Chain for "direct" USCSI commands (all targets) */
2027 SD_CHAIN_DIRECT, /* Index: 24 */
2028
2029 /* Chain for "direct priority" USCSI commands (all targets) */
2030 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2031 };
2032
2033
2034 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2035 #define SD_IS_BUFIO(xp) \
2036 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2037
2038 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2039 #define SD_IS_DIRECT_PRIORITY(xp) \
2040 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2041
2042
2043
2044 /*
2045 * Struct, array, and macros to map a specific chain to the appropriate
2046 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2047 *
2048 * The sd_chain_index_map[] array is used at attach time to set the various
2049 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2050 * chain to be used with the instance. This allows different instances to use
2051 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2052 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2053 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2054 * dynamically and without locking; and (2) a layer to update the
2055 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2056 * to allow for deferred processing of an IO within the same chain from a
2057 * different execution context.
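*
* A hedged sketch of that wiring (the actual assignments are made at
* sd_xbuf init time, as noted above): for an instance attached with
* checksumming and PM enabled,
*
*	un->un_buf_chain_type = SD_CHAIN_INFO_CHKSUM;
*	...
*	xp->xb_chain_iostart =
*	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
*	xp->xb_chain_iodone =
*	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;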
2058 */
2059
2060 struct sd_chain_index {
2061 int sci_iostart_index;
2062 int sci_iodone_index;
2063 };
2064
2065 static struct sd_chain_index sd_chain_index_map[] = {
2066 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2067 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2068 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2069 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2070 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2071 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2072 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2073 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2074 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2075 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2076 };
2077
2078
2079 /*
2080 * The following are indexes into the sd_chain_index_map[] array.
2081 */
2082
2083 /* un->un_buf_chain_type must be set to one of these */
2084 #define SD_CHAIN_INFO_DISK 0
2085 #define SD_CHAIN_INFO_DISK_NO_PM 1
2086 #define SD_CHAIN_INFO_RMMEDIA 2
2087 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2088 #define SD_CHAIN_INFO_CHKSUM 4
2089 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2090
2091 /* un->un_uscsi_chain_type must be set to one of these */
2092 #define SD_CHAIN_INFO_USCSI_CMD 6
2093 /* USCSI with PM disabled is the same as DIRECT */
2094 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2095 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2096
2097 /* un->un_direct_chain_type must be set to one of these */
2098 #define SD_CHAIN_INFO_DIRECT_CMD 8
2099
2100 /* un->un_priority_chain_type must be set to one of these */
2101 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2102
2103 /* size for devid inquiries */
2104 #define MAX_INQUIRY_SIZE 0xF0
2105
2106 /*
2107 * Macros used by functions to pass a given buf(9S) struct along to the
2108 * next function in the layering chain for further processing.
2109 *
2110 * In the following macros, passing more than three arguments to the called
2111 * routines causes the optimizer for the SPARC compiler to stop doing tail
2112 * call elimination, which results in significant performance degradation.
2113 */
2114 #define SD_BEGIN_IOSTART(index, un, bp) \
2115 ((*(sd_iostart_chain[index]))(index, un, bp))
2116
2117 #define SD_BEGIN_IODONE(index, un, bp) \
2118 ((*(sd_iodone_chain[index]))(index, un, bp))
2119
2120 #define SD_NEXT_IOSTART(index, un, bp) \
2121 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2122
2123 #define SD_NEXT_IODONE(index, un, bp) \
2124 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2125
2126 /*
2127 * Function: _init
2128 *
2129 * Description: This is the driver _init(9E) entry point.
2130 *
2131 * Return Code: Returns the value from mod_install(9F) or
2132 * ddi_soft_state_init(9F) as appropriate.
2133 *
2134 * Context: Called when driver module is loaded.
2135 */
2136
2137 int
2138 _init(void)
2139 {
2140 int err;
2141
2142 /* establish driver name from module name */
2143 sd_label = mod_modname(&modlinkage);
2144
2145 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2146 SD_MAXUNIT);
2147
2148 if (err != 0) {
2149 return (err);
2150 }
2151
2152 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2153 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2154 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2155
2156 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2157 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2158 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2159
2160 /*
2161 * it's ok to init here even for fibre device
2162 */
2163 sd_scsi_probe_cache_init();
2164
2165 sd_scsi_target_lun_init();
2166
2167 /*
2168 * Creating taskq before mod_install ensures that all callers (threads)
2169 * that enter the module after a successful mod_install encounter
2170 * a valid taskq.
2171 */
2172 sd_taskq_create();
2173
2174 err = mod_install(&modlinkage);
2175 if (err != 0) {
2176 /* delete taskq if install fails */
2177 sd_taskq_delete();
2178
2179 mutex_destroy(&sd_detach_mutex);
2180 mutex_destroy(&sd_log_mutex);
2181 mutex_destroy(&sd_label_mutex);
2182
2183 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2184 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2185 cv_destroy(&sd_tr.srq_inprocess_cv);
2186
2187 sd_scsi_probe_cache_fini();
2188
2189 sd_scsi_target_lun_fini();
2190
2191 ddi_soft_state_fini(&sd_state);
2192 return (err);
2193 }
2194
2195 return (err);
2196 }
2197
2198
2199 /*
2200 * Function: _fini
2201 *
2202 * Description: This is the driver _fini(9E) entry point.
2203 *
2204 * Return Code: Returns the value from mod_remove(9F).
2205 *
2206 * Context: Called when driver module is unloaded.
2207 */
2208
2209 int
2210 _fini(void)
2211 {
2212 int err;
2213
2214 if ((err = mod_remove(&modlinkage)) != 0) {
2215 return (err);
2216 }
2217
2218 sd_taskq_delete();
2219
2220 mutex_destroy(&sd_detach_mutex);
2221 mutex_destroy(&sd_log_mutex);
2222 mutex_destroy(&sd_label_mutex);
2223 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2224
2225 sd_scsi_probe_cache_fini();
2226
2227 sd_scsi_target_lun_fini();
2228
2229 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2230 cv_destroy(&sd_tr.srq_inprocess_cv);
2231
2232 ddi_soft_state_fini(&sd_state);
2233
2234 return (err);
2235 }
2236
2237
2238 /*
2239 * Function: _info
2240 *
2241 * Description: This is the driver _info(9E) entry point.
2242 *
2243 * Arguments: modinfop - pointer to the driver modinfo structure
2244 *
2245 * Return Code: Returns the value from mod_info(9F).
2246 *
2247 * Context: Kernel thread context
2248 */
2249
2250 int
2251 _info(struct modinfo *modinfop)
2252 {
2253 return (mod_info(&modlinkage, modinfop));
2254 }
2255
2256
2257 /*
2258 * The following routines implement the driver message logging facility.
2259 * They provide component- and level-based debug output filtering.
2260 * Output may also be restricted to messages for a single instance by
2261 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2262 * to NULL, then messages for all instances are printed.
2263 *
2264 * These routines have been cloned from each other due to the language
2265 * constraints of macros and variable argument list processing.
2266 */
2267
2268
2269 /*
2270 * Function: sd_log_err
2271 *
2272 * Description: This routine is called by the SD_ERROR macro for debug
2273 * logging of error conditions.
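*
*		A typical call site (illustrative only; the message text is
*		hypothetical):
*
*		SD_ERROR(SD_LOG_COMMON, un,
*		    "sd_log_err example: operation failed\n");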
2274 *
2275 * Arguments: comp - driver component being logged
2276 * un - pointer to driver soft state (unit) structure
2277 * fmt - error string and format to be logged
2278 */
2279
2280 static void
2281 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2282 {
2283 va_list ap;
2284 dev_info_t *dev;
2285
2286 ASSERT(un != NULL);
2287 dev = SD_DEVINFO(un);
2288 ASSERT(dev != NULL);
2289
2290 /*
2291 * Filter messages based on the global component and level masks.
2292 * Also print if un matches the value of sd_debug_un, or if
2293 * sd_debug_un is set to NULL.
2294 */
2295 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2296 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2297 mutex_enter(&sd_log_mutex);
2298 va_start(ap, fmt);
2299 (void) vsprintf(sd_log_buf, fmt, ap);
2300 va_end(ap);
2301 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2302 mutex_exit(&sd_log_mutex);
2303 }
2304 #ifdef SD_FAULT_INJECTION
2305 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2306 if (un->sd_injection_mask & comp) {
2307 mutex_enter(&sd_log_mutex);
2308 va_start(ap, fmt);
2309 (void) vsprintf(sd_log_buf, fmt, ap);
2310 va_end(ap);
2311 sd_injection_log(sd_log_buf, un);
2312 mutex_exit(&sd_log_mutex);
2313 }
2314 #endif
2315 }
2316
2317
2318 /*
2319 * Function: sd_log_info
2320 *
2321 * Description: This routine is called by the SD_INFO macro for debug
2322 * logging of general purpose informational conditions.
2323 *
2324 * Arguments: component - driver component being logged
2325 * un - pointer to driver soft state (unit) structure
2326 * fmt - info string and format to be logged
2327 */
2328
2329 static void
2330 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2331 {
2332 va_list ap;
2333 dev_info_t *dev;
2334
2335 ASSERT(un != NULL);
2336 dev = SD_DEVINFO(un);
2337 ASSERT(dev != NULL);
2338
2339 /*
2340 * Filter messages based on the global component and level masks.
2341 * Also print if un matches the value of sd_debug_un, or if
2342 * sd_debug_un is set to NULL.
2343 */
2344 if ((sd_component_mask & component) &&
2345 (sd_level_mask & SD_LOGMASK_INFO) &&
2346 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2347 mutex_enter(&sd_log_mutex);
2348 va_start(ap, fmt);
2349 (void) vsprintf(sd_log_buf, fmt, ap);
2350 va_end(ap);
2351 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2352 mutex_exit(&sd_log_mutex);
2353 }
2354 #ifdef SD_FAULT_INJECTION
2355 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2356 if (un->sd_injection_mask & component) {
2357 mutex_enter(&sd_log_mutex);
2358 va_start(ap, fmt);
2359 (void) vsprintf(sd_log_buf, fmt, ap);
2360 va_end(ap);
2361 sd_injection_log(sd_log_buf, un);
2362 mutex_exit(&sd_log_mutex);
2363 }
2364 #endif
2365 }
2366
2367
2368 /*
2369 * Function: sd_log_trace
2370 *
2371 * Description: This routine is called by the SD_TRACE macro for debug
2372 * logging of trace conditions (i.e. function entry/exit).
2373 *
2374 * Arguments: component - driver component being logged
2375 * un - pointer to driver soft state (unit) structure
2376 * fmt - trace string and format to be logged
2377 */
2378
2379 static void
2380 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2381 {
2382 va_list ap;
2383 dev_info_t *dev;
2384
2385 ASSERT(un != NULL);
2386 dev = SD_DEVINFO(un);
2387 ASSERT(dev != NULL);
2388
2389 /*
2390 * Filter messages based on the global component and level masks.
2391 * Also print if un matches the value of sd_debug_un, or if
2392 * sd_debug_un is set to NULL.
2393 */
2394 if ((sd_component_mask & component) &&
2395 (sd_level_mask & SD_LOGMASK_TRACE) &&
2396 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2397 mutex_enter(&sd_log_mutex);
2398 va_start(ap, fmt);
2399 (void) vsprintf(sd_log_buf, fmt, ap);
2400 va_end(ap);
2401 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2402 mutex_exit(&sd_log_mutex);
2403 }
2404 #ifdef SD_FAULT_INJECTION
2405 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2406 if (un->sd_injection_mask & component) {
2407 mutex_enter(&sd_log_mutex);
2408 va_start(ap, fmt);
2409 (void) vsprintf(sd_log_buf, fmt, ap);
2410 va_end(ap);
2411 sd_injection_log(sd_log_buf, un);
2412 mutex_exit(&sd_log_mutex);
2413 }
2414 #endif
2415 }
2416
2417
2418 /*
2419 * Function: sdprobe
2420 *
2421 * Description: This is the driver probe(9e) entry point function.
2422 *
2423 * Arguments: devi - opaque device info handle
2424 *
2425 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2426 * DDI_PROBE_FAILURE: If the probe failed.
2427 * DDI_PROBE_PARTIAL: If the instance is not present now,
2428 * but may be present in the future.
2429 */
2430
2431 static int
2432 sdprobe(dev_info_t *devi)
2433 {
2434 struct scsi_device *devp;
2435 int rval;
2436 int instance;
2437
2438 /*
2439 * if it wasn't for pln, sdprobe could actually be nulldev
2440 * in the "__fibre" case.
2441 */
2442 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2443 return (DDI_PROBE_DONTCARE);
2444 }
2445
2446 devp = ddi_get_driver_private(devi);
2447
2448 if (devp == NULL) {
2449 /* Oops... nexus driver is mis-configured... */
2450 return (DDI_PROBE_FAILURE);
2451 }
2452
2453 instance = ddi_get_instance(devi);
2454
2455 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2456 return (DDI_PROBE_PARTIAL);
2457 }
2458
2459 /*
2460 * Call the SCSA utility probe routine to see if we actually
2461 * have a target at this SCSI nexus.
2462 */
2463 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2464 case SCSIPROBE_EXISTS:
2465 switch (devp->sd_inq->inq_dtype) {
2466 case DTYPE_DIRECT:
2467 rval = DDI_PROBE_SUCCESS;
2468 break;
2469 case DTYPE_RODIRECT:
2470 /* CDs etc. Can be removable media */
2471 rval = DDI_PROBE_SUCCESS;
2472 break;
2473 case DTYPE_OPTICAL:
2474 /*
2475 * Rewritable optical drive HP115AA.
2476 * Can also be removable media.
2477 */
2478
2479 /*
2480 * Do not attempt to bind to DTYPE_OPTICAL if
2481 * pre-Solaris 9 SPARC sd behavior is required.
2482 *
2483 * If this is the first time through and
2484 * sd_dtype_optical_bind has not been set in
2485 * /etc/system, check properties.
2486 */
2487 if (sd_dtype_optical_bind < 0) {
2488 sd_dtype_optical_bind = ddi_prop_get_int
2489 (DDI_DEV_T_ANY, devi, 0,
2490 "optical-device-bind", 1);
2491 }
2492
2493 if (sd_dtype_optical_bind == 0) {
2494 rval = DDI_PROBE_FAILURE;
2495 } else {
2496 rval = DDI_PROBE_SUCCESS;
2497 }
2498 break;
2499
2500 case DTYPE_NOTPRESENT:
2501 default:
2502 rval = DDI_PROBE_FAILURE;
2503 break;
2504 }
2505 break;
2506 default:
2507 rval = DDI_PROBE_PARTIAL;
2508 break;
2509 }
2510
2511 /*
2512 * This routine checks for resource allocation prior to freeing,
2513 * so it will take care of the "smart probing" case where a
2514 * scsi_probe() may or may not have been issued and will *not*
2515 * free previously-freed resources.
2516 */
2517 scsi_unprobe(devp);
2518 return (rval);
2519 }
2520
2521
2522 /*
2523 * Function: sdinfo
2524 *
2525 * Description: This is the driver getinfo(9e) entry point function.
2526 * Given the device number, return the devinfo pointer from
2527 * the scsi_device structure or the instance number
2528 * associated with the dev_t.
2529 *
2530 * Arguments: dip - pointer to device info structure
2531 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2532 * DDI_INFO_DEVT2INSTANCE)
2533 * arg - driver dev_t
2534 * resultp - user buffer for request response
2535 *
2536 * Return Code: DDI_SUCCESS
2537 * DDI_FAILURE
2538 */
2539 /* ARGSUSED */
2540 static int
2541 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2542 {
2543 struct sd_lun *un;
2544 dev_t dev;
2545 int instance;
2546 int error;
2547
2548 switch (infocmd) {
2549 case DDI_INFO_DEVT2DEVINFO:
2550 dev = (dev_t)arg;
2551 instance = SDUNIT(dev);
2552 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2553 return (DDI_FAILURE);
2554 }
2555 *result = (void *) SD_DEVINFO(un);
2556 error = DDI_SUCCESS;
2557 break;
2558 case DDI_INFO_DEVT2INSTANCE:
2559 dev = (dev_t)arg;
2560 instance = SDUNIT(dev);
2561 *result = (void *)(uintptr_t)instance;
2562 error = DDI_SUCCESS;
2563 break;
2564 default:
2565 error = DDI_FAILURE;
2566 }
2567 return (error);
2568 }
2569
2570 /*
2571 * Function: sd_prop_op
2572 *
2573 * Description: This is the driver prop_op(9e) entry point function.
2574 * Return the number of blocks for the partition in question
2575 * or forward the request to the property facilities.
2576 *
2577 * Arguments: dev - device number
2578 * dip - pointer to device info structure
2579 * prop_op - property operator
2580 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2581 * name - pointer to property name
2582 * valuep - pointer or address of the user buffer
2583 * lengthp - property length
2584 *
2585 * Return Code: DDI_PROP_SUCCESS
2586 * DDI_PROP_NOT_FOUND
2587 * DDI_PROP_UNDEFINED
2588 * DDI_PROP_NO_MEMORY
2589 * DDI_PROP_BUF_TOO_SMALL
2590 */
2591
2592 static int
2593 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2594 char *name, caddr_t valuep, int *lengthp)
2595 {
2596 int instance = ddi_get_instance(dip);
2597 struct sd_lun *un;
2598 uint64_t nblocks64;
2599 uint_t dblk;
2600
2601 /*
2602 * Our dynamic properties are all device specific and size oriented.
2603 * Requests issued under conditions where size is valid are passed
2604 * to ddi_prop_op_nblocks_blksize with the size information, otherwise
2605 * the request is passed to ddi_prop_op. Size depends on valid geometry.
2606 */ 2607 un = ddi_get_soft_state(sd_state, instance); 2608 if ((dev == DDI_DEV_T_ANY) || (un == NULL)) { 2609 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2610 name, valuep, lengthp)); 2611 } else if (!SD_IS_VALID_LABEL(un)) { 2612 return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, 2613 valuep, lengthp)); 2614 } 2615 2616 /* get nblocks value */ 2617 ASSERT(!mutex_owned(SD_MUTEX(un))); 2618 2619 (void) cmlb_partinfo(un->un_cmlbhandle, SDPART(dev), 2620 (diskaddr_t *)&nblocks64, NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 2621 2622 /* report size in target size blocks */ 2623 dblk = un->un_tgt_blocksize / un->un_sys_blocksize; 2624 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags, 2625 name, valuep, lengthp, nblocks64 / dblk, un->un_tgt_blocksize)); 2626 } 2627 2628 /* 2629 * The following functions are for smart probing: 2630 * sd_scsi_probe_cache_init() 2631 * sd_scsi_probe_cache_fini() 2632 * sd_scsi_clear_probe_cache() 2633 * sd_scsi_probe_with_cache() 2634 */ 2635 2636 /* 2637 * Function: sd_scsi_probe_cache_init 2638 * 2639 * Description: Initializes the probe response cache mutex and head pointer. 2640 * 2641 * Context: Kernel thread context 2642 */ 2643 2644 static void 2645 sd_scsi_probe_cache_init(void) 2646 { 2647 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2648 sd_scsi_probe_cache_head = NULL; 2649 } 2650 2651 2652 /* 2653 * Function: sd_scsi_probe_cache_fini 2654 * 2655 * Description: Frees all resources associated with the probe response cache. 2656 * 2657 * Context: Kernel thread context 2658 */ 2659 2660 static void 2661 sd_scsi_probe_cache_fini(void) 2662 { 2663 struct sd_scsi_probe_cache *cp; 2664 struct sd_scsi_probe_cache *ncp; 2665 2666 /* Clean up our smart probing linked list */ 2667 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2668 ncp = cp->next; 2669 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2670 } 2671 sd_scsi_probe_cache_head = NULL; 2672 mutex_destroy(&sd_scsi_probe_cache_mutex); 2673 } 2674 2675 2676 /* 2677 * Function: sd_scsi_clear_probe_cache 2678 * 2679 * Description: This routine clears the probe response cache. This is 2680 * done when open() returns ENXIO so that when deferred 2681 * attach is attempted (possibly after a device has been 2682 * turned on) we will retry the probe. Since we don't know 2683 * which target we failed to open, we just clear the 2684 * entire cache. 2685 * 2686 * Context: Kernel thread context 2687 */ 2688 2689 static void 2690 sd_scsi_clear_probe_cache(void) 2691 { 2692 struct sd_scsi_probe_cache *cp; 2693 int i; 2694 2695 mutex_enter(&sd_scsi_probe_cache_mutex); 2696 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2697 /* 2698 * Reset all entries to SCSIPROBE_EXISTS. This will 2699 * force probing to be performed the next time 2700 * sd_scsi_probe_with_cache is called. 2701 */ 2702 for (i = 0; i < NTARGETS_WIDE; i++) { 2703 cp->cache[i] = SCSIPROBE_EXISTS; 2704 } 2705 } 2706 mutex_exit(&sd_scsi_probe_cache_mutex); 2707 } 2708 2709 2710 /* 2711 * Function: sd_scsi_probe_with_cache 2712 * 2713 * Description: This routine implements support for a scsi device probe 2714 * with cache. The driver maintains a cache of the target 2715 * responses to scsi probes. If we get no response from a 2716 * target during a probe inquiry, we remember that, and we 2717 * avoid additional calls to scsi_probe on non-zero LUNs 2718 * on the same target until the cache is cleared. 
By doing
2719 * so we avoid the 1/4 sec selection timeout for nonzero
2720 * LUNs. lun0 of a target is always probed.
2721 *
2722 * Arguments: devp - Pointer to a scsi_device(9S) structure
2723 * waitfunc - indicates what the allocator routines should
2724 * do when resources are not available. This value
2725 * is passed on to scsi_probe() when that routine
2726 * is called.
2727 *
2728 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2729 * otherwise the value returned by scsi_probe(9F).
2730 *
2731 * Context: Kernel thread context
2732 */
2733
2734 static int
2735 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2736 {
2737 struct sd_scsi_probe_cache *cp;
2738 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2739 int lun, tgt;
2740
2741 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2742 SCSI_ADDR_PROP_LUN, 0);
2743 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2744 SCSI_ADDR_PROP_TARGET, -1);
2745
2746 /* Make sure caching enabled and target in range */
2747 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2748 /* do it the old way (no cache) */
2749 return (scsi_probe(devp, waitfn));
2750 }
2751
2752 mutex_enter(&sd_scsi_probe_cache_mutex);
2753
2754 /* Find the cache for this scsi bus instance */
2755 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2756 if (cp->pdip == pdip) {
2757 break;
2758 }
2759 }
2760
2761 /* If we can't find a cache for this pdip, create one */
2762 if (cp == NULL) {
2763 int i;
2764
2765 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2766 KM_SLEEP);
2767 cp->pdip = pdip;
2768 cp->next = sd_scsi_probe_cache_head;
2769 sd_scsi_probe_cache_head = cp;
2770 for (i = 0; i < NTARGETS_WIDE; i++) {
2771 cp->cache[i] = SCSIPROBE_EXISTS;
2772 }
2773 }
2774
2775 mutex_exit(&sd_scsi_probe_cache_mutex);
2776
2777 /* Recompute the cache for this target if LUN zero */
2778 if (lun == 0) {
2779 cp->cache[tgt] = SCSIPROBE_EXISTS;
2780 }
2781
2782 /* Don't probe if cache remembers a NORESP from a previous LUN. */
2783 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2784 return (SCSIPROBE_NORESP);
2785 }
2786
2787 /* Do the actual probe; save & return the result */
2788 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2789 }
2790
2791
2792 /*
2793 * Function: sd_scsi_target_lun_init
2794 *
2795 * Description: Initializes the attached lun chain mutex and head pointer.
2796 *
2797 * Context: Kernel thread context
2798 */
2799
2800 static void
2801 sd_scsi_target_lun_init(void)
2802 {
2803 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2804 sd_scsi_target_lun_head = NULL;
2805 }
2806
2807
2808 /*
2809 * Function: sd_scsi_target_lun_fini
2810 *
2811 * Description: Frees all resources associated with the attached lun
2812 * chain.
2813 *
2814 * Context: Kernel thread context
2815 */
2816
2817 static void
2818 sd_scsi_target_lun_fini(void)
2819 {
2820 struct sd_scsi_hba_tgt_lun *cp;
2821 struct sd_scsi_hba_tgt_lun *ncp;
2822
2823 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2824 ncp = cp->next;
2825 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2826 }
2827 sd_scsi_target_lun_head = NULL;
2828 mutex_destroy(&sd_scsi_target_lun_mutex);
2829 }
2830
2831
2832 /*
2833 * Function: sd_scsi_get_target_lun_count
2834 *
2835 * Description: This routine will check in the attached lun chain to see
2836 * how many luns are attached on the required SCSI controller
2837 * and target.
* Currently, some capabilities, such as tagged queueing,
2838 * are supported per target by the HBA, so all luns in a
2839 * target have the same capabilities. Based on this assumption,
2840 * sd should only set these capabilities once per target. This
2841 * function is called when sd needs to decide how many luns
2842 * are already attached on a target.
2843 *
2844 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2845 * controller device.
2846 * target - The target ID on the controller's SCSI bus.
2847 *
2848 * Return Code: The number of luns attached on the required target and
2849 * controller.
2850 * -1 if target ID is not in parallel SCSI scope or the given
2851 * dip is not in the chain.
2852 *
2853 * Context: Kernel thread context
2854 */
2855
2856 static int
2857 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2858 {
2859 struct sd_scsi_hba_tgt_lun *cp;
2860
2861 if ((target < 0) || (target >= NTARGETS_WIDE)) {
2862 return (-1);
2863 }
2864
2865 mutex_enter(&sd_scsi_target_lun_mutex);
2866
2867 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2868 if (cp->pdip == dip) {
2869 break;
2870 }
2871 }
2872
2873 mutex_exit(&sd_scsi_target_lun_mutex);
2874
2875 if (cp == NULL) {
2876 return (-1);
2877 }
2878
2879 return (cp->nlun[target]);
2880 }
2881
2882
2883 /*
2884 * Function: sd_scsi_update_lun_on_target
2885 *
2886 * Description: This routine is used to update the attached lun chain when a
2887 * lun is attached or detached on a target.
2888 *
2889 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2890 * controller device.
2891 * target - The target ID on the controller's SCSI bus.
2892 * flag - Indicates whether the lun is attached or detached.
2893 *
2894 * Context: Kernel thread context
2895 */
2896
2897 static void
2898 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2899 {
2900 struct sd_scsi_hba_tgt_lun *cp;
2901
2902 mutex_enter(&sd_scsi_target_lun_mutex);
2903
2904 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2905 if (cp->pdip == dip) {
2906 break;
2907 }
2908 }
2909
2910 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2911 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2912 KM_SLEEP);
2913 cp->pdip = dip;
2914 cp->next = sd_scsi_target_lun_head;
2915 sd_scsi_target_lun_head = cp;
2916 }
2917
2918 mutex_exit(&sd_scsi_target_lun_mutex);
2919
2920 if (cp != NULL) {
2921 if (flag == SD_SCSI_LUN_ATTACH) {
2922 cp->nlun[target]++;
2923 } else {
2924 cp->nlun[target]--;
2925 }
2926 }
2927 }
2928
2929
2930 /*
2931 * Function: sd_spin_up_unit
2932 *
2933 * Description: Issues the following commands to spin up the device:
2934 * START STOP UNIT, and INQUIRY.
2935 *
2936 * Arguments: un - driver soft state (unit) structure
2937 *
2938 * Return Code: 0 - success
2939 * EIO - failure
2940 * EACCES - reservation conflict
2941 *
2942 * Context: Kernel thread context
2943 */
2944
2945 static int
2946 sd_spin_up_unit(struct sd_lun *un)
2947 {
2948 size_t resid = 0;
2949 int has_conflict = FALSE;
2950 uchar_t *bufaddr;
2951
2952 ASSERT(un != NULL);
2953
2954 /*
2955 * Send a throwaway START UNIT command.
2956 *
2957 * If we fail on this, we don't care presently what precisely
2958 * is wrong. EMC's arrays will also fail this with a check
2959 * condition (0x2/0x4/0x3) if the device is "inactive," but
2960 * we don't want to fail the attach because it may become
2961 * "active" later.
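*
* (In that check condition, 0x2/0x4/0x3 decodes as sense key NOT READY,
* ASC/ASCQ 0x04/0x03: logical unit not ready, manual intervention
* required.)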
2962 */
2963 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2964 == EACCES)
2965 has_conflict = TRUE;
2966
2967 /*
2968 * Send another INQUIRY command to the target. This is necessary for
2969 * non-removable media direct access devices because their INQUIRY data
2970 * may not be fully qualified until they are spun up (perhaps via the
2971 * START command above). Note: This seems to be needed for some
2972 * legacy devices only. The INQUIRY command should succeed even if a
2973 * Reservation Conflict is present.
2974 */
2975 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2976 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2977 kmem_free(bufaddr, SUN_INQSIZE);
2978 return (EIO);
2979 }
2980
2981 /*
2982 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2983 * Note that this routine does not return a failure here even if the
2984 * INQUIRY command did not return any data. This is a legacy behavior.
2985 */
2986 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
2987 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
2988 }
2989
2990 kmem_free(bufaddr, SUN_INQSIZE);
2991
2992 /* If we hit a reservation conflict above, tell the caller. */
2993 if (has_conflict == TRUE) {
2994 return (EACCES);
2995 }
2996
2997 return (0);
2998 }
2999
3000 #ifdef _LP64
3001 /*
3002 * Function: sd_enable_descr_sense
3003 *
3004 * Description: This routine attempts to select descriptor sense format
3005 * using the Control mode page. Devices that support 64 bit
3006 * LBAs (for >2TB luns) should also implement descriptor
3007 * sense data, so we will call this function whenever we see
3008 * a lun larger than 2TB. If for some reason the device
3009 * supports 64 bit LBAs but doesn't support descriptor sense,
3010 * presumably the mode select will fail. Everything will
3011 * continue to work normally except that we will not get
3012 * complete sense data for commands that fail with an LBA
3013 * larger than 32 bits.
3014 *
3015 * Arguments: un - driver soft state (unit) structure
3016 *
3017 * Context: Kernel thread context only
3018 */
3019
3020 static void
3021 sd_enable_descr_sense(struct sd_lun *un)
3022 {
3023 uchar_t *header;
3024 struct mode_control_scsi3 *ctrl_bufp;
3025 size_t buflen;
3026 size_t bd_len;
3027
3028 /*
3029 * Read MODE SENSE page 0xA, Control Mode Page
3030 */
3031 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3032 sizeof (struct mode_control_scsi3);
3033 header = kmem_zalloc(buflen, KM_SLEEP);
3034 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
3035 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
3036 SD_ERROR(SD_LOG_COMMON, un,
3037 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3038 goto eds_exit;
3039 }
3040
3041 /*
3042 * Determine size of Block Descriptors in order to locate
3043 * the mode page data. ATAPI devices return 0, SCSI devices
3044 * should return MODE_BLK_DESC_LENGTH.
3045 */
3046 bd_len = ((struct mode_header *)header)->bdesc_length;
3047
3048 /* Clear the mode data length field for MODE SELECT */
3049 ((struct mode_header *)header)->length = 0;
3050
3051 ctrl_bufp = (struct mode_control_scsi3 *)
3052 (header + MODE_HEADER_LENGTH + bd_len);
3053
3054 /*
3055 * If the page length is smaller than the expected value,
3056 * the target device doesn't support D_SENSE. Bail out here.
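*
* (The mode page "length" field excludes the two-byte page header,
* which is why the check below compares against
* sizeof (struct mode_control_scsi3) - 2.)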
3057 */ 3058 if (ctrl_bufp->mode_page.length < 3059 sizeof (struct mode_control_scsi3) - 2) { 3060 SD_ERROR(SD_LOG_COMMON, un, 3061 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3062 goto eds_exit; 3063 } 3064 3065 /* 3066 * Clear PS bit for MODE SELECT 3067 */ 3068 ctrl_bufp->mode_page.ps = 0; 3069 3070 /* 3071 * Set D_SENSE to enable descriptor sense format. 3072 */ 3073 ctrl_bufp->d_sense = 1; 3074 3075 /* 3076 * Use MODE SELECT to commit the change to the D_SENSE bit 3077 */ 3078 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 3079 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 3080 SD_INFO(SD_LOG_COMMON, un, 3081 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3082 goto eds_exit; 3083 } 3084 3085 eds_exit: 3086 kmem_free(header, buflen); 3087 } 3088 3089 /* 3090 * Function: sd_reenable_dsense_task 3091 * 3092 * Description: Re-enable descriptor sense after device or bus reset 3093 * 3094 * Context: Executes in a taskq() thread context 3095 */ 3096 static void 3097 sd_reenable_dsense_task(void *arg) 3098 { 3099 struct sd_lun *un = arg; 3100 3101 ASSERT(un != NULL); 3102 sd_enable_descr_sense(un); 3103 } 3104 #endif /* _LP64 */ 3105 3106 /* 3107 * Function: sd_set_mmc_caps 3108 * 3109 * Description: This routine determines if the device is MMC compliant and if 3110 * the device supports CDDA via a mode sense of the CDVD 3111 * capabilities mode page. Also checks if the device is a 3112 * dvdram writable device. 3113 * 3114 * Arguments: un - driver soft state (unit) structure 3115 * 3116 * Context: Kernel thread context only 3117 */ 3118 3119 static void 3120 sd_set_mmc_caps(struct sd_lun *un) 3121 { 3122 struct mode_header_grp2 *sense_mhp; 3123 uchar_t *sense_page; 3124 caddr_t buf; 3125 int bd_len; 3126 int status; 3127 struct uscsi_cmd com; 3128 int rtn; 3129 uchar_t *out_data_rw, *out_data_hd; 3130 uchar_t *rqbuf_rw, *rqbuf_hd; 3131 3132 ASSERT(un != NULL); 3133 3134 /* 3135 * The flags which will be set in this function are - mmc compliant, 3136 * dvdram writable device, cdda support. Initialize them to FALSE 3137 * and if a capability is detected - it will be set to TRUE. 3138 */ 3139 un->un_f_mmc_cap = FALSE; 3140 un->un_f_dvdram_writable_device = FALSE; 3141 un->un_f_cfg_cdda = FALSE; 3142 3143 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3144 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3145 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3146 3147 if (status != 0) { 3148 /* command failed; just return */ 3149 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3150 return; 3151 } 3152 /* 3153 * If the mode sense request for the CDROM CAPABILITIES 3154 * page (0x2A) succeeds the device is assumed to be MMC. 3155 */ 3156 un->un_f_mmc_cap = TRUE; 3157 3158 /* Get to the page data */ 3159 sense_mhp = (struct mode_header_grp2 *)buf; 3160 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3161 sense_mhp->bdesc_length_lo; 3162 if (bd_len > MODE_BLK_DESC_LENGTH) { 3163 /* 3164 * We did not get back the expected block descriptor 3165 * length so we cannot determine if the device supports 3166 * CDDA. However, we still indicate the device is MMC 3167 * according to the successful response to the page 3168 * 0x2A mode sense request. 
3169 */ 3170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3171 "sd_set_mmc_caps: Mode Sense returned " 3172 "invalid block descriptor length\n"); 3173 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3174 return; 3175 } 3176 3177 /* See if read CDDA is supported */ 3178 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3179 bd_len); 3180 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3181 3182 /* See if writing DVD RAM is supported. */ 3183 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3184 if (un->un_f_dvdram_writable_device == TRUE) { 3185 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3186 return; 3187 } 3188 3189 /* 3190 * If the device presents DVD or CD capabilities in the mode 3191 * page, we can return here since a RRD will not have 3192 * these capabilities. 3193 */ 3194 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3195 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3196 return; 3197 } 3198 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3199 3200 /* 3201 * If un->un_f_dvdram_writable_device is still FALSE, 3202 * check for a Removable Rigid Disk (RRD). A RRD 3203 * device is identified by the features RANDOM_WRITABLE and 3204 * HARDWARE_DEFECT_MANAGEMENT. 3205 */ 3206 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3207 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3208 3209 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3210 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3211 RANDOM_WRITABLE, SD_PATH_STANDARD); 3212 if (rtn != 0) { 3213 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3214 kmem_free(rqbuf_rw, SENSE_LENGTH); 3215 return; 3216 } 3217 3218 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3219 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3220 3221 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3222 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3223 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3224 if (rtn == 0) { 3225 /* 3226 * We have good information, check for random writable 3227 * and hardware defect features. 3228 */ 3229 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3230 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3231 un->un_f_dvdram_writable_device = TRUE; 3232 } 3233 } 3234 3235 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3236 kmem_free(rqbuf_rw, SENSE_LENGTH); 3237 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3238 kmem_free(rqbuf_hd, SENSE_LENGTH); 3239 } 3240 3241 /* 3242 * Function: sd_check_for_writable_cd 3243 * 3244 * Description: This routine determines if the media in the device is 3245 * writable or not. It uses the get configuration command (0x46) 3246 * to determine if the media is writable 3247 * 3248 * Arguments: un - driver soft state (unit) structure 3249 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3250 * chain and the normal command waitq, or 3251 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3252 * "direct" chain and bypass the normal command 3253 * waitq. 3254 * 3255 * Context: Never called at interrupt context. 
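*
*		Note (an illustrative decoding, not an added check):
*		bytes 6-7 of the GET CONFIGURATION feature header hold
*		the current profile, and the code below treats profile
*		0x12 (DVD-RAM) as writable media.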
 */

static void
sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
{
	struct uscsi_cmd	com;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize writable media to FALSE; it is set to TRUE only if
	 * the configuration information tells us otherwise.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN, path_flag);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
		 */
		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
			un->un_f_mmc_writable_media = TRUE;
			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
			kmem_free(rqbuf, SENSE_LENGTH);
			return;
		}
	}

	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);

	/*
	 * Determine if this is an RRD type device.
	 */
	mutex_exit(SD_MUTEX(un));
	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
	mutex_enter(SD_MUTEX(un));
	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor length,
		 * so we cannot check the mode page.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_check_for_writable_cd: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD device will not have
	 * these capabilities.
	 */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_mmc_writable_media is still FALSE,
	 * check for RRD type media. An RRD device is identified
	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
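	 * In addition to the feature bits, the code below also requires
	 * each feature to be flagged as "current" (bit 0 of byte 10 of
	 * the returned feature data), i.e. active for the media that is
	 * presently in the drive.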
3350 */ 3351 mutex_exit(SD_MUTEX(un)); 3352 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3353 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3354 3355 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3356 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3357 RANDOM_WRITABLE, path_flag); 3358 if (rtn != 0) { 3359 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3360 kmem_free(rqbuf_rw, SENSE_LENGTH); 3361 mutex_enter(SD_MUTEX(un)); 3362 return; 3363 } 3364 3365 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3366 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3367 3368 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3369 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3370 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3371 mutex_enter(SD_MUTEX(un)); 3372 if (rtn == 0) { 3373 /* 3374 * We have good information, check for random writable 3375 * and hardware defect features as current. 3376 */ 3377 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3378 (out_data_rw[10] & 0x1) && 3379 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3380 (out_data_hd[10] & 0x1)) { 3381 un->un_f_mmc_writable_media = TRUE; 3382 } 3383 } 3384 3385 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3386 kmem_free(rqbuf_rw, SENSE_LENGTH); 3387 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3388 kmem_free(rqbuf_hd, SENSE_LENGTH); 3389 } 3390 3391 /* 3392 * Function: sd_read_unit_properties 3393 * 3394 * Description: The following implements a property lookup mechanism. 3395 * Properties for particular disks (keyed on vendor, model 3396 * and rev numbers) are sought in the sd.conf file via 3397 * sd_process_sdconf_file(), and if not found there, are 3398 * looked for in a list hardcoded in this driver via 3399 * sd_process_sdconf_table() Once located the properties 3400 * are used to update the driver unit structure. 3401 * 3402 * Arguments: un - driver soft state (unit) structure 3403 */ 3404 3405 static void 3406 sd_read_unit_properties(struct sd_lun *un) 3407 { 3408 /* 3409 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3410 * the "sd-config-list" property (from the sd.conf file) or if 3411 * there was not a match for the inquiry vid/pid. If this event 3412 * occurs the static driver configuration table is searched for 3413 * a match. 3414 */ 3415 ASSERT(un != NULL); 3416 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3417 sd_process_sdconf_table(un); 3418 } 3419 3420 /* check for LSI device */ 3421 sd_is_lsi(un); 3422 3423 3424 } 3425 3426 3427 /* 3428 * Function: sd_process_sdconf_file 3429 * 3430 * Description: Use ddi_getlongprop to obtain the properties from the 3431 * driver's config file (ie, sd.conf) and update the driver 3432 * soft state structure accordingly. 3433 * 3434 * Arguments: un - driver soft state (unit) structure 3435 * 3436 * Return Code: SD_SUCCESS - The properties were successfully set according 3437 * to the driver configuration file. 3438 * SD_FAILURE - The driver config list was not obtained or 3439 * there was no vid/pid match. This indicates that 3440 * the static config table should be used. 
3441 * 3442 * The config file has a property, "sd-config-list", which consists of 3443 * one or more duplets as follows: 3444 * 3445 * sd-config-list= 3446 * <duplet>, 3447 * [<duplet>,] 3448 * [<duplet>]; 3449 * 3450 * The structure of each duplet is as follows: 3451 * 3452 * <duplet>:= <vid+pid>,<data-property-name_list> 3453 * 3454 * The first entry of the duplet is the device ID string (the concatenated 3455 * vid & pid; not to be confused with a device_id). This is defined in 3456 * the same way as in the sd_disk_table. 3457 * 3458 * The second part of the duplet is a string that identifies a 3459 * data-property-name-list. The data-property-name-list is defined as 3460 * follows: 3461 * 3462 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3463 * 3464 * The syntax of <data-property-name> depends on the <version> field. 3465 * 3466 * If version = SD_CONF_VERSION_1 we have the following syntax: 3467 * 3468 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3469 * 3470 * where the prop0 value will be used to set prop0 if bit0 set in the 3471 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3472 * 3473 */ 3474 3475 static int 3476 sd_process_sdconf_file(struct sd_lun *un) 3477 { 3478 char *config_list = NULL; 3479 int config_list_len; 3480 int len; 3481 int dupletlen = 0; 3482 char *vidptr; 3483 int vidlen; 3484 char *dnlist_ptr; 3485 char *dataname_ptr; 3486 int dnlist_len; 3487 int dataname_len; 3488 int *data_list; 3489 int data_list_len; 3490 int rval = SD_FAILURE; 3491 int i; 3492 3493 ASSERT(un != NULL); 3494 3495 /* Obtain the configuration list associated with the .conf file */ 3496 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3497 sd_config_list, (caddr_t)&config_list, &config_list_len) 3498 != DDI_PROP_SUCCESS) { 3499 return (SD_FAILURE); 3500 } 3501 3502 /* 3503 * Compare vids in each duplet to the inquiry vid - if a match is 3504 * made, get the data value and update the soft state structure 3505 * accordingly. 3506 * 3507 * Note: This algorithm is complex and difficult to maintain. It should 3508 * be replaced with a more robust implementation. 3509 */ 3510 for (len = config_list_len, vidptr = config_list; len > 0; 3511 vidptr += dupletlen, len -= dupletlen) { 3512 /* 3513 * Note: The assumption here is that each vid entry is on 3514 * a unique line from its associated duplet. 3515 */ 3516 vidlen = dupletlen = (int)strlen(vidptr); 3517 if ((vidlen == 0) || 3518 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3519 dupletlen++; 3520 continue; 3521 } 3522 3523 /* 3524 * dnlist contains 1 or more blank separated 3525 * data-property-name entries 3526 */ 3527 dnlist_ptr = vidptr + vidlen + 1; 3528 dnlist_len = (int)strlen(dnlist_ptr); 3529 dupletlen += dnlist_len + 2; 3530 3531 /* 3532 * Set a pointer for the first data-property-name 3533 * entry in the list 3534 */ 3535 dataname_ptr = dnlist_ptr; 3536 dataname_len = 0; 3537 3538 /* 3539 * Loop through all data-property-name entries in the 3540 * data-property-name-list setting the properties for each. 3541 */ 3542 while (dataname_len < dnlist_len) { 3543 int version; 3544 3545 /* 3546 * Determine the length of the current 3547 * data-property-name entry by indexing until a 3548 * blank or NULL is encountered. When the space is 3549 * encountered reset it to a NULL for compliance 3550 * with ddi_getlongprop(). 
			 */
			for (i = 0; ((dataname_ptr[i] != ' ') &&
			    (dataname_ptr[i] != '\0')); i++) {
				;
			}

			dataname_len += i;
			/* If not NULL terminated, make it so */
			if (dataname_ptr[i] == ' ') {
				dataname_ptr[i] = '\0';
			}
			dataname_len++;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_file: disk:%s, data:%s\n",
			    vidptr, dataname_ptr);

			/* Get the data list */
			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
			    != DDI_PROP_SUCCESS) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: data property (%s)"
				    " has no value\n", dataname_ptr);
				dataname_ptr = dnlist_ptr + dataname_len;
				continue;
			}

			version = data_list[0];

			if (version == SD_CONF_VERSION_1) {
				sd_tunables values;

				/* Set the properties */
				if (sd_chk_vers1_data(un, data_list[1],
				    &data_list[2], data_list_len, dataname_ptr)
				    == SD_SUCCESS) {
					sd_get_tunables_from_conf(un,
					    data_list[1], &data_list[2],
					    &values);
					sd_set_vers1_properties(un,
					    data_list[1], &values);
					rval = SD_SUCCESS;
				} else {
					rval = SD_FAILURE;
				}
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "data property %s version 0x%x is invalid.",
				    dataname_ptr, version);
				rval = SD_FAILURE;
			}
			kmem_free(data_list, data_list_len);
			dataname_ptr = dnlist_ptr + dataname_len;
		}
	}

	/* free up the memory allocated by ddi_getlongprop */
	if (config_list) {
		kmem_free(config_list, config_list_len);
	}

	return (rval);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 * Description:
 *	This function reads the data list from the sd.conf file and pulls
 *	the values that can have numeric values as arguments, placing them
 *	in the appropriate sd_tunables member.
 *	Because the order of the data list members varies across platforms,
 *	this function reads them from the data list in a platform-specific
 *	order and places them into the correct sd_tunables member, which is
 *	consistent across all platforms.
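 *
 *	For example (hypothetical values, for illustration only): with
 *	flags of 0x1 and a data list whose first entry is 32, and assuming
 *	SD_CONF_BSET_THROTTLE is bit 0, the loop below would place 32 into
 *	values->sdt_throttle and leave all other members zeroed.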
3626 */ 3627 static void 3628 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3629 sd_tunables *values) 3630 { 3631 int i; 3632 int mask; 3633 3634 bzero(values, sizeof (sd_tunables)); 3635 3636 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3637 3638 mask = 1 << i; 3639 if (mask > flags) { 3640 break; 3641 } 3642 3643 switch (mask & flags) { 3644 case 0: /* This mask bit not set in flags */ 3645 continue; 3646 case SD_CONF_BSET_THROTTLE: 3647 values->sdt_throttle = data_list[i]; 3648 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3649 "sd_get_tunables_from_conf: throttle = %d\n", 3650 values->sdt_throttle); 3651 break; 3652 case SD_CONF_BSET_CTYPE: 3653 values->sdt_ctype = data_list[i]; 3654 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3655 "sd_get_tunables_from_conf: ctype = %d\n", 3656 values->sdt_ctype); 3657 break; 3658 case SD_CONF_BSET_NRR_COUNT: 3659 values->sdt_not_rdy_retries = data_list[i]; 3660 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3661 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3662 values->sdt_not_rdy_retries); 3663 break; 3664 case SD_CONF_BSET_BSY_RETRY_COUNT: 3665 values->sdt_busy_retries = data_list[i]; 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3667 "sd_get_tunables_from_conf: busy_retries = %d\n", 3668 values->sdt_busy_retries); 3669 break; 3670 case SD_CONF_BSET_RST_RETRIES: 3671 values->sdt_reset_retries = data_list[i]; 3672 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3673 "sd_get_tunables_from_conf: reset_retries = %d\n", 3674 values->sdt_reset_retries); 3675 break; 3676 case SD_CONF_BSET_RSV_REL_TIME: 3677 values->sdt_reserv_rel_time = data_list[i]; 3678 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3679 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3680 values->sdt_reserv_rel_time); 3681 break; 3682 case SD_CONF_BSET_MIN_THROTTLE: 3683 values->sdt_min_throttle = data_list[i]; 3684 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3685 "sd_get_tunables_from_conf: min_throttle = %d\n", 3686 values->sdt_min_throttle); 3687 break; 3688 case SD_CONF_BSET_DISKSORT_DISABLED: 3689 values->sdt_disk_sort_dis = data_list[i]; 3690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3691 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3692 values->sdt_disk_sort_dis); 3693 break; 3694 case SD_CONF_BSET_LUN_RESET_ENABLED: 3695 values->sdt_lun_reset_enable = data_list[i]; 3696 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3697 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3698 "\n", values->sdt_lun_reset_enable); 3699 break; 3700 case SD_CONF_BSET_CACHE_IS_NV: 3701 values->sdt_suppress_cache_flush = data_list[i]; 3702 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3703 "sd_get_tunables_from_conf: \ 3704 suppress_cache_flush = %d" 3705 "\n", values->sdt_suppress_cache_flush); 3706 break; 3707 } 3708 } 3709 } 3710 3711 /* 3712 * Function: sd_process_sdconf_table 3713 * 3714 * Description: Search the static configuration table for a match on the 3715 * inquiry vid/pid and update the driver soft state structure 3716 * according to the table property values for the device. 
 *
 *		The form of a configuration table entry is:
 *		  <vid+pid>,<flags>,<property-data>
 *		  "SEAGATE ST42400N",1,0x40000,
 *		  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		/*
		 * The static configuration table currently does not
		 * implement version 10 properties. Additionally,
		 * multiple data-property-name entries are not
		 * implemented in the static configuration table.
		 */
		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_table: disk %s\n", id);
			sd_set_vers1_properties(un,
			    sd_disk_table[table_index].flags,
			    sd_disk_table[table_index].properties);
			break;
		}
	}
}


/*
 * Function: sd_sdconf_id_match
 *
 * Description: This local function implements a case-insensitive vid/pid
 *		comparison as well as the boundary cases of wild card and
 *		multiple blanks.
 *
 *		Note: An implicit assumption made here is that the scsi
 *		inquiry structure will always keep the vid, pid and
 *		revision strings in consecutive sequence, so they can be
 *		read as a single string. If this assumption is not the
 *		case, a separate string, to be used for the check, needs
 *		to be built with these strings concatenated.
 *
 * Arguments: un - driver soft state (unit) structure
 *		id - table or config file vid/pid
 *		idlen - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
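			 * For example, a (hypothetical) id string of
			 * "*MegaDisk*" would match any device whose
			 * 16-byte inquiry PID contains the substring
			 * "MegaDisk" at any offset.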
3813 */ 3814 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3815 char *pidptr = &id[1]; 3816 int i; 3817 int j; 3818 int pidstrlen = idlen - 2; 3819 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3820 pidstrlen; 3821 3822 if (j < 0) { 3823 return (SD_FAILURE); 3824 } 3825 for (i = 0; i < j; i++) { 3826 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3827 pidptr, pidstrlen) == 0) { 3828 rval = SD_SUCCESS; 3829 break; 3830 } 3831 } 3832 } 3833 } 3834 } 3835 return (rval); 3836 } 3837 3838 3839 /* 3840 * Function: sd_blank_cmp 3841 * 3842 * Description: If the id string starts and ends with a space, treat 3843 * multiple consecutive spaces as equivalent to a single 3844 * space. For example, this causes a sd_disk_table entry 3845 * of " NEC CDROM " to match a device's id string of 3846 * "NEC CDROM". 3847 * 3848 * Note: The success exit condition for this routine is if 3849 * the pointer to the table entry is '\0' and the cnt of 3850 * the inquiry length is zero. This will happen if the inquiry 3851 * string returned by the device is padded with spaces to be 3852 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3853 * SCSI spec states that the inquiry string is to be padded with 3854 * spaces. 3855 * 3856 * Arguments: un - driver soft state (unit) structure 3857 * id - table or config file vid/pid 3858 * idlen - length of the vid/pid (bytes) 3859 * 3860 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3861 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3862 */ 3863 3864 static int 3865 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3866 { 3867 char *p1; 3868 char *p2; 3869 int cnt; 3870 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3871 sizeof (SD_INQUIRY(un)->inq_pid); 3872 3873 ASSERT(un != NULL); 3874 p2 = un->un_sd->sd_inq->inq_vid; 3875 ASSERT(id != NULL); 3876 p1 = id; 3877 3878 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3879 /* 3880 * Note: string p1 is terminated by a NUL but string p2 3881 * isn't. The end of p2 is determined by cnt. 3882 */ 3883 for (;;) { 3884 /* skip over any extra blanks in both strings */ 3885 while ((*p1 != '\0') && (*p1 == ' ')) { 3886 p1++; 3887 } 3888 while ((cnt != 0) && (*p2 == ' ')) { 3889 p2++; 3890 cnt--; 3891 } 3892 3893 /* compare the two strings */ 3894 if ((cnt == 0) || 3895 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3896 break; 3897 } 3898 while ((cnt > 0) && 3899 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3900 p1++; 3901 p2++; 3902 cnt--; 3903 } 3904 } 3905 } 3906 3907 /* return SD_SUCCESS if both strings match */ 3908 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3909 } 3910 3911 3912 /* 3913 * Function: sd_chk_vers1_data 3914 * 3915 * Description: Verify the version 1 device properties provided by the 3916 * user via the configuration file 3917 * 3918 * Arguments: un - driver soft state (unit) structure 3919 * flags - integer mask indicating properties to be set 3920 * prop_list - integer list of property values 3921 * list_len - length of user provided data 3922 * 3923 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3924 * SD_FAILURE - Indicates the user provided data is invalid 3925 */ 3926 3927 static int 3928 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3929 int list_len, char *dataname_ptr) 3930 { 3931 int i; 3932 int mask = 1; 3933 int index = 0; 3934 3935 ASSERT(un != NULL); 3936 3937 /* Check for a NULL property name and list */ 3938 if (dataname_ptr == NULL) { 3939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3940 "sd_chk_vers1_data: NULL data property name."); 3941 return (SD_FAILURE); 3942 } 3943 if (prop_list == NULL) { 3944 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3945 "sd_chk_vers1_data: %s NULL data property list.", 3946 dataname_ptr); 3947 return (SD_FAILURE); 3948 } 3949 3950 /* Display a warning if undefined bits are set in the flags */ 3951 if (flags & ~SD_CONF_BIT_MASK) { 3952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3953 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3954 "Properties not set.", 3955 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3956 return (SD_FAILURE); 3957 } 3958 3959 /* 3960 * Verify the length of the list by identifying the highest bit set 3961 * in the flags and validating that the property list has a length 3962 * up to the index of this bit. 3963 */ 3964 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3965 if (flags & mask) { 3966 index++; 3967 } 3968 mask = 1 << i; 3969 } 3970 if ((list_len / sizeof (int)) < (index + 2)) { 3971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3972 "sd_chk_vers1_data: " 3973 "Data property list %s size is incorrect. " 3974 "Properties not set.", dataname_ptr); 3975 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3976 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3977 return (SD_FAILURE); 3978 } 3979 return (SD_SUCCESS); 3980 } 3981 3982 3983 /* 3984 * Function: sd_set_vers1_properties 3985 * 3986 * Description: Set version 1 device properties based on a property list 3987 * retrieved from the driver configuration file or static 3988 * configuration table. Version 1 properties have the format: 3989 * 3990 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3991 * 3992 * where the prop0 value will be used to set prop0 if bit0 3993 * is set in the flags 3994 * 3995 * Arguments: un - driver soft state (unit) structure 3996 * flags - integer mask indicating properties to be set 3997 * prop_list - integer list of property values 3998 */ 3999 4000 static void 4001 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4002 { 4003 ASSERT(un != NULL); 4004 4005 /* 4006 * Set the flag to indicate cache is to be disabled. An attempt 4007 * to disable the cache via sd_cache_control() will be made 4008 * later during attach once the basic initialization is complete. 
4009 */ 4010 if (flags & SD_CONF_BSET_NOCACHE) { 4011 un->un_f_opt_disable_cache = TRUE; 4012 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4013 "sd_set_vers1_properties: caching disabled flag set\n"); 4014 } 4015 4016 /* CD-specific configuration parameters */ 4017 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4018 un->un_f_cfg_playmsf_bcd = TRUE; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_set_vers1_properties: playmsf_bcd set\n"); 4021 } 4022 if (flags & SD_CONF_BSET_READSUB_BCD) { 4023 un->un_f_cfg_readsub_bcd = TRUE; 4024 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4025 "sd_set_vers1_properties: readsub_bcd set\n"); 4026 } 4027 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4028 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4029 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4030 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4031 } 4032 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4033 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4034 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4035 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4036 } 4037 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4038 un->un_f_cfg_no_read_header = TRUE; 4039 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4040 "sd_set_vers1_properties: no_read_header set\n"); 4041 } 4042 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4043 un->un_f_cfg_read_cd_xd4 = TRUE; 4044 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4045 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4046 } 4047 4048 /* Support for devices which do not have valid/unique serial numbers */ 4049 if (flags & SD_CONF_BSET_FAB_DEVID) { 4050 un->un_f_opt_fab_devid = TRUE; 4051 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4052 "sd_set_vers1_properties: fab_devid bit set\n"); 4053 } 4054 4055 /* Support for user throttle configuration */ 4056 if (flags & SD_CONF_BSET_THROTTLE) { 4057 ASSERT(prop_list != NULL); 4058 un->un_saved_throttle = un->un_throttle = 4059 prop_list->sdt_throttle; 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4061 "sd_set_vers1_properties: throttle set to %d\n", 4062 prop_list->sdt_throttle); 4063 } 4064 4065 /* Set the per disk retry count according to the conf file or table. 
*/ 4066 if (flags & SD_CONF_BSET_NRR_COUNT) { 4067 ASSERT(prop_list != NULL); 4068 if (prop_list->sdt_not_rdy_retries) { 4069 un->un_notready_retry_count = 4070 prop_list->sdt_not_rdy_retries; 4071 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4072 "sd_set_vers1_properties: not ready retry count" 4073 " set to %d\n", un->un_notready_retry_count); 4074 } 4075 } 4076 4077 /* The controller type is reported for generic disk driver ioctls */ 4078 if (flags & SD_CONF_BSET_CTYPE) { 4079 ASSERT(prop_list != NULL); 4080 switch (prop_list->sdt_ctype) { 4081 case CTYPE_CDROM: 4082 un->un_ctype = prop_list->sdt_ctype; 4083 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4084 "sd_set_vers1_properties: ctype set to " 4085 "CTYPE_CDROM\n"); 4086 break; 4087 case CTYPE_CCS: 4088 un->un_ctype = prop_list->sdt_ctype; 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4090 "sd_set_vers1_properties: ctype set to " 4091 "CTYPE_CCS\n"); 4092 break; 4093 case CTYPE_ROD: /* RW optical */ 4094 un->un_ctype = prop_list->sdt_ctype; 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_set_vers1_properties: ctype set to " 4097 "CTYPE_ROD\n"); 4098 break; 4099 default: 4100 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4101 "sd_set_vers1_properties: Could not set " 4102 "invalid ctype value (%d)", 4103 prop_list->sdt_ctype); 4104 } 4105 } 4106 4107 /* Purple failover timeout */ 4108 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4109 ASSERT(prop_list != NULL); 4110 un->un_busy_retry_count = 4111 prop_list->sdt_busy_retries; 4112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4113 "sd_set_vers1_properties: " 4114 "busy retry count set to %d\n", 4115 un->un_busy_retry_count); 4116 } 4117 4118 /* Purple reset retry count */ 4119 if (flags & SD_CONF_BSET_RST_RETRIES) { 4120 ASSERT(prop_list != NULL); 4121 un->un_reset_retry_count = 4122 prop_list->sdt_reset_retries; 4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4124 "sd_set_vers1_properties: " 4125 "reset retry count set to %d\n", 4126 un->un_reset_retry_count); 4127 } 4128 4129 /* Purple reservation release timeout */ 4130 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4131 ASSERT(prop_list != NULL); 4132 un->un_reserve_release_time = 4133 prop_list->sdt_reserv_rel_time; 4134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4135 "sd_set_vers1_properties: " 4136 "reservation release timeout set to %d\n", 4137 un->un_reserve_release_time); 4138 } 4139 4140 /* 4141 * Driver flag telling the driver to verify that no commands are pending 4142 * for a device before issuing a Test Unit Ready. This is a workaround 4143 * for a firmware bug in some Seagate eliteI drives. 4144 */ 4145 if (flags & SD_CONF_BSET_TUR_CHECK) { 4146 un->un_f_cfg_tur_check = TRUE; 4147 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4148 "sd_set_vers1_properties: tur queue check set\n"); 4149 } 4150 4151 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4152 un->un_min_throttle = prop_list->sdt_min_throttle; 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4154 "sd_set_vers1_properties: min throttle set to %d\n", 4155 un->un_min_throttle); 4156 } 4157 4158 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4159 un->un_f_disksort_disabled = 4160 (prop_list->sdt_disk_sort_dis != 0) ? 4161 TRUE : FALSE; 4162 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4163 "sd_set_vers1_properties: disksort disabled " 4164 "flag set to %d\n", 4165 prop_list->sdt_disk_sort_dis); 4166 } 4167 4168 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4169 un->un_f_lun_reset_enabled = 4170 (prop_list->sdt_lun_reset_enable != 0) ? 
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: lun reset enabled "
		    "flag set to %d\n",
		    prop_list->sdt_lun_reset_enable);
	}

	if (flags & SD_CONF_BSET_CACHE_IS_NV) {
		un->un_f_suppress_cache_flush =
		    (prop_list->sdt_suppress_cache_flush != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: suppress_cache_flush "
		    "flag set to %d\n",
		    prop_list->sdt_suppress_cache_flush);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
}

/*
 * Function: sd_is_lsi()
 *
 * Description: Check for LSI devices by stepping through the static
 *		device table to match vid/pid.
 *
 * Arguments: un - ptr to sd_lun
 *
 * Notes: When a new LSI property is created, it must also be added
 *	to this function.
 */
static void
sd_is_lsi(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;
	void	*prop;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			prop = sd_disk_table[table_index].properties;
			if (prop == &lsi_properties ||
			    prop == &lsi_oem_properties ||
			    prop == &lsi_properties_scsi ||
			    prop == &symbios_properties) {
				un->un_f_cfg_is_lsi = TRUE;
			}
			break;
		}
	}
}

/*
 * Function: sd_get_physical_geometry
 *
 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
 *		MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
 *		target, and use this information to initialize the physical
 *		geometry cache specified by pgeom_p.
 *
 *		MODE SENSE is an optional command, so failure in this case
 *		does not necessarily denote an error. We want to use the
 *		MODE SENSE commands to derive the physical geometry of the
 *		device, but if either command fails, the logical geometry is
 *		used as the fallback for disk label geometry in cmlb.
 *
 *		This requires that un->un_blockcount and un->un_tgt_blocksize
 *		have already been initialized for the current target and
 *		that the current values be passed as args so that we don't
 *		end up ever trying to use -1 as a valid value. This could
 *		happen if either value is reset while we're not holding
 *		the mutex.
 *
 * Arguments: un - driver soft state (unit) structure
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Context: Kernel thread only (can sleep).
4269 */ 4270 4271 static int 4272 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4273 diskaddr_t capacity, int lbasize, int path_flag) 4274 { 4275 struct mode_format *page3p; 4276 struct mode_geometry *page4p; 4277 struct mode_header *headerp; 4278 int sector_size; 4279 int nsect; 4280 int nhead; 4281 int ncyl; 4282 int intrlv; 4283 int spc; 4284 diskaddr_t modesense_capacity; 4285 int rpm; 4286 int bd_len; 4287 int mode_header_length; 4288 uchar_t *p3bufp; 4289 uchar_t *p4bufp; 4290 int cdbsize; 4291 int ret = EIO; 4292 4293 ASSERT(un != NULL); 4294 4295 if (lbasize == 0) { 4296 if (ISCD(un)) { 4297 lbasize = 2048; 4298 } else { 4299 lbasize = un->un_sys_blocksize; 4300 } 4301 } 4302 pgeom_p->g_secsize = (unsigned short)lbasize; 4303 4304 /* 4305 * If the unit is a cd/dvd drive MODE SENSE page three 4306 * and MODE SENSE page four are reserved (see SBC spec 4307 * and MMC spec). To prevent soft errors just return 4308 * using the default LBA size. 4309 */ 4310 if (ISCD(un)) 4311 return (ret); 4312 4313 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4314 4315 /* 4316 * Retrieve MODE SENSE page 3 - Format Device Page 4317 */ 4318 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4319 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4320 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4321 != 0) { 4322 SD_ERROR(SD_LOG_COMMON, un, 4323 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4324 goto page3_exit; 4325 } 4326 4327 /* 4328 * Determine size of Block Descriptors in order to locate the mode 4329 * page data. ATAPI devices return 0, SCSI devices should return 4330 * MODE_BLK_DESC_LENGTH. 4331 */ 4332 headerp = (struct mode_header *)p3bufp; 4333 if (un->un_f_cfg_is_atapi == TRUE) { 4334 struct mode_header_grp2 *mhp = 4335 (struct mode_header_grp2 *)headerp; 4336 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4337 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4338 } else { 4339 mode_header_length = MODE_HEADER_LENGTH; 4340 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4341 } 4342 4343 if (bd_len > MODE_BLK_DESC_LENGTH) { 4344 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4345 "received unexpected bd_len of %d, page3\n", bd_len); 4346 goto page3_exit; 4347 } 4348 4349 page3p = (struct mode_format *) 4350 ((caddr_t)headerp + mode_header_length + bd_len); 4351 4352 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4353 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4354 "mode sense pg3 code mismatch %d\n", 4355 page3p->mode_page.code); 4356 goto page3_exit; 4357 } 4358 4359 /* 4360 * Use this physical geometry data only if BOTH MODE SENSE commands 4361 * complete successfully; otherwise, revert to the logical geometry. 4362 * So, we need to save everything in temporary variables. 
4363 */ 4364 sector_size = BE_16(page3p->data_bytes_sect); 4365 4366 /* 4367 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4368 */ 4369 if (sector_size == 0) { 4370 sector_size = un->un_sys_blocksize; 4371 } else { 4372 sector_size &= ~(un->un_sys_blocksize - 1); 4373 } 4374 4375 nsect = BE_16(page3p->sect_track); 4376 intrlv = BE_16(page3p->interleave); 4377 4378 SD_INFO(SD_LOG_COMMON, un, 4379 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4380 SD_INFO(SD_LOG_COMMON, un, 4381 " mode page: %d; nsect: %d; sector size: %d;\n", 4382 page3p->mode_page.code, nsect, sector_size); 4383 SD_INFO(SD_LOG_COMMON, un, 4384 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4385 BE_16(page3p->track_skew), 4386 BE_16(page3p->cylinder_skew)); 4387 4388 4389 /* 4390 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4391 */ 4392 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4393 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4394 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4395 != 0) { 4396 SD_ERROR(SD_LOG_COMMON, un, 4397 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4398 goto page4_exit; 4399 } 4400 4401 /* 4402 * Determine size of Block Descriptors in order to locate the mode 4403 * page data. ATAPI devices return 0, SCSI devices should return 4404 * MODE_BLK_DESC_LENGTH. 4405 */ 4406 headerp = (struct mode_header *)p4bufp; 4407 if (un->un_f_cfg_is_atapi == TRUE) { 4408 struct mode_header_grp2 *mhp = 4409 (struct mode_header_grp2 *)headerp; 4410 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4411 } else { 4412 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4413 } 4414 4415 if (bd_len > MODE_BLK_DESC_LENGTH) { 4416 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4417 "received unexpected bd_len of %d, page4\n", bd_len); 4418 goto page4_exit; 4419 } 4420 4421 page4p = (struct mode_geometry *) 4422 ((caddr_t)headerp + mode_header_length + bd_len); 4423 4424 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4425 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4426 "mode sense pg4 code mismatch %d\n", 4427 page4p->mode_page.code); 4428 goto page4_exit; 4429 } 4430 4431 /* 4432 * Stash the data now, after we know that both commands completed. 4433 */ 4434 4435 4436 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4437 spc = nhead * nsect; 4438 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4439 rpm = BE_16(page4p->rpm); 4440 4441 modesense_capacity = spc * ncyl; 4442 4443 SD_INFO(SD_LOG_COMMON, un, 4444 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4445 SD_INFO(SD_LOG_COMMON, un, 4446 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4447 SD_INFO(SD_LOG_COMMON, un, 4448 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4449 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4450 (void *)pgeom_p, capacity); 4451 4452 /* 4453 * Compensate if the drive's geometry is not rectangular, i.e., 4454 * the product of C * H * S returned by MODE SENSE >= that returned 4455 * by read capacity. This is an idiosyncrasy of the original x86 4456 * disk subsystem. 
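	 * For example (illustrative numbers only): if MODE SENSE reports
	 * 1000 cylinders, 16 heads and 63 sectors per track (spc = 1008,
	 * for a computed capacity of 1008000 blocks) while READ CAPACITY
	 * reports 1000000 blocks, the code below assigns
	 * g_acyl = roundup(8000 / 1008) = 8 alternate cylinders and
	 * g_ncyl = 1000 - 8 = 992.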
4457 */ 4458 if (modesense_capacity >= capacity) { 4459 SD_INFO(SD_LOG_COMMON, un, 4460 "sd_get_physical_geometry: adjusting acyl; " 4461 "old: %d; new: %d\n", pgeom_p->g_acyl, 4462 (modesense_capacity - capacity + spc - 1) / spc); 4463 if (sector_size != 0) { 4464 /* 1243403: NEC D38x7 drives don't support sec size */ 4465 pgeom_p->g_secsize = (unsigned short)sector_size; 4466 } 4467 pgeom_p->g_nsect = (unsigned short)nsect; 4468 pgeom_p->g_nhead = (unsigned short)nhead; 4469 pgeom_p->g_capacity = capacity; 4470 pgeom_p->g_acyl = 4471 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4472 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4473 } 4474 4475 pgeom_p->g_rpm = (unsigned short)rpm; 4476 pgeom_p->g_intrlv = (unsigned short)intrlv; 4477 ret = 0; 4478 4479 SD_INFO(SD_LOG_COMMON, un, 4480 "sd_get_physical_geometry: mode sense geometry:\n"); 4481 SD_INFO(SD_LOG_COMMON, un, 4482 " nsect: %d; sector size: %d; interlv: %d\n", 4483 nsect, sector_size, intrlv); 4484 SD_INFO(SD_LOG_COMMON, un, 4485 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4486 nhead, ncyl, rpm, modesense_capacity); 4487 SD_INFO(SD_LOG_COMMON, un, 4488 "sd_get_physical_geometry: (cached)\n"); 4489 SD_INFO(SD_LOG_COMMON, un, 4490 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4491 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4492 pgeom_p->g_nhead, pgeom_p->g_nsect); 4493 SD_INFO(SD_LOG_COMMON, un, 4494 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4495 pgeom_p->g_secsize, pgeom_p->g_capacity, 4496 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4497 4498 page4_exit: 4499 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4500 page3_exit: 4501 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4502 4503 return (ret); 4504 } 4505 4506 /* 4507 * Function: sd_get_virtual_geometry 4508 * 4509 * Description: Ask the controller to tell us about the target device. 4510 * 4511 * Arguments: un - pointer to softstate 4512 * capacity - disk capacity in #blocks 4513 * lbasize - disk block size in bytes 4514 * 4515 * Context: Kernel thread only 4516 */ 4517 4518 static int 4519 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4520 diskaddr_t capacity, int lbasize) 4521 { 4522 uint_t geombuf; 4523 int spc; 4524 4525 ASSERT(un != NULL); 4526 4527 /* Set sector size, and total number of sectors */ 4528 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4529 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4530 4531 /* Let the HBA tell us its geometry */ 4532 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4533 4534 /* A value of -1 indicates an undefined "geometry" property */ 4535 if (geombuf == (-1)) { 4536 return (EINVAL); 4537 } 4538 4539 /* Initialize the logical geometry cache. */ 4540 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4541 lgeom_p->g_nsect = geombuf & 0xffff; 4542 lgeom_p->g_secsize = un->un_sys_blocksize; 4543 4544 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4545 4546 /* 4547 * Note: The driver originally converted the capacity value from 4548 * target blocks to system blocks. However, the capacity value passed 4549 * to this routine is already in terms of system blocks (this scaling 4550 * is done when the READ CAPACITY command is issued and processed). 4551 * This 'error' may have gone undetected because the usage of g_ncyl 4552 * (which is based upon g_capacity) is very limited within the driver 4553 */ 4554 lgeom_p->g_capacity = capacity; 4555 4556 /* 4557 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The hba may return zero values if the device has been removed.
	 */
	if (spc == 0) {
		lgeom_p->g_ncyl = 0;
	} else {
		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
	}
	lgeom_p->g_acyl = 0;

	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
	return (0);

}

/*
 * Function: sd_update_block_info
 *
 * Description: Update the soft state with the new target block size and
 *		capacity, and refresh the exported device capacity
 *		properties.
 *
 * Arguments: un: unit struct.
 *		lbasize: new target sector size
 *		capacity: new target capacity, i.e., block count
 *
 * Context: Kernel thread context
 */

static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	uint_t	dblk;

	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
	}

	if (capacity != 0) {
		un->un_blockcount = capacity;
		un->un_f_blockcount_is_valid = TRUE;
	}

	/*
	 * Update device capacity properties.
	 *
	 *	'device-nblocks'	number of blocks in target's units
	 *	'device-blksize'	data bearing size of target's block
	 *
	 * NOTE: math is complicated by the fact that un_tgt_blocksize may
	 * not be a power of two for checksumming disks with 520/528 byte
	 * sectors.
	 */
	if (un->un_f_tgt_blocksize_is_valid &&
	    un->un_f_blockcount_is_valid &&
	    un->un_sys_blocksize) {
		dblk = un->un_tgt_blocksize / un->un_sys_blocksize;
		(void) ddi_prop_update_int64(DDI_DEV_T_NONE, SD_DEVINFO(un),
		    "device-nblocks", un->un_blockcount / dblk);
		/*
		 * To save memory, only define "device-blksize" when its
		 * value is different from the default DEV_BSIZE value.
		 */
		if ((un->un_sys_blocksize * dblk) != DEV_BSIZE)
			(void) ddi_prop_update_int(DDI_DEV_T_NONE,
			    SD_DEVINFO(un), "device-blksize",
			    un->un_sys_blocksize * dblk);
	}
}


/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments: devi - the system's dev_info_t for the device.
 *		un - driver soft state (unit) structure
 *		reservation_flag - indicates if a reservation conflict
 *			occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;
	int		dlen, len;
	char		*sn;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * If transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		return; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.
These drives
	 * manage their devids by storing them in the last two available
	 * sectors on the drive and have them fabricated by the ddi layer
	 * by calling ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(un) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict. Fabricate a new devid.
			 */
			(void) sd_create_devid(un);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
	 * 0x83 is available, that is the best choice. Our next choice is
	 * 0x80. If neither is available, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do quick verify of data returned
				 * and define property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
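					 * For example (illustrative data
					 * only): if inq80[3] is 8 and bytes
					 * 4-11 hold "  ABC123", the property
					 * value becomes "ABC123".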
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage their devids by storing them in
		 * the last two available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricated devid only if one does not
		 * already exist.
		 */
		if (sd_get_devid(un) == EINVAL) {
			(void) sd_create_devid(un);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}


/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
4828 * 4829 * Arguments: un - driver soft state (unit) structure 4830 * 4831 * Return Code: 0 if we successfully get the device id 4832 * 4833 * Context: Kernel Thread 4834 */ 4835 4836 static int 4837 sd_get_devid(struct sd_lun *un) 4838 { 4839 struct dk_devid *dkdevid; 4840 ddi_devid_t tmpid; 4841 uint_t *ip; 4842 size_t sz; 4843 diskaddr_t blk; 4844 int status; 4845 int chksum; 4846 int i; 4847 size_t buffer_size; 4848 4849 ASSERT(un != NULL); 4850 ASSERT(mutex_owned(SD_MUTEX(un))); 4851 4852 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 4853 un); 4854 4855 if (un->un_devid != NULL) { 4856 return (0); 4857 } 4858 4859 mutex_exit(SD_MUTEX(un)); 4860 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 4861 (void *)SD_PATH_DIRECT) != 0) { 4862 mutex_enter(SD_MUTEX(un)); 4863 return (EINVAL); 4864 } 4865 4866 /* 4867 * Read and verify device id, stored in the reserved cylinders at the 4868 * end of the disk. Backup label is on the odd sectors of the last 4869 * track of the last cylinder. Device id will be on track of the next 4870 * to last cylinder. 4871 */ 4872 mutex_enter(SD_MUTEX(un)); 4873 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 4874 mutex_exit(SD_MUTEX(un)); 4875 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 4876 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 4877 SD_PATH_DIRECT); 4878 if (status != 0) { 4879 goto error; 4880 } 4881 4882 /* Validate the revision */ 4883 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4884 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4885 status = EINVAL; 4886 goto error; 4887 } 4888 4889 /* Calculate the checksum */ 4890 chksum = 0; 4891 ip = (uint_t *)dkdevid; 4892 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 4893 i++) { 4894 chksum ^= ip[i]; 4895 } 4896 4897 /* Compare the checksums */ 4898 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4899 status = EINVAL; 4900 goto error; 4901 } 4902 4903 /* Validate the device id */ 4904 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4905 status = EINVAL; 4906 goto error; 4907 } 4908 4909 /* 4910 * Store the device id in the driver soft state 4911 */ 4912 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4913 tmpid = kmem_alloc(sz, KM_SLEEP); 4914 4915 mutex_enter(SD_MUTEX(un)); 4916 4917 un->un_devid = tmpid; 4918 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4919 4920 kmem_free(dkdevid, buffer_size); 4921 4922 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 4923 4924 return (status); 4925 error: 4926 mutex_enter(SD_MUTEX(un)); 4927 kmem_free(dkdevid, buffer_size); 4928 return (status); 4929 } 4930 4931 4932 /* 4933 * Function: sd_create_devid 4934 * 4935 * Description: This routine will fabricate the device id and write it 4936 * to the disk. 
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: the fabricated devid, or NULL on failure
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(struct sd_lun *un)
{
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(un) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: -1 if the devid block cannot be located;
 *		otherwise, the value returned by sd_send_scsi_WRITE
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(struct sd_lun *un)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}


	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill-in checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set and
 *		a page code of 0x00 to the device. It is used to determine
 *		which vital product data pages are available to find the
 *		devid. We are looking for pages 0x83 or 0x80. If -1 is
 *		returned, the device does not support the command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		-1 - the device does not support VPD pages
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(struct sd_lun *un)
{
	uchar_t	*page_list = NULL;
	uchar_t	page_length = 0xff;	/* Use max possible length */
	uchar_t	evpd = 0x01;		/* Set the EVPD bit */
	uchar_t	page_code = 0x00;	/* Supported VPD Pages */
	int	rval = 0;
	int	counter;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
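	 *
	 * The returned page 0x00 data is laid out as: byte 0 - peripheral
	 * qualifier/device type, byte 1 - page code (0x00), byte 3 - the
	 * number of supported page codes that follow, and bytes 4-n - the
	 * supported page codes in ascending order.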
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd,
	    page_code, NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it. If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask. If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;	/* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * page_list[VPD_PAGE_LENGTH] (byte 3) holds the
			 * number of page codes returned, so the last valid
			 * index is VPD_HEAD_OFFSET +
			 * page_list[VPD_PAGE_LENGTH].
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(struct sd_lun *un, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval;

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor, so try it first. Some
		 * devices may return ILLEGAL REQUEST; others may hang.
		 * The following START_STOP_UNIT is used to check whether
		 * the target device has a motor.
         */
        un->un_f_start_stop_supported = TRUE;
        if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
            SD_PATH_DIRECT) != 0) {
            un->un_f_start_stop_supported = FALSE;
        }

        /*
         * Create the pm properties anyway; otherwise the parent
         * can't go to sleep.
         */
        (void) sd_create_pm_components(devi, un);
        un->un_f_pm_is_enabled = TRUE;
        return;
    }

    if (!un->un_f_log_sense_supported) {
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;
        return;
    }

    rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
    if (sd_force_pm_supported) {
        /* Force a successful result */
        rval = 1;
    }
#endif

    /*
     * If the start-stop cycle counter log page is not supported
     * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
     * then we should not create the pm_components property.
     */
    if (rval == -1) {
        /*
         * Error.
         * Reading log sense failed, most likely this is
         * an older drive that does not support log sense.
         * If this fails, auto-pm is not supported.
         */
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;

    } else if (rval == 0) {
        /*
         * Page not found.
         * The start stop cycle counter is implemented as page
         * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
         * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
         */
        if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
            /*
             * Page found, use this one.
             */
            un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
            un->un_f_pm_is_enabled = TRUE;
        } else {
            /*
             * Error or page not found.
             * auto-pm is not supported for this device.
             */
            un->un_power_level = SD_SPINDLE_ON;
            un->un_f_pm_is_enabled = FALSE;
        }
    } else {
        /*
         * Page found, use it.
         */
        un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
        un->un_f_pm_is_enabled = TRUE;
    }


    if (un->un_f_pm_is_enabled == TRUE) {
        log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
        log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

        rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
            log_page_size, un->un_start_stop_cycle_page,
            0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 0;
        }
#endif

        /*
         * If the Log Sense for the Start/Stop Cycle Counter page
         * succeeds, then power management is supported and we can
         * enable auto-pm.
         */
        if (rval == 0) {
            (void) sd_create_pm_components(devi, un);
        } else {
            un->un_power_level = SD_SPINDLE_ON;
            un->un_f_pm_is_enabled = FALSE;
        }

        kmem_free(log_page_data, log_page_size);
    }
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
    char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
        "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
        /*
         * When components are initially created they are idle;
         * power up any non-removables.
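         *
         * (The property created above shows up in prtconf(1M)
         * output roughly as:
         *	pm-components: 'NAME=spindle-motor' + '0=off' + '1=on'
         * i.e. component 0, the spindle motor, with power levels
         * 0 and 1.)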
         * Note: the return value of pm_raise_power can't be used
         * for determining if PM should be enabled for this device.
         * Even if you check the return values and remove this
         * property created above, the PM framework will not honor the
         * change after the first call to pm_raise_power. Hence,
         * removal of that property does not help if pm_raise_power
         * fails. In the case of removable media, the start/stop
         * will fail if the media is not present.
         */
        if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
            SD_SPINDLE_ON) == DDI_SUCCESS)) {
            mutex_enter(SD_MUTEX(un));
            un->un_power_level = SD_SPINDLE_ON;
            mutex_enter(&un->un_pm_mutex);
            /* Set to on and not busy. */
            un->un_pm_count = 0;
        } else {
            mutex_enter(SD_MUTEX(un));
            un->un_power_level = SD_SPINDLE_OFF;
            mutex_enter(&un->un_pm_mutex);
            /* Set to off. */
            un->un_pm_count = -1;
        }
        mutex_exit(&un->un_pm_mutex);
        mutex_exit(SD_MUTEX(un));
    } else {
        un->un_power_level = SD_SPINDLE_ON;
        un->un_f_pm_is_enabled = FALSE;
    }
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
    struct sd_lun	*un;
    clock_t		wait_cmds_complete;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL) {
        return (DDI_FAILURE);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

    mutex_enter(SD_MUTEX(un));

    /* Return success if the device is already suspended. */
    if (un->un_state == SD_STATE_SUSPENDED) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device already suspended, exiting\n");
        return (DDI_SUCCESS);
    }

    /* Return failure if the device is being used by HA */
    if (un->un_resvd_status &
        (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device in use by HA, exiting\n");
        return (DDI_FAILURE);
    }

    /*
     * Return failure if the device is in a resource wait
     * or power changing state.
     */
    if ((un->un_state == SD_STATE_RWAIT) ||
        (un->un_state == SD_STATE_PM_CHANGING)) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
            "device in resource wait state, exiting\n");
        return (DDI_FAILURE);
    }


    un->un_save_state = un->un_last_state;
    New_state(un, SD_STATE_SUSPENDED);

    /*
     * Wait for all commands that are in transport or queued to a timer
     * for retry to complete.
     *
     * While waiting, no new commands will be accepted or sent because of
     * the new state we set above.
     *
     * Wait until the current operation has completed. If we are in the
     * resource wait state (with an intr outstanding) then we need to wait
     * until the intr completes and starts the next cmd. We want to wait
     * for SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
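     *
     * (cv_timedwait(9F) takes an absolute deadline in lbolt ticks, which
     * is why the code below computes "now + seconds * hz" up front via
     * drv_usectohz(1000000) rather than passing a relative delay.)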
     */
    wait_cmds_complete = ddi_get_lbolt() +
        (sd_wait_cmds_complete * drv_usectohz(1000000));

    while (un->un_ncmds_in_transport != 0) {
        /*
         * Fail if commands do not finish in the specified time.
         */
        if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
            wait_cmds_complete) == -1) {
            /*
             * Undo the state changes made above. Everything
             * must go back to its original value.
             */
            Restore_state(un);
            un->un_last_state = un->un_save_state;
            /* Wake up any threads that might be waiting. */
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_ERROR(SD_LOG_IO_PM, un,
                "sd_ddi_suspend: failed due to outstanding cmds\n");
            SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
            return (DDI_FAILURE);
        }
    }

    /*
     * Cancel SCSI watch thread and timeouts, if any are active
     */

    if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
        opaque_t temp_token = un->un_swr_token;
        mutex_exit(SD_MUTEX(un));
        scsi_watch_suspend(temp_token);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_reset_throttle_timeid != NULL) {
        timeout_id_t temp_id = un->un_reset_throttle_timeid;
        un->un_reset_throttle_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_dcvb_timeid != NULL) {
        timeout_id_t temp_id = un->un_dcvb_timeid;
        un->un_dcvb_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    mutex_enter(&un->un_pm_mutex);
    if (un->un_pm_timeid != NULL) {
        timeout_id_t temp_id = un->un_pm_timeid;
        un->un_pm_timeid = NULL;
        mutex_exit(&un->un_pm_mutex);
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    } else {
        mutex_exit(&un->un_pm_mutex);
    }

    if (un->un_retry_timeid != NULL) {
        timeout_id_t temp_id = un->un_retry_timeid;
        un->un_retry_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));

        if (un->un_retry_bp != NULL) {
            un->un_retry_bp->av_forw = un->un_waitq_headp;
            un->un_waitq_headp = un->un_retry_bp;
            if (un->un_waitq_tailp == NULL) {
                un->un_waitq_tailp = un->un_retry_bp;
            }
            un->un_retry_bp = NULL;
            un->un_retry_statp = NULL;
        }
    }

    if (un->un_direct_priority_timeid != NULL) {
        timeout_id_t temp_id = un->un_direct_priority_timeid;
        un->un_direct_priority_timeid = NULL;
        mutex_exit(SD_MUTEX(un));
        (void) untimeout(temp_id);
        mutex_enter(SD_MUTEX(un));
    }

    if (un->un_f_is_fibre == TRUE) {
        /*
         * Remove callbacks for insert and remove events
         */
        if (un->un_insert_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_insert_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_insert_event = NULL;
        }

        if (un->un_remove_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_remove_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_remove_event = NULL;
        }
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
    ASSERT(un != NULL);
    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));

    /*
     * Exit if power management is not enabled for this device, or if
     * the device is being used by HA.
     */
    if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
        (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
        return (DDI_SUCCESS);
    }

    SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
        un->un_ncmds_in_driver);

    /*
     * See if the device is not busy, i.e.:
     *    - we have no commands in the driver for this device
     *    - not waiting for resources
     */
    if ((un->un_ncmds_in_driver == 0) &&
        (un->un_state != SD_STATE_RWAIT)) {
        /*
         * The device is not busy, so it is OK to go to low power state.
         * Indicate low power, but rely on someone else to actually
         * change it.
         */
        mutex_enter(&un->un_pm_mutex);
        un->un_pm_count = -1;
        mutex_exit(&un->un_pm_mutex);
        un->un_power_level = SD_SPINDLE_OFF;
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
    struct sd_lun	*un;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL) {
        return (DDI_FAILURE);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

    mutex_enter(SD_MUTEX(un));
    Restore_state(un);

    /*
     * Restore the state which was saved to give the
     * right state in un_last_state.
     */
    un->un_last_state = un->un_save_state;
    /*
     * Note: throttle comes back at full.
     * Also note: this MUST be done before calling pm_raise_power,
     * otherwise the system can get hung in biowait. The scenario where
     * this'll happen is under cpr suspend. Writing of the system
     * state goes through sddump, which writes 0 to un_throttle. If
     * writing the system state then fails, for example if the partition
     * is too small, then cpr attempts a resume. If throttle isn't restored
     * from the saved value until after calling pm_raise_power, then
     * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
     * in biowait.
     */
    un->un_throttle = un->un_saved_throttle;

    /*
     * The chance of failure is very rare as the only command done in power
     * entry point is START command when you transition from 0->1 or
     * unknown->1. Put it to SPINDLE ON state irrespective of the state at
     * which suspend was done. Ignore the return value as the resume should
     * not be failed.
     * In the case of removable media, the media need not be inserted and
     * hence there is a chance that raise power will fail with media not
     * present.
     */
    if (un->un_f_attach_spinup) {
        mutex_exit(SD_MUTEX(un));
        (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * Don't broadcast to the suspend cv and therefore possibly
     * start I/O until after power has been restored.
     */
    cv_broadcast(&un->un_suspend_cv);
    cv_broadcast(&un->un_state_cv);

    /* restart thread */
    if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
        scsi_watch_resume(un->un_swr_token);
    }

#if (defined(__fibre))
    if (un->un_f_is_fibre == TRUE) {
        /*
         * Add callbacks for insert and remove events
         */
        if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
            sd_init_event_callbacks(un);
        }
    }
#endif

    /*
     * Transport any pending commands to the target.
     *
     * If this is a low-activity device, commands in the queue will have
     * to wait until new commands come in, which may take awhile. Also, we
     * specifically don't check un_ncmds_in_transport because we know that
     * there really are no commands in progress after the unit was
     * suspended and we could have reached the throttle level, been
     * suspended, and have no new commands coming in for awhile. Highly
     * unlikely, but so is the low-activity disk scenario.
     */
    ddi_xbuf_dispatch(un->un_xbuf_attr);

    sd_start_cmds(un, NULL);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
    ASSERT(un != NULL);

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));
    un->un_power_level = SD_SPINDLE_ON;

    ASSERT(!mutex_owned(&un->un_pm_mutex));
    mutex_enter(&un->un_pm_mutex);
    if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
        un->un_pm_count++;
        ASSERT(un->un_pm_count == 0);
        /*
         * Note: no longer do the cv_broadcast on un_suspend_cv. The
         * un_suspend_cv is for a system resume, not a power management
         * device resume. (4297749)
         *	 cv_broadcast(&un->un_suspend_cv);
         */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));

    return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
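 *
 *		(If the device is still busy when the handler fires, it
 *		simply re-arms itself; see the 300 ms timeout() call below.)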
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    time_t	now;

    mutex_enter(&sd_detach_mutex);
    if (un->un_detach_count != 0) {
        /* Abort if the instance is detaching */
        mutex_exit(&sd_detach_mutex);
        return;
    }
    mutex_exit(&sd_detach_mutex);

    now = ddi_get_time();
    /*
     * Grab both mutexes, in the proper order, since we're accessing
     * both PM and softstate variables.
     */
    mutex_enter(SD_MUTEX(un));
    mutex_enter(&un->un_pm_mutex);
    if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
        (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
        /*
         * Update the chain types.
         * This takes effect on the next new command received.
         */
        if (un->un_f_non_devbsize_supported) {
            un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
        } else {
            un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
        }
        un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_idletimeout_handler: idling device\n");
        (void) pm_idle_component(SD_DEVINFO(un), 0);
        un->un_pm_idle_timeid = NULL;
    } else {
        un->un_pm_idle_timeid =
            timeout(sd_pm_idletimeout_handler, un,
            (drv_usectohz((clock_t)300000))); /* 300 ms. */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    (void) pm_idle_component(SD_DEVINFO(un), 0);
    mutex_enter(&un->un_pm_mutex);
    un->un_pm_timeid = NULL;
    mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
    struct sd_lun	*un;
    int		instance;
    int		rval = DDI_SUCCESS;
    uint_t		i, log_page_size, maxcycles, ncycles;
    uchar_t		*log_page_data;
    int		log_sense_page;
    int		medium_present;
    time_t		intvlp;
    dev_t		dev;
    struct pm_trans_data	sd_pm_tran_data;
    uchar_t		save_state;
    int		sval;
    uchar_t		state_before_pm;
    int		got_semaphore_here;

    instance = ddi_get_instance(devi);

    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
        component != 0) {
        return (DDI_FAILURE);
    }

    dev = sd_make_device(SD_DEVINFO(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

    /*
     * Must synchronize power down with close.
     * Attempt to decrement/acquire the open/close semaphore,
     * but do NOT wait on it. If it's not greater than zero,
     * i.e. it can't be decremented without waiting, then
     * someone else, either open or close, already has it
     * and the try returns 0. Use that knowledge here to determine
     * if it's OK to change the device power level.
     * Also, only increment it on exit if it was decremented,
     * i.e. gotten, here.
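     *
     * (sema_tryp(9F) returns a non-zero value when it succeeds in
     * decrementing the semaphore and 0 when it would have to block, so
     * got_semaphore_here != 0 below means we own the semaphore and must
     * sema_v() it on every exit path.)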
     */
    got_semaphore_here = sema_tryp(&un->un_semoclose);

    mutex_enter(SD_MUTEX(un));

    SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
        un->un_ncmds_in_driver);

    /*
     * If un_ncmds_in_driver is non-zero it indicates commands are
     * already being processed in the driver, or if the semaphore was
     * not gotten here it indicates an open or close is being processed.
     * At the same time somebody is requesting to go low power, which
     * can't happen, therefore we need to return failure.
     */
    if ((level == SD_SPINDLE_OFF) &&
        ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device has queued cmds.\n");
        return (DDI_FAILURE);
    }

    /*
     * If the state is OFFLINE the disk is completely dead; changing its
     * power level would mean sending commands, which would fail anyway,
     * so return failure here as well.
     *
     * Power changes to a device that's OFFLINE or SUSPENDED
     * are not allowed.
     */
    if ((un->un_state == SD_STATE_OFFLINE) ||
        (un->un_state == SD_STATE_SUSPENDED)) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device is off-line.\n");
        return (DDI_FAILURE);
    }

    /*
     * Change the device's state to indicate its power level
     * is being changed. Do this to prevent a power off in the
     * middle of commands, which is especially bad on devices
     * that are really powered off instead of just spun down.
     */
    state_before_pm = un->un_state;
    un->un_state = SD_STATE_PM_CHANGING;

    mutex_exit(SD_MUTEX(un));

    /*
     * If the "pm-capable" property is set to TRUE by HBA drivers,
     * bypass the following checking; otherwise, check the log
     * sense information for this device.
     */
    if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
        /*
         * Get the log sense information to understand whether the
         * power cycle counts have gone beyond the threshold.
         */
        log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
        log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

        mutex_enter(SD_MUTEX(un));
        log_sense_page = un->un_start_stop_cycle_page;
        mutex_exit(SD_MUTEX(un));

        rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
            log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 0;
        }
#endif
        if (rval != 0) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Log Sense Failed\n");
            kmem_free(log_page_data, log_page_size);
            /* Cannot support power management on those drives */

            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, Log Sense Failed.\n");
            return (DDI_FAILURE);
        }

        /*
         * From the page data, convert the essential information to
         * pm_trans_data.
         */
        maxcycles =
            (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
            (log_page_data[0x1e] << 8)  | log_page_data[0x1f];

        sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

        ncycles =
            (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
            (log_page_data[0x26] << 8)  | log_page_data[0x27];

        sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

        for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
            sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
                log_page_data[8+i];
        }

        kmem_free(log_page_data, log_page_size);

        /*
         * Call the pm_trans_check routine to get the Ok from
         * the global policy.
         */

        sd_pm_tran_data.format = DC_SCSI_FORMAT;
        sd_pm_tran_data.un.scsi_cycles.flag = 0;

        rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
        if (sd_force_pm_supported) {
            /* Force a successful result */
            rval = 1;
        }
#endif
        switch (rval) {
        case 0:
            /*
             * Not OK to power cycle, or an error occurred in the
             * parameters passed. pm_trans_check has returned (in
             * intvlp) the advised time to wait before considering
             * a power cycle. Based on that intvlp parameter we are
             * supposed to pretend we are busy so that the pm
             * framework will never call our power entry point.
             * To that end, install a timeout handler and wait for
             * the recommended time to elapse so that power
             * management can become effective again.
             *
             * To effect this behavior, call pm_busy_component to
             * indicate to the framework this device is busy.
             * By not adjusting un_pm_count the rest of PM in
             * the driver will function normally, and independent
             * of this, but because the framework is told the
             * device is busy it won't attempt powering down until
             * it gets a matching idle. The timeout handler sends
             * this.
             * Note: sd_pm_entry can't be called here to do this
             * because sdpower may have been called as a result
             * of a call to pm_raise_power from within sd_pm_entry.
             *
             * If a timeout handler is already active then
             * don't install another.
             */
            mutex_enter(&un->un_pm_mutex);
            if (un->un_pm_timeid == NULL) {
                un->un_pm_timeid =
                    timeout(sd_pm_timeout_handler,
                    un, intvlp * drv_usectohz(1000000));
                mutex_exit(&un->un_pm_mutex);
                (void) pm_busy_component(SD_DEVINFO(un), 0);
            } else {
                mutex_exit(&un->un_pm_mutex);
            }
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));

            SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
                "trans check Failed, not ok to power cycle.\n");
            return (DDI_FAILURE);

        case -1:
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, trans check command Failed.\n");
            return (DDI_FAILURE);
        }
    }

    if (level == SD_SPINDLE_OFF) {
        /*
         * Save the last state... if the STOP FAILS we need it
         * for restoring
         */
        mutex_enter(SD_MUTEX(un));
        save_state = un->un_last_state;
        /*
         * There must not be any cmds getting processed
         * in the driver when we get here. Power to the
         * device is potentially going off.
         */
        ASSERT(un->un_ncmds_in_driver == 0);
        mutex_exit(SD_MUTEX(un));

        /*
         * For now suspend the device completely before spindle is
         * turned off
         */
        if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
            if (got_semaphore_here != 0) {
                sema_v(&un->un_semoclose);
            }
            /*
             * On exit put the state back to its original value
             * and broadcast to anyone waiting for the power
             * change completion.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_state = state_before_pm;
            cv_broadcast(&un->un_suspend_cv);
            mutex_exit(SD_MUTEX(un));
            SD_TRACE(SD_LOG_IO_PM, un,
                "sdpower: exit, PM suspend Failed.\n");
            return (DDI_FAILURE);
        }
    }

    /*
     * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
     * close, or strategy. Dump no longer uses this routine; it uses its
     * own code so it can be done in polled mode.
     */

    medium_present = TRUE;

    /*
     * When powering up, issue a TUR in case the device is at unit
     * attention. Don't do retries. Bypass the PM layer, otherwise
     * a deadlock on un_pm_busy_cv will occur.
     */
    if (level == SD_SPINDLE_ON) {
        (void) sd_send_scsi_TEST_UNIT_READY(un,
            SD_DONT_RETRY_TUR | SD_BYPASS_PM);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
        ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

    sval = sd_send_scsi_START_STOP_UNIT(un,
        ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
        SD_PATH_DIRECT);
    /* Command failed, check for media present. */
    if ((sval == ENXIO) && un->un_f_has_removable_media) {
        medium_present = FALSE;
    }

    /*
     * The conditions of interest here are:
     *   if a spindle off with media present fails,
     *	then restore the state and return an error.
     *   else if a spindle on fails,
     *	then return an error (there's no state to restore).
     * In all other cases we setup for the new state
     * and return success.
     */
    switch (level) {
    case SD_SPINDLE_OFF:
        if ((medium_present == TRUE) && (sval != 0)) {
            /* The stop command from above failed */
            rval = DDI_FAILURE;
            /*
             * The stop command failed, and we have media
             * present. Put the level back by calling
             * sd_ddi_pm_resume() and set the state back to
             * its previous value.
             */
            (void) sd_ddi_pm_resume(un);
            mutex_enter(SD_MUTEX(un));
            un->un_last_state = save_state;
            mutex_exit(SD_MUTEX(un));
            break;
        }
        /*
         * The stop command from above succeeded.
         */
        if (un->un_f_monitor_media_state) {
            /*
             * Terminate the watch thread when removable media
             * devices are going into a low power state.
             * This is as per the requirements of the pm
             * framework; otherwise commands will be generated
             * for the device (through the watch thread), even
             * when the device is in a low power state.
             */
            mutex_enter(SD_MUTEX(un));
            un->un_f_watcht_stopped = FALSE;
            if (un->un_swr_token != NULL) {
                opaque_t temp_token = un->un_swr_token;
                un->un_f_watcht_stopped = TRUE;
                un->un_swr_token = NULL;
                mutex_exit(SD_MUTEX(un));
                (void) scsi_watch_request_terminate(temp_token,
                    SCSI_WATCH_TERMINATE_WAIT);
            } else {
                mutex_exit(SD_MUTEX(un));
            }
        }
        break;

    default:	/* The level requested is spindle on... */
        /*
         * Legacy behavior: return success on a failed spinup
         * if there is no media in the drive.
         * Do this by looking at medium_present here.
         */
        if ((sval != 0) && medium_present) {
            /* The start command from above failed */
            rval = DDI_FAILURE;
            break;
        }
        /*
         * The start command from above succeeded.
         * Resume the devices now that we have
         * started the disks.
         */
        (void) sd_ddi_pm_resume(un);

        /*
         * Resume the watch thread since it was suspended
         * when the device went into low power mode.
         */
        if (un->un_f_monitor_media_state) {
            mutex_enter(SD_MUTEX(un));
            if (un->un_f_watcht_stopped == TRUE) {
                opaque_t temp_token;

                un->un_f_watcht_stopped = FALSE;
                mutex_exit(SD_MUTEX(un));
                temp_token = scsi_watch_request_submit(
                    SD_SCSI_DEVP(un),
                    sd_check_media_time,
                    SENSE_LENGTH, sd_media_watch_cb,
                    (caddr_t)dev);
                mutex_enter(SD_MUTEX(un));
                un->un_swr_token = temp_token;
            }
            mutex_exit(SD_MUTEX(un));
        }
    }
    if (got_semaphore_here != 0) {
        sema_v(&un->un_semoclose);
    }
    /*
     * On exit put the state back to its original value
     * and broadcast to anyone waiting for the power
     * change completion.
     */
    mutex_enter(SD_MUTEX(un));
    un->un_state = state_before_pm;
    cv_broadcast(&un->un_suspend_cv);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

    return (rval);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *		cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_ATTACH:
        return (sd_unit_attach(devi));
    case DDI_RESUME:
        return (sd_ddi_resume(devi));
    default:
        break;
    }
    return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *		cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
    switch (cmd) {
    case DDI_DETACH:
        return (sd_unit_detach(devi));
    case DDI_SUSPEND:
        return (sd_ddi_suspend(devi));
    default:
        break;
    }
    return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
    ASSERT(un != NULL);

    mutex_enter(SD_MUTEX(un));

    ASSERT(un->un_in_callback >= 0);

    while (un->un_in_callback > 0) {
        mutex_exit(SD_MUTEX(un));
        delay(2);
        mutex_enter(SD_MUTEX(un));
    }

    mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *		Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
    struct	scsi_device	*devp;
    struct	sd_lun		*un;
    char			*variantp;
    int	reservation_flag = SD_TARGET_IS_UNRESERVED;
    int	instance;
    int	rval;
    int	wc_enabled;
    int	tgt;
    uint64_t	capacity;
    uint_t		lbasize = 0;
    dev_info_t	*pdip = ddi_get_parent(devi);
    int		offbyone = 0;
    int		geom_label_valid = 0;
#if defined(__sparc)
    int		max_xfer_size;
#endif

    /*
     * Retrieve the target driver's private data area. This was set
     * up by the HBA.
     */
    devp = ddi_get_driver_private(devi);

    /*
     * Retrieve the target ID of the device.
     */
    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_TARGET, -1);

    /*
     * Since we have no idea what state things were left in by the last
     * user of the device, set up some 'default' settings, i.e. turn 'em
     * off. The scsi_ifsetcap calls force re-negotiations with the drive.
     * Do this before the scsi_probe, which sends an inquiry.
     * This is a fix for bug (4430280).
     * Of special importance is wide-xfer. The drive could have been left
     * in wide transfer mode by the last driver to communicate with it,
     * this includes us. If that's the case, and if the following is not
     * set up properly or we don't re-negotiate with the drive prior to
     * transferring data to/from the drive, it causes bus parity errors,
     * data overruns, and unexpected interrupts. This first occurred when
     * the fix for bug (4378686) was made.
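     *
     * (scsi_ifsetcap(9F) takes the capability name, the new value, and a
     * "whom" flag; the trailing 1 in the calls below means "apply to this
     * particular target" rather than to the whole bus.)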
     */
    (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
    (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
    (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

    /*
     * Currently, scsi_ifsetcap sets the tagged-qing capability for all
     * LUNs on a target. Setting it per lun instance actually sets the
     * capability of this target, which affects those luns already
     * attached on the same target. So during attach, we can disable
     * this capability only when no other lun has been attached on this
     * target. By doing this, we assume a target has the same tagged-qing
     * capability for every lun. The condition can be removed when the
     * HBA is changed to support a per-lun based tagged-qing capability.
     */
    if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
        (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
    }

    /*
     * Use scsi_probe() to issue an INQUIRY command to the device.
     * This call will allocate and fill in the scsi_inquiry structure
     * and point the sd_inq member of the scsi_device structure to it.
     * If the attach succeeds, then this memory will not be de-allocated
     * (via scsi_unprobe()) until the instance is detached.
     */
    if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
        goto probe_failed;
    }

    /*
     * Check the device type as specified in the inquiry data and
     * claim it if it is of a type that we support.
     */
    switch (devp->sd_inq->inq_dtype) {
    case DTYPE_DIRECT:
        break;
    case DTYPE_RODIRECT:
        break;
    case DTYPE_OPTICAL:
        break;
    case DTYPE_NOTPRESENT:
    default:
        /* Unsupported device type; fail the attach. */
        goto probe_failed;
    }

    /*
     * Allocate the soft state structure for this unit.
     *
     * We rely upon this memory being set to all zeroes by
     * ddi_soft_state_zalloc().  We assume that any member of the
     * soft state structure that is not explicitly initialized by
     * this routine will have a value of zero.
     */
    instance = ddi_get_instance(devp->sd_dev);
    if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
        goto probe_failed;
    }

    /*
     * Retrieve a pointer to the newly-allocated soft state.
     *
     * This should NEVER fail if the ddi_soft_state_zalloc() call above
     * was successful, unless something has gone horribly wrong and the
     * ddi's soft state internals are corrupt (in which case it is
     * probably better to halt here than just fail the attach....)
     */
    if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
        panic("sd_unit_attach: NULL soft state on instance:0x%x",
            instance);
        /*NOTREACHED*/
    }

    /*
     * Link the back ptr of the driver soft state to the scsi_device
     * struct for this lun.
     * Save a pointer to the softstate in the driver-private area of
     * the scsi_device struct.
     * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
     * we first set un->un_sd below.
     */
    un->un_sd = devp;
    devp->sd_private = (opaque_t)un;

    /*
     * The following must be after devp is stored in the soft state struct.
     */
#ifdef SDDEBUG
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "%s_unit_attach: un:0x%p instance:%d\n",
        ddi_driver_name(devi), un, instance);
#endif

    /*
     * Set up the device type and node type (for the minor nodes).
     * By default we assume that the device can at least support the
     * Common Command Set. Call it a CD-ROM if it reports itself
     * as a RODIRECT device.
     */
    switch (devp->sd_inq->inq_dtype) {
    case DTYPE_RODIRECT:
        un->un_node_type = DDI_NT_CD_CHAN;
        un->un_ctype	 = CTYPE_CDROM;
        break;
    case DTYPE_OPTICAL:
        un->un_node_type = DDI_NT_BLOCK_CHAN;
        un->un_ctype	 = CTYPE_ROD;
        break;
    default:
        un->un_node_type = DDI_NT_BLOCK_CHAN;
        un->un_ctype	 = CTYPE_CCS;
        break;
    }

    /*
     * Try to read the interconnect type from the HBA.
     *
     * Note: This driver is currently compiled as two binaries, a parallel
     * scsi version (sd) and a fibre channel version (ssd). All functional
     * differences are determined at compile time. In the future a single
     * binary will be provided and the interconnect type will be used to
     * differentiate between fibre and parallel scsi behaviors. At that
     * time it will be necessary for all fibre channel HBAs to support
     * this property.
     *
     * Set un_f_is_fibre to TRUE (default fibre).
     */
    un->un_f_is_fibre = TRUE;
    switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
    case INTERCONNECT_SSA:
        un->un_interconnect_type = SD_INTERCONNECT_SSA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
        break;
    case INTERCONNECT_PARALLEL:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
        break;
    case INTERCONNECT_SATA:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_SATA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
        break;
    case INTERCONNECT_FIBRE:
        un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
        break;
    case INTERCONNECT_FABRIC:
        un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
        un->un_node_type = DDI_NT_BLOCK_FABRIC;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
        break;
    default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
        /*
         * The HBA does not support the "interconnect-type" property
         * (or did not provide a recognized type).
         *
         * Note: This will be obsoleted when a single fibre channel
         * and parallel scsi driver is delivered. In the meantime the
         * interconnect type will be set to the platform default. If
         * that type is not parallel SCSI, it means that we should be
         * assuming "ssd" semantics. However, here this also means that
         * the FC HBA is not supporting the "interconnect-type"
         * property like we expect it to, so log this occurrence.
         */
        un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
        if (!SD_IS_PARALLEL_SCSI(un)) {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_FIBRE\n", un);
        } else {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_PARALLEL\n", un);
            un->un_f_is_fibre = FALSE;
        }
#else
        /*
         * Note: This source will be implemented when a single fibre
         * channel and parallel scsi driver is delivered.
         * The default will be to assume that if a device does not
         * support the "interconnect-type" property it is a parallel
         * SCSI HBA, and we will set the interconnect type accordingly.
         */
        un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
        un->un_f_is_fibre = FALSE;
#endif
        break;
    }

    if (un->un_f_is_fibre == TRUE) {
        if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
            SCSI_VERSION_3) {
            switch (un->un_interconnect_type) {
            case SD_INTERCONNECT_FIBRE:
            case SD_INTERCONNECT_SSA:
                un->un_node_type = DDI_NT_BLOCK_WWN;
                break;
            default:
                break;
            }
        }
    }

    /*
     * Initialize the Request Sense command for the target
     */
    if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
        goto alloc_rqs_failed;
    }

    /*
     * Set un_retry_count with SD_RETRY_COUNT; this is OK for Sparc
     * with separate binaries for sd and ssd.
     *
     * x86 has 1 binary, and un_retry_count is set based on connection
     * type. The hardcoded values will go away when Sparc uses 1 binary
     * for sd and ssd. These hardcoded values need to match
     * SD_RETRY_COUNT in sddef.h.
     * The value used is based on interconnect type:
     * fibre = 3, parallel = 5.
     */
#if defined(__i386) || defined(__amd64)
    un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
    un->un_retry_count = SD_RETRY_COUNT;
#endif

    /*
     * Set the per disk retry count to the default number of retries
     * for disks and CDROMs. This value can be overridden by the
     * disk property list or an entry in sd.conf.
     */
    un->un_notready_retry_count =
        ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
        : DISK_NOT_READY_RETRY_COUNT(un);

    /*
     * Set the busy retry count to the default value of un_retry_count.
     * This can be overridden by entries in sd.conf or the device
     * config table.
     */
    un->un_busy_retry_count = un->un_retry_count;

    /*
     * Init the reset threshold for retries. This number determines
     * how many retries must be performed before a reset can be issued
     * (for certain error conditions). This can be overridden by entries
     * in sd.conf or the device config table.
     */
    un->un_reset_retry_count = (un->un_retry_count / 2);

    /*
     * Set the victim_retry_count to the default un_retry_count
     */
    un->un_victim_retry_count = (2 * un->un_retry_count);

    /*
     * Set the reservation release timeout to the default value of
     * 5 seconds. This can be overridden by entries in ssd.conf or the
     * device config table.
     */
    un->un_reserve_release_time = 5;

    /*
     * Set up the default maximum transfer size. Note that this may
     * get updated later in the attach, when setting up default wide
     * operations for disks.
     */
#if defined(__i386) || defined(__amd64)
    un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
    un->un_partial_dma_supported = 1;
#else
    un->un_max_xfer_size = (uint_t)maxphys;
#endif

    /*
     * Get "allow bus device reset" property (defaults to "enabled" if
     * the property was not defined). This is to disable bus resets for
     * certain kinds of error recovery. Note: In the future when a
     * run-time fibre check is available the soft state flag should
     * default to enabled.
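     *
     * (ddi_getprop(9F) returns the supplied default, 1 here, when the
     * property is undefined, so bus device reset ends up enabled unless
     * "allow-bus-device-reset" is explicitly set to 0 in the .conf file.)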
     */
    if (un->un_f_is_fibre == TRUE) {
        un->un_f_allow_bus_device_reset = TRUE;
    } else {
        if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
            "allow-bus-device-reset", 1) != 0) {
            un->un_f_allow_bus_device_reset = TRUE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Bus device reset "
                "enabled\n", un);
        } else {
            un->un_f_allow_bus_device_reset = FALSE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Bus device reset "
                "disabled\n", un);
        }
    }

    /*
     * Check if this is an ATAPI device. ATAPI devices use Group 1
     * Read/Write commands and Group 2 Mode Sense/Select commands.
     *
     * Note: The "obsolete" way of doing this is to check for the "atapi"
     * property. The new "variant" property with a value of "atapi" has
     * been introduced so that future 'variants' of standard SCSI behavior
     * (like atapi) could be specified by the underlying HBA drivers by
     * supplying a new value for the "variant" property, instead of having
     * to define a new property.
     */
    if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
        un->un_f_cfg_is_atapi = TRUE;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p Atapi device\n", un);
    }
    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
        &variantp) == DDI_PROP_SUCCESS) {
        if (strcmp(variantp, "atapi") == 0) {
            un->un_f_cfg_is_atapi = TRUE;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Atapi device\n", un);
        }
        ddi_prop_free(variantp);
    }

    un->un_cmd_timeout	= SD_IO_TIME;

    /* Info on current states, statuses, etc. (Updated frequently) */
    un->un_state		= SD_STATE_NORMAL;
    un->un_last_state	= SD_STATE_NORMAL;

    /* Control & status info for command throttling */
    un->un_throttle		= sd_max_throttle;
    un->un_saved_throttle	= sd_max_throttle;
    un->un_min_throttle	= sd_min_throttle;

    if (un->un_f_is_fibre == TRUE) {
        un->un_f_use_adaptive_throttle = TRUE;
    } else {
        un->un_f_use_adaptive_throttle = FALSE;
    }

    /* Removable media support. */
    cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
    un->un_mediastate		= DKIO_NONE;
    un->un_specified_mediastate	= DKIO_NONE;

    /* CVs for suspend/resume (PM or DR) */
    cv_init(&un->un_suspend_cv,   NULL, CV_DRIVER, NULL);
    cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

    /* Power management support. */
    un->un_power_level = SD_SPINDLE_UNINIT;

    cv_init(&un->un_wcc_cv,   NULL, CV_DRIVER, NULL);
    un->un_f_wcc_inprog = 0;

    /*
     * The open/close semaphore is used to serialize threads executing
     * in the driver's open & close entry point routines for a given
     * instance.
     */
    (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

    /*
     * The conf file entry and softstate variable are a forceful override,
     * meaning a non-zero value must be entered to change the default.
     */
    un->un_f_disksort_disabled = FALSE;

    /*
     * Retrieve the properties from the static driver table or the driver
     * configuration file (.conf) for this unit and update the soft state
     * for the device as needed for the indicated properties.
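     *
     * (Illustration only: a hypothetical sd-config-list override in
     * sd.conf could look like
     *	sd-config-list = "SEAGATE ST373453", "retries-timeout:1";
     * the exact tuple format accepted is defined by
     * sd_read_unit_properties(), not by this sketch.)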
     * Note: the property configuration needs to occur here as some of the
     * following routines may have dependencies on soft state flags set
     * as part of the driver property configuration.
     */
    sd_read_unit_properties(un);
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p property configuration complete.\n", un);

    /*
     * A device is treated as hotpluggable only if it has the
     * "hotpluggable" property; otherwise it is regarded as
     * non-hotpluggable.
     */
    if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
        -1) != -1) {
        un->un_f_is_hotpluggable = TRUE;
    }

    /*
     * Set the unit's attributes (flags) according to "hotpluggable" and
     * the RMB bit in the INQUIRY data.
     */
    sd_set_unit_attributes(un, devi);

    /*
     * By default, we mark the capacity, lbasize, and geometry
     * as invalid. Only if we successfully read a valid capacity
     * will we update the un_blockcount and un_tgt_blocksize with the
     * valid values (the geometry will be validated later).
     */
    un->un_f_blockcount_is_valid	= FALSE;
    un->un_f_tgt_blocksize_is_valid	= FALSE;

    /*
     * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
     * otherwise.
     */
    un->un_tgt_blocksize  = un->un_sys_blocksize  = DEV_BSIZE;
    un->un_blockcount = 0;

    /*
     * Set up the per-instance info needed to determine the correct
     * CDBs and other info for issuing commands to the target.
     */
    sd_init_cdb_limits(un);

    /*
     * Set up the IO chains to use, based upon the target type.
     */
    if (un->un_f_non_devbsize_supported) {
        un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
    } else {
        un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
    }
    un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
    un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
    un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

    un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
        sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
        ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
    ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


    if (ISCD(un)) {
        un->un_additional_codes = sd_additional_codes;
    } else {
        un->un_additional_codes = NULL;
    }

    /*
     * Create the kstats here so they can be available for attach-time
     * routines that send commands to the unit (either polled or via
     * sd_send_scsi_cmd).
     *
     * Note: This is a critical sequence that needs to be maintained:
     *	1) Instantiate the kstats here, before any routines using the
     *	   iopath (i.e. sd_send_scsi_cmd).
     *	2) Instantiate and initialize the partition stats
     *	   (sd_set_pstats).
     *	3) Initialize the error stats (sd_set_errstats), following
     *	   sd_validate_geometry(), sd_register_devid(),
     *	   and sd_cache_control().
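     *
     * (kstat_create(9F) below is called with the module name, instance,
     * kstat name, class, type, record count, and flags; the
     * KSTAT_FLAG_PERSISTENT flag marks the kstat so its data is preserved
     * if it is deleted and later re-created, e.g. across detach/attach.)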
     */

    un->un_stats = kstat_create(sd_label, instance,
        NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
    if (un->un_stats != NULL) {
        un->un_stats->ks_lock = SD_MUTEX(un);
        kstat_install(un->un_stats);
    }
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p un_stats created\n", un);

    sd_create_errstats(un, instance);
    if (un->un_errstats == NULL) {
        goto create_errstats_failed;
    }
    SD_TRACE(SD_LOG_ATTACH_DETACH, un,
        "sd_unit_attach: un:0x%p errstats created\n", un);

    /*
     * The following if/else code was relocated here from below as part
     * of the fix for bug (4430280). However with the default setup added
     * on entry to this routine, it's no longer absolutely necessary for
     * this to be before the call to sd_spin_up_unit.
     */
    if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
        int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
            (devp->sd_inq->inq_ansi == 5)) &&
            devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

        /*
         * If tagged queueing is supported by the target
         * and by the host adapter, then we will enable it.
         */
        un->un_tagflags = 0;
        if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
            (un->un_f_arq_enabled == TRUE)) {
            if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
                1, 1) == 1) {
                un->un_tagflags = FLAG_STAG;
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_unit_attach: un:0x%p tag queueing "
                    "enabled\n", un);
            } else if (scsi_ifgetcap(SD_ADDRESS(un),
                "untagged-qing", 0) == 1) {
                un->un_f_opt_queueing = TRUE;
                un->un_saved_throttle = un->un_throttle =
                    min(un->un_throttle, 3);
            } else {
                un->un_f_opt_queueing = FALSE;
                un->un_saved_throttle = un->un_throttle = 1;
            }
        } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
            == 1) && (un->un_f_arq_enabled == TRUE)) {
            /* The Host Adapter supports internal queueing. */
            un->un_f_opt_queueing = TRUE;
            un->un_saved_throttle = un->un_throttle =
                min(un->un_throttle, 3);
        } else {
            un->un_f_opt_queueing = FALSE;
            un->un_saved_throttle = un->un_throttle = 1;
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p no tag queueing\n", un);
        }

        /*
         * Enable large transfers for SATA/SAS drives
         */
        if (SD_IS_SERIAL(un)) {
            un->un_max_xfer_size =
                ddi_getprop(DDI_DEV_T_ANY, devi, 0,
                sd_max_xfer_size, SD_MAX_XFER_SIZE);
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p max transfer "
                "size=0x%x\n", un, un->un_max_xfer_size);

        }

        /* Set up or tear down default wide operations for disks */

        /*
         * Note: Legacy: it may be possible for both "sd_max_xfer_size"
         * and "ssd_max_xfer_size" to exist simultaneously on the same
         * system and be set to different values. In the future this
         * code may need to be updated when the ssd module is
         * obsoleted and removed from the system.
(4299588) 6970 */ 6971 if (SD_IS_PARALLEL_SCSI(un) && 6972 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 6973 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 6974 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6975 1, 1) == 1) { 6976 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6977 "sd_unit_attach: un:0x%p Wide Transfer " 6978 "enabled\n", un); 6979 } 6980 6981 /* 6982 * If tagged queuing has also been enabled, then 6983 * enable large xfers 6984 */ 6985 if (un->un_saved_throttle == sd_max_throttle) { 6986 un->un_max_xfer_size = 6987 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 6988 sd_max_xfer_size, SD_MAX_XFER_SIZE); 6989 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6990 "sd_unit_attach: un:0x%p max transfer " 6991 "size=0x%x\n", un, un->un_max_xfer_size); 6992 } 6993 } else { 6994 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 6995 0, 1) == 1) { 6996 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6997 "sd_unit_attach: un:0x%p " 6998 "Wide Transfer disabled\n", un); 6999 } 7000 } 7001 } else { 7002 un->un_tagflags = FLAG_STAG; 7003 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7004 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7005 } 7006 7007 /* 7008 * If this target supports LUN reset, try to enable it. 7009 */ 7010 if (un->un_f_lun_reset_enabled) { 7011 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7012 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7013 "un:0x%p lun_reset capability set\n", un); 7014 } else { 7015 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7016 "un:0x%p lun-reset capability not set\n", un); 7017 } 7018 } 7019 7020 /* 7021 * Adjust the maximum transfer size. This is to fix 7022 * the problem of partial DMA support on SPARC. Some 7023 * HBA driver, like aac, has very small dma_attr_maxxfer 7024 * size, which requires partial DMA support on SPARC. 7025 * In the future the SPARC pci nexus driver may solve 7026 * the problem instead of this fix. 7027 */ 7028 #if defined(__sparc) 7029 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7030 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7031 un->un_max_xfer_size = max_xfer_size; 7032 un->un_partial_dma_supported = 1; 7033 } 7034 #endif 7035 7036 /* 7037 * Set PKT_DMA_PARTIAL flag. 7038 */ 7039 if (un->un_partial_dma_supported == 1) { 7040 un->un_pkt_flags = PKT_DMA_PARTIAL; 7041 } else { 7042 un->un_pkt_flags = 0; 7043 } 7044 7045 /* 7046 * At this point in the attach, we have enough info in the 7047 * soft state to be able to issue commands to the target. 7048 * 7049 * All command paths used below MUST issue their commands as 7050 * SD_PATH_DIRECT. This is important as intermediate layers 7051 * are not all initialized yet (such as PM). 7052 */ 7053 7054 /* 7055 * Send a TEST UNIT READY command to the device. This should clear 7056 * any outstanding UNIT ATTENTION that may be present. 7057 * 7058 * Note: Don't check for success, just track if there is a reservation, 7059 * this is a throw away command to clear any unit attentions. 7060 * 7061 * Note: This MUST be the first command issued to the target during 7062 * attach to ensure power on UNIT ATTENTIONS are cleared. 7063 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7064 * with attempts at spinning up a device with no media. 
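*
* (The return value is otherwise ignored; only EACCES, indicating a
* reservation conflict, is recorded below so that later attach-time
* steps such as sd_register_devid() can account for the reservation.)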
7065 */ 7066 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7067 reservation_flag = SD_TARGET_IS_RESERVED; 7068 } 7069 7070 /* 7071 * If the device is NOT a removable media device, attempt to spin 7072 * it up (using the START_STOP_UNIT command) and read its capacity 7073 * (using the READ CAPACITY command). Note, however, that either 7074 * of these could fail and in some cases we would continue with 7075 * the attach despite the failure (see below). 7076 */ 7077 if (un->un_f_descr_format_supported) { 7078 switch (sd_spin_up_unit(un)) { 7079 case 0: 7080 /* 7081 * Spin-up was successful; now try to read the 7082 * capacity. If successful then save the results 7083 * and mark the capacity & lbasize as valid. 7084 */ 7085 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7086 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7087 7088 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7089 &lbasize, SD_PATH_DIRECT)) { 7090 case 0: { 7091 if (capacity > DK_MAX_BLOCKS) { 7092 #ifdef _LP64 7093 if (capacity + 1 > 7094 SD_GROUP1_MAX_ADDRESS) { 7095 /* 7096 * Enable descriptor format 7097 * sense data so that we can 7098 * get 64 bit sense data 7099 * fields. 7100 */ 7101 sd_enable_descr_sense(un); 7102 } 7103 #else 7104 /* 32-bit kernels can't handle this */ 7105 scsi_log(SD_DEVINFO(un), 7106 sd_label, CE_WARN, 7107 "disk has %llu blocks, which " 7108 "is too large for a 32-bit " 7109 "kernel", capacity); 7110 7111 #if defined(__i386) || defined(__amd64) 7112 /* 7113 * 1TB disk was treated as (1T - 512)B 7114 * in the past, so that it might have 7115 * valid VTOC and solaris partitions, 7116 * we have to allow it to continue to 7117 * work. 7118 */ 7119 if (capacity -1 > DK_MAX_BLOCKS) 7120 #endif 7121 goto spinup_failed; 7122 #endif 7123 } 7124 7125 /* 7126 * Here it's not necessary to check the case: 7127 * the capacity of the device is bigger than 7128 * what the max hba cdb can support. Because 7129 * sd_send_scsi_READ_CAPACITY will retrieve 7130 * the capacity by sending USCSI command, which 7131 * is constrained by the max hba cdb. Actually, 7132 * sd_send_scsi_READ_CAPACITY will return 7133 * EINVAL when using bigger cdb than required 7134 * cdb length. Will handle this case in 7135 * "case EINVAL". 7136 */ 7137 7138 /* 7139 * The following relies on 7140 * sd_send_scsi_READ_CAPACITY never 7141 * returning 0 for capacity and/or lbasize. 7142 */ 7143 sd_update_block_info(un, lbasize, capacity); 7144 7145 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7146 "sd_unit_attach: un:0x%p capacity = %ld " 7147 "blocks; lbasize= %ld.\n", un, 7148 un->un_blockcount, un->un_tgt_blocksize); 7149 7150 break; 7151 } 7152 case EINVAL: 7153 /* 7154 * In the case where the max-cdb-length property 7155 * is smaller than the required CDB length for 7156 * a SCSI device, a target driver can fail to 7157 * attach to that device. 7158 */ 7159 scsi_log(SD_DEVINFO(un), 7160 sd_label, CE_WARN, 7161 "disk capacity is too large " 7162 "for current cdb length"); 7163 goto spinup_failed; 7164 case EACCES: 7165 /* 7166 * Should never get here if the spin-up 7167 * succeeded, but code it in anyway. 7168 * From here, just continue with the attach... 7169 */ 7170 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7171 "sd_unit_attach: un:0x%p " 7172 "sd_send_scsi_READ_CAPACITY " 7173 "returned reservation conflict\n", un); 7174 reservation_flag = SD_TARGET_IS_RESERVED; 7175 break; 7176 default: 7177 /* 7178 * Likewise, should never get here if the 7179 * spin-up succeeded. Just continue with 7180 * the attach... 
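*
* (For reference on the capacity limits tested earlier in this
* switch, assuming 512-byte blocks: DK_MAX_BLOCKS is 2^31 - 1
* blocks, i.e. just under 1TB, the most the 32-bit code will
* accept, while SD_GROUP1_MAX_ADDRESS reflects the 32-bit LBA
* limit of 10-byte (Group 1) CDBs, roughly 2TB, beyond which
* descriptor-format sense is enabled for 64-bit sense fields.)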
7181 */ 7182 break; 7183 } 7184 break; 7185 case EACCES: 7186 /* 7187 * Device is reserved by another host. In this case 7188 * we could not spin it up or read the capacity, but 7189 * we continue with the attach anyway. 7190 */ 7191 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7192 "sd_unit_attach: un:0x%p spin-up reservation " 7193 "conflict.\n", un); 7194 reservation_flag = SD_TARGET_IS_RESERVED; 7195 break; 7196 default: 7197 /* Fail the attach if the spin-up failed. */ 7198 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7199 "sd_unit_attach: un:0x%p spin-up failed.", un); 7200 goto spinup_failed; 7201 } 7202 } 7203 7204 /* 7205 * Check to see if this is a MMC drive 7206 */ 7207 if (ISCD(un)) { 7208 sd_set_mmc_caps(un); 7209 } 7210 7211 7212 /* 7213 * Add a zero-length attribute to tell the world we support 7214 * kernel ioctls (for layered drivers) 7215 */ 7216 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7217 DDI_KERNEL_IOCTL, NULL, 0); 7218 7219 /* 7220 * Add a boolean property to tell the world we support 7221 * the B_FAILFAST flag (for layered drivers) 7222 */ 7223 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7224 "ddi-failfast-supported", NULL, 0); 7225 7226 /* 7227 * Initialize power management 7228 */ 7229 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7230 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7231 sd_setup_pm(un, devi); 7232 if (un->un_f_pm_is_enabled == FALSE) { 7233 /* 7234 * For performance, point to a jump table that does 7235 * not include pm. 7236 * The direct and priority chains don't change with PM. 7237 * 7238 * Note: this is currently done based on individual device 7239 * capabilities. When an interface for determining system 7240 * power enabled state becomes available, or when additional 7241 * layers are added to the command chain, these values will 7242 * have to be re-evaluated for correctness. 7243 */ 7244 if (un->un_f_non_devbsize_supported) { 7245 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7246 } else { 7247 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7248 } 7249 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7250 } 7251 7252 /* 7253 * This property is set to 0 by HA software to avoid retries 7254 * on a reserved disk. (The preferred property name is 7255 * "retry-on-reservation-conflict") (1189689) 7256 * 7257 * Note: The use of a global here can have unintended consequences. A 7258 * per instance variable is preferrable to match the capabilities of 7259 * different underlying hba's (4402600) 7260 */ 7261 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7262 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7263 sd_retry_on_reservation_conflict); 7264 if (sd_retry_on_reservation_conflict != 0) { 7265 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7266 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7267 sd_retry_on_reservation_conflict); 7268 } 7269 7270 /* Set up options for QFULL handling. */ 7271 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7272 "qfull-retries", -1)) != -1) { 7273 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7274 rval, 1); 7275 } 7276 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7277 "qfull-retry-interval", -1)) != -1) { 7278 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7279 rval, 1); 7280 } 7281 7282 /* 7283 * This just prints a message that announces the existence of the 7284 * device. 
The message is always printed in the system logfile, but 7285 * only appears on the console if the system is booted with the 7286 * -v (verbose) argument. 7287 */ 7288 ddi_report_dev(devi); 7289 7290 un->un_mediastate = DKIO_NONE; 7291 7292 cmlb_alloc_handle(&un->un_cmlbhandle); 7293 7294 #if defined(__i386) || defined(__amd64) 7295 /* 7296 * On x86, compensate for off-by-1 legacy error 7297 */ 7298 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7299 (lbasize == un->un_sys_blocksize)) 7300 offbyone = CMLB_OFF_BY_ONE; 7301 #endif 7302 7303 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7304 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7305 un->un_node_type, offbyone, un->un_cmlbhandle, 7306 (void *)SD_PATH_DIRECT) != 0) { 7307 goto cmlb_attach_failed; 7308 } 7309 7310 7311 /* 7312 * Read and validate the device's geometry (ie, disk label) 7313 * A new unformatted drive will not have a valid geometry, but 7314 * the driver needs to successfully attach to this device so 7315 * the drive can be formatted via ioctls. 7316 */ 7317 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7318 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7319 7320 mutex_enter(SD_MUTEX(un)); 7321 7322 /* 7323 * Read and initialize the devid for the unit. 7324 */ 7325 if (un->un_f_devid_supported) { 7326 sd_register_devid(un, devi, reservation_flag); 7327 } 7328 mutex_exit(SD_MUTEX(un)); 7329 7330 #if (defined(__fibre)) 7331 /* 7332 * Register callbacks for fibre only. You can't do this soley 7333 * on the basis of the devid_type because this is hba specific. 7334 * We need to query our hba capabilities to find out whether to 7335 * register or not. 7336 */ 7337 if (un->un_f_is_fibre) { 7338 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7339 sd_init_event_callbacks(un); 7340 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7341 "sd_unit_attach: un:0x%p event callbacks inserted", 7342 un); 7343 } 7344 } 7345 #endif 7346 7347 if (un->un_f_opt_disable_cache == TRUE) { 7348 /* 7349 * Disable both read cache and write cache. This is 7350 * the historic behavior of the keywords in the config file. 7351 */ 7352 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7353 0) { 7354 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7355 "sd_unit_attach: un:0x%p Could not disable " 7356 "caching", un); 7357 goto devid_failed; 7358 } 7359 } 7360 7361 /* 7362 * Check the value of the WCE bit now and 7363 * set un_f_write_cache_enabled accordingly. 7364 */ 7365 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7366 mutex_enter(SD_MUTEX(un)); 7367 un->un_f_write_cache_enabled = (wc_enabled != 0); 7368 mutex_exit(SD_MUTEX(un)); 7369 7370 /* 7371 * Check the value of the NV_SUP bit and set 7372 * un_f_suppress_cache_flush accordingly. 7373 */ 7374 sd_get_nv_sup(un); 7375 7376 /* 7377 * Find out what type of reservation this disk supports. 7378 */ 7379 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7380 case 0: 7381 /* 7382 * SCSI-3 reservations are supported. 7383 */ 7384 un->un_reservation_type = SD_SCSI3_RESERVATION; 7385 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7386 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7387 break; 7388 case ENOTSUP: 7389 /* 7390 * The PERSISTENT RESERVE IN command would not be recognized by 7391 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
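*
* (A pre-SCSI-3 target typically rejects the unrecognized opcode
* with ILLEGAL REQUEST / "invalid command operation code" sense
* data, which the command layer maps to ENOTSUP here; this is an
* explanatory note, not a guarantee about every device.)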
7392 */ 7393 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7394 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7395 un->un_reservation_type = SD_SCSI2_RESERVATION; 7396 break; 7397 default: 7398 /* 7399 * default to SCSI-3 reservations 7400 */ 7401 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7402 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7403 un->un_reservation_type = SD_SCSI3_RESERVATION; 7404 break; 7405 } 7406 7407 /* 7408 * Set the pstat and error stat values here, so data obtained during the 7409 * previous attach-time routines is available. 7410 * 7411 * Note: This is a critical sequence that needs to be maintained: 7412 * 1) Instantiate the kstats before any routines using the iopath 7413 * (i.e. sd_send_scsi_cmd). 7414 * 2) Initialize the error stats (sd_set_errstats) and partition 7415 * stats (sd_set_pstats)here, following 7416 * cmlb_validate_geometry(), sd_register_devid(), and 7417 * sd_cache_control(). 7418 */ 7419 7420 if (un->un_f_pkstats_enabled && geom_label_valid) { 7421 sd_set_pstats(un); 7422 SD_TRACE(SD_LOG_IO_PARTITION, un, 7423 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7424 } 7425 7426 sd_set_errstats(un); 7427 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7428 "sd_unit_attach: un:0x%p errstats set\n", un); 7429 7430 7431 /* 7432 * After successfully attaching an instance, we record the information 7433 * of how many luns have been attached on the relative target and 7434 * controller for parallel SCSI. This information is used when sd tries 7435 * to set the tagged queuing capability in HBA. 7436 */ 7437 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7438 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7439 } 7440 7441 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7442 "sd_unit_attach: un:0x%p exit success\n", un); 7443 7444 return (DDI_SUCCESS); 7445 7446 /* 7447 * An error occurred during the attach; clean up & return failure. 7448 */ 7449 7450 devid_failed: 7451 7452 setup_pm_failed: 7453 ddi_remove_minor_node(devi, NULL); 7454 7455 cmlb_attach_failed: 7456 /* 7457 * Cleanup from the scsi_ifsetcap() calls (437868) 7458 */ 7459 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7460 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7461 7462 /* 7463 * Refer to the comments of setting tagged-qing in the beginning of 7464 * sd_unit_attach. We can only disable tagged queuing when there is 7465 * no lun attached on the target. 7466 */ 7467 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7468 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7469 } 7470 7471 if (un->un_f_is_fibre == FALSE) { 7472 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7473 } 7474 7475 spinup_failed: 7476 7477 mutex_enter(SD_MUTEX(un)); 7478 7479 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7480 if (un->un_direct_priority_timeid != NULL) { 7481 timeout_id_t temp_id = un->un_direct_priority_timeid; 7482 un->un_direct_priority_timeid = NULL; 7483 mutex_exit(SD_MUTEX(un)); 7484 (void) untimeout(temp_id); 7485 mutex_enter(SD_MUTEX(un)); 7486 } 7487 7488 /* Cancel any pending start/stop timeouts */ 7489 if (un->un_startstop_timeid != NULL) { 7490 timeout_id_t temp_id = un->un_startstop_timeid; 7491 un->un_startstop_timeid = NULL; 7492 mutex_exit(SD_MUTEX(un)); 7493 (void) untimeout(temp_id); 7494 mutex_enter(SD_MUTEX(un)); 7495 } 7496 7497 /* Cancel any pending reset-throttle timeouts */ 7498 if (un->un_reset_throttle_timeid != NULL) { 7499 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7500 un->un_reset_throttle_timeid = NULL; 7501 mutex_exit(SD_MUTEX(un)); 7502 (void) untimeout(temp_id); 7503 mutex_enter(SD_MUTEX(un)); 7504 } 7505 7506 /* Cancel any pending retry timeouts */ 7507 if (un->un_retry_timeid != NULL) { 7508 timeout_id_t temp_id = un->un_retry_timeid; 7509 un->un_retry_timeid = NULL; 7510 mutex_exit(SD_MUTEX(un)); 7511 (void) untimeout(temp_id); 7512 mutex_enter(SD_MUTEX(un)); 7513 } 7514 7515 /* Cancel any pending delayed cv broadcast timeouts */ 7516 if (un->un_dcvb_timeid != NULL) { 7517 timeout_id_t temp_id = un->un_dcvb_timeid; 7518 un->un_dcvb_timeid = NULL; 7519 mutex_exit(SD_MUTEX(un)); 7520 (void) untimeout(temp_id); 7521 mutex_enter(SD_MUTEX(un)); 7522 } 7523 7524 mutex_exit(SD_MUTEX(un)); 7525 7526 /* There should not be any in-progress I/O so ASSERT this check */ 7527 ASSERT(un->un_ncmds_in_transport == 0); 7528 ASSERT(un->un_ncmds_in_driver == 0); 7529 7530 /* Do not free the softstate if the callback routine is active */ 7531 sd_sync_with_callback(un); 7532 7533 /* 7534 * Partition stats apparently are not used with removables. These would 7535 * not have been created during attach, so no need to clean them up... 7536 */ 7537 if (un->un_errstats != NULL) { 7538 kstat_delete(un->un_errstats); 7539 un->un_errstats = NULL; 7540 } 7541 7542 create_errstats_failed: 7543 7544 if (un->un_stats != NULL) { 7545 kstat_delete(un->un_stats); 7546 un->un_stats = NULL; 7547 } 7548 7549 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7550 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7551 7552 ddi_prop_remove_all(devi); 7553 sema_destroy(&un->un_semoclose); 7554 cv_destroy(&un->un_state_cv); 7555 7556 getrbuf_failed: 7557 7558 sd_free_rqs(un); 7559 7560 alloc_rqs_failed: 7561 7562 devp->sd_private = NULL; 7563 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7564 7565 get_softstate_failed: 7566 /* 7567 * Note: the man pages are unclear as to whether or not doing a 7568 * ddi_soft_state_free(sd_state, instance) is the right way to 7569 * clean up after the ddi_soft_state_zalloc() if the subsequent 7570 * ddi_get_soft_state() fails. The implication seems to be 7571 * that the get_soft_state cannot fail if the zalloc succeeds. 7572 */ 7573 ddi_soft_state_free(sd_state, instance); 7574 7575 probe_failed: 7576 scsi_unprobe(devp); 7577 7578 return (DDI_FAILURE); 7579 } 7580 7581 7582 /* 7583 * Function: sd_unit_detach 7584 * 7585 * Description: Performs DDI_DETACH processing for sddetach(). 
7586 * 7587 * Return Code: DDI_SUCCESS 7588 * DDI_FAILURE 7589 * 7590 * Context: Kernel thread context 7591 */ 7592 7593 static int 7594 sd_unit_detach(dev_info_t *devi) 7595 { 7596 struct scsi_device *devp; 7597 struct sd_lun *un; 7598 int i; 7599 int tgt; 7600 dev_t dev; 7601 dev_info_t *pdip = ddi_get_parent(devi); 7602 int instance = ddi_get_instance(devi); 7603 7604 mutex_enter(&sd_detach_mutex); 7605 7606 /* 7607 * Fail the detach for any of the following: 7608 * - Unable to get the sd_lun struct for the instance 7609 * - A layered driver has an outstanding open on the instance 7610 * - Another thread is already detaching this instance 7611 * - Another thread is currently performing an open 7612 */ 7613 devp = ddi_get_driver_private(devi); 7614 if ((devp == NULL) || 7615 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7616 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7617 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7618 mutex_exit(&sd_detach_mutex); 7619 return (DDI_FAILURE); 7620 } 7621 7622 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7623 7624 /* 7625 * Mark this instance as currently in a detach, to inhibit any 7626 * opens from a layered driver. 7627 */ 7628 un->un_detach_count++; 7629 mutex_exit(&sd_detach_mutex); 7630 7631 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7632 SCSI_ADDR_PROP_TARGET, -1); 7633 7634 dev = sd_make_device(SD_DEVINFO(un)); 7635 7636 #ifndef lint 7637 _NOTE(COMPETING_THREADS_NOW); 7638 #endif 7639 7640 mutex_enter(SD_MUTEX(un)); 7641 7642 /* 7643 * Fail the detach if there are any outstanding layered 7644 * opens on this device. 7645 */ 7646 for (i = 0; i < NDKMAP; i++) { 7647 if (un->un_ocmap.lyropen[i] != 0) { 7648 goto err_notclosed; 7649 } 7650 } 7651 7652 /* 7653 * Verify there are NO outstanding commands issued to this device. 7654 * ie, un_ncmds_in_transport == 0. 7655 * It's possible to have outstanding commands through the physio 7656 * code path, even though everything's closed. 7657 */ 7658 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7659 (un->un_direct_priority_timeid != NULL) || 7660 (un->un_state == SD_STATE_RWAIT)) { 7661 mutex_exit(SD_MUTEX(un)); 7662 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7663 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7664 goto err_stillbusy; 7665 } 7666 7667 /* 7668 * If we have the device reserved, release the reservation. 7669 */ 7670 if ((un->un_resvd_status & SD_RESERVE) && 7671 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7672 mutex_exit(SD_MUTEX(un)); 7673 /* 7674 * Note: sd_reserve_release sends a command to the device 7675 * via the sd_ioctlcmd() path, and can sleep. 7676 */ 7677 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7678 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7679 "sd_dr_detach: Cannot release reservation \n"); 7680 } 7681 } else { 7682 mutex_exit(SD_MUTEX(un)); 7683 } 7684 7685 /* 7686 * Untimeout any reserve recover, throttle reset, restart unit 7687 * and delayed broadcast timeout threads. Protect the timeout pointer 7688 * from getting nulled by their callback functions. 
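*
* Each cancellation below follows the same idiom (sketch only;
* un_foo_timeid stands in for the specific timeout ids):
*
*	if (un->un_foo_timeid != NULL) {
*		timeout_id_t temp_id = un->un_foo_timeid;
*		un->un_foo_timeid = NULL;	<- clear under SD_MUTEX
*		mutex_exit(SD_MUTEX(un));	<- drop it across untimeout
*		(void) untimeout(temp_id);
*		mutex_enter(SD_MUTEX(un));
*	}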
7689 */ 7690 mutex_enter(SD_MUTEX(un)); 7691 if (un->un_resvd_timeid != NULL) { 7692 timeout_id_t temp_id = un->un_resvd_timeid; 7693 un->un_resvd_timeid = NULL; 7694 mutex_exit(SD_MUTEX(un)); 7695 (void) untimeout(temp_id); 7696 mutex_enter(SD_MUTEX(un)); 7697 } 7698 7699 if (un->un_reset_throttle_timeid != NULL) { 7700 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7701 un->un_reset_throttle_timeid = NULL; 7702 mutex_exit(SD_MUTEX(un)); 7703 (void) untimeout(temp_id); 7704 mutex_enter(SD_MUTEX(un)); 7705 } 7706 7707 if (un->un_startstop_timeid != NULL) { 7708 timeout_id_t temp_id = un->un_startstop_timeid; 7709 un->un_startstop_timeid = NULL; 7710 mutex_exit(SD_MUTEX(un)); 7711 (void) untimeout(temp_id); 7712 mutex_enter(SD_MUTEX(un)); 7713 } 7714 7715 if (un->un_dcvb_timeid != NULL) { 7716 timeout_id_t temp_id = un->un_dcvb_timeid; 7717 un->un_dcvb_timeid = NULL; 7718 mutex_exit(SD_MUTEX(un)); 7719 (void) untimeout(temp_id); 7720 } else { 7721 mutex_exit(SD_MUTEX(un)); 7722 } 7723 7724 /* Remove any pending reservation reclaim requests for this device */ 7725 sd_rmv_resv_reclaim_req(dev); 7726 7727 mutex_enter(SD_MUTEX(un)); 7728 7729 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7730 if (un->un_direct_priority_timeid != NULL) { 7731 timeout_id_t temp_id = un->un_direct_priority_timeid; 7732 un->un_direct_priority_timeid = NULL; 7733 mutex_exit(SD_MUTEX(un)); 7734 (void) untimeout(temp_id); 7735 mutex_enter(SD_MUTEX(un)); 7736 } 7737 7738 /* Cancel any active multi-host disk watch thread requests */ 7739 if (un->un_mhd_token != NULL) { 7740 mutex_exit(SD_MUTEX(un)); 7741 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7742 if (scsi_watch_request_terminate(un->un_mhd_token, 7743 SCSI_WATCH_TERMINATE_NOWAIT)) { 7744 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7745 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7746 /* 7747 * Note: We are returning here after having removed 7748 * some driver timeouts above. This is consistent with 7749 * the legacy implementation but perhaps the watch 7750 * terminate call should be made with the wait flag set. 7751 */ 7752 goto err_stillbusy; 7753 } 7754 mutex_enter(SD_MUTEX(un)); 7755 un->un_mhd_token = NULL; 7756 } 7757 7758 if (un->un_swr_token != NULL) { 7759 mutex_exit(SD_MUTEX(un)); 7760 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7761 if (scsi_watch_request_terminate(un->un_swr_token, 7762 SCSI_WATCH_TERMINATE_NOWAIT)) { 7763 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7764 "sd_dr_detach: Cannot cancel swr watch request\n"); 7765 /* 7766 * Note: We are returning here after having removed 7767 * some driver timeouts above. This is consistent with 7768 * the legacy implementation but perhaps the watch 7769 * terminate call should be made with the wait flag set. 7770 */ 7771 goto err_stillbusy; 7772 } 7773 mutex_enter(SD_MUTEX(un)); 7774 un->un_swr_token = NULL; 7775 } 7776 7777 mutex_exit(SD_MUTEX(un)); 7778 7779 /* 7780 * Clear any scsi_reset_notifies. We clear the reset notifies 7781 * if we have not registered one. 7782 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7783 */ 7784 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7785 sd_mhd_reset_notify_cb, (caddr_t)un); 7786 7787 /* 7788 * protect the timeout pointers from getting nulled by 7789 * their callback functions during the cancellation process. 7790 * In such a scenario untimeout can be invoked with a null value. 
7791 */
7792 _NOTE(NO_COMPETING_THREADS_NOW);
7793
7794 mutex_enter(&un->un_pm_mutex);
7795 if (un->un_pm_idle_timeid != NULL) {
7796 timeout_id_t temp_id = un->un_pm_idle_timeid;
7797 un->un_pm_idle_timeid = NULL;
7798 mutex_exit(&un->un_pm_mutex);
7799
7800 /*
7801 * Timeout is active; cancel it.
7802 * Note that it'll never be active on a device
7803 * that does not support PM, so we don't have
7804 * to check before calling pm_idle_component.
7805 */
7806 (void) untimeout(temp_id);
7807 (void) pm_idle_component(SD_DEVINFO(un), 0);
7808 mutex_enter(&un->un_pm_mutex);
7809 }
7810
7811 /*
7812 * Check whether there is already a timeout scheduled for power
7813 * management. If yes, then don't lower the power here; that's
7814 * the timeout handler's job.
7815 */
7816 if (un->un_pm_timeid != NULL) {
7817 timeout_id_t temp_id = un->un_pm_timeid;
7818 un->un_pm_timeid = NULL;
7819 mutex_exit(&un->un_pm_mutex);
7820 /*
7821 * Timeout is active; cancel it.
7822 * Note that it'll never be active on a device
7823 * that does not support PM, so we don't have
7824 * to check before calling pm_idle_component.
7825 */
7826 (void) untimeout(temp_id);
7827 (void) pm_idle_component(SD_DEVINFO(un), 0);
7828
7829 } else {
7830 mutex_exit(&un->un_pm_mutex);
7831 if ((un->un_f_pm_is_enabled == TRUE) &&
7832 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
7833 DDI_SUCCESS)) {
7834 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
7835 "sd_dr_detach: Lower power request failed, ignoring.\n");
7836 /*
7837 * Fix for bug: 4297749, item # 13
7838 * The above test now includes a check to see if PM is
7839 * supported by this device before calling
7840 * pm_lower_power().
7841 * Note, the following is not dead code. The call to
7842 * pm_lower_power above will generate a call back into
7843 * our sdpower routine which might result in a timeout
7844 * handler getting activated. Therefore the following
7845 * code is valid and necessary.
7846 */
7847 mutex_enter(&un->un_pm_mutex);
7848 if (un->un_pm_timeid != NULL) {
7849 timeout_id_t temp_id = un->un_pm_timeid;
7850 un->un_pm_timeid = NULL;
7851 mutex_exit(&un->un_pm_mutex);
7852 (void) untimeout(temp_id);
7853 (void) pm_idle_component(SD_DEVINFO(un), 0);
7854 } else {
7855 mutex_exit(&un->un_pm_mutex);
7856 }
7857 }
7858 }
7859
7860 /*
7861 * Cleanup from the scsi_ifsetcap() calls (437868)
7862 * Relocated here from above to be after the call to
7863 * pm_lower_power, which was getting errors.
7864 */
7865 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7866 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7867
7868 /*
7869 * Currently, tagged queuing is supported per target by the HBA.
7870 * Setting this per lun instance actually sets the capability of this
7871 * target in the HBA, which affects those luns already attached on the
7872 * same target. So during detach, we can disable this capability
7873 * only when this is the only lun left on the target. By doing
7874 * this, we assume a target has the same tagged queuing capability
7875 * for every lun. The condition can be removed when the HBA is changed
7876 * to support a per-lun tagged queuing capability.
7877 */ 7878 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 7879 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7880 } 7881 7882 if (un->un_f_is_fibre == FALSE) { 7883 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7884 } 7885 7886 /* 7887 * Remove any event callbacks, fibre only 7888 */ 7889 if (un->un_f_is_fibre == TRUE) { 7890 if ((un->un_insert_event != NULL) && 7891 (ddi_remove_event_handler(un->un_insert_cb_id) != 7892 DDI_SUCCESS)) { 7893 /* 7894 * Note: We are returning here after having done 7895 * substantial cleanup above. This is consistent 7896 * with the legacy implementation but this may not 7897 * be the right thing to do. 7898 */ 7899 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7900 "sd_dr_detach: Cannot cancel insert event\n"); 7901 goto err_remove_event; 7902 } 7903 un->un_insert_event = NULL; 7904 7905 if ((un->un_remove_event != NULL) && 7906 (ddi_remove_event_handler(un->un_remove_cb_id) != 7907 DDI_SUCCESS)) { 7908 /* 7909 * Note: We are returning here after having done 7910 * substantial cleanup above. This is consistent 7911 * with the legacy implementation but this may not 7912 * be the right thing to do. 7913 */ 7914 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7915 "sd_dr_detach: Cannot cancel remove event\n"); 7916 goto err_remove_event; 7917 } 7918 un->un_remove_event = NULL; 7919 } 7920 7921 /* Do not free the softstate if the callback routine is active */ 7922 sd_sync_with_callback(un); 7923 7924 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 7925 cmlb_free_handle(&un->un_cmlbhandle); 7926 7927 /* 7928 * Hold the detach mutex here, to make sure that no other threads ever 7929 * can access a (partially) freed soft state structure. 7930 */ 7931 mutex_enter(&sd_detach_mutex); 7932 7933 /* 7934 * Clean up the soft state struct. 7935 * Cleanup is done in reverse order of allocs/inits. 7936 * At this point there should be no competing threads anymore. 7937 */ 7938 7939 /* Unregister and free device id. */ 7940 ddi_devid_unregister(devi); 7941 if (un->un_devid) { 7942 ddi_devid_free(un->un_devid); 7943 un->un_devid = NULL; 7944 } 7945 7946 /* 7947 * Destroy wmap cache if it exists. 7948 */ 7949 if (un->un_wm_cache != NULL) { 7950 kmem_cache_destroy(un->un_wm_cache); 7951 un->un_wm_cache = NULL; 7952 } 7953 7954 /* 7955 * kstat cleanup is done in detach for all device types (4363169). 7956 * We do not want to fail detach if the device kstats are not deleted 7957 * since there is a confusion about the devo_refcnt for the device. 7958 * We just delete the kstats and let detach complete successfully. 7959 */ 7960 if (un->un_stats != NULL) { 7961 kstat_delete(un->un_stats); 7962 un->un_stats = NULL; 7963 } 7964 if (un->un_errstats != NULL) { 7965 kstat_delete(un->un_errstats); 7966 un->un_errstats = NULL; 7967 } 7968 7969 /* Remove partition stats */ 7970 if (un->un_f_pkstats_enabled) { 7971 for (i = 0; i < NSDMAP; i++) { 7972 if (un->un_pstats[i] != NULL) { 7973 kstat_delete(un->un_pstats[i]); 7974 un->un_pstats[i] = NULL; 7975 } 7976 } 7977 } 7978 7979 /* Remove xbuf registration */ 7980 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7981 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7982 7983 /* Remove driver properties */ 7984 ddi_prop_remove_all(devi); 7985 7986 mutex_destroy(&un->un_pm_mutex); 7987 cv_destroy(&un->un_pm_busy_cv); 7988 7989 cv_destroy(&un->un_wcc_cv); 7990 7991 /* Open/close semaphore */ 7992 sema_destroy(&un->un_semoclose); 7993 7994 /* Removable media condvar. 
*/ 7995 cv_destroy(&un->un_state_cv); 7996 7997 /* Suspend/resume condvar. */ 7998 cv_destroy(&un->un_suspend_cv); 7999 cv_destroy(&un->un_disk_busy_cv); 8000 8001 sd_free_rqs(un); 8002 8003 /* Free up soft state */ 8004 devp->sd_private = NULL; 8005 8006 bzero(un, sizeof (struct sd_lun)); 8007 ddi_soft_state_free(sd_state, instance); 8008 8009 mutex_exit(&sd_detach_mutex); 8010 8011 /* This frees up the INQUIRY data associated with the device. */ 8012 scsi_unprobe(devp); 8013 8014 /* 8015 * After successfully detaching an instance, we update the information 8016 * of how many luns have been attached in the relative target and 8017 * controller for parallel SCSI. This information is used when sd tries 8018 * to set the tagged queuing capability in HBA. 8019 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8020 * check if the device is parallel SCSI. However, we don't need to 8021 * check here because we've already checked during attach. No device 8022 * that is not parallel SCSI is in the chain. 8023 */ 8024 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8025 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8026 } 8027 8028 return (DDI_SUCCESS); 8029 8030 err_notclosed: 8031 mutex_exit(SD_MUTEX(un)); 8032 8033 err_stillbusy: 8034 _NOTE(NO_COMPETING_THREADS_NOW); 8035 8036 err_remove_event: 8037 mutex_enter(&sd_detach_mutex); 8038 un->un_detach_count--; 8039 mutex_exit(&sd_detach_mutex); 8040 8041 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8042 return (DDI_FAILURE); 8043 } 8044 8045 8046 /* 8047 * Function: sd_create_errstats 8048 * 8049 * Description: This routine instantiates the device error stats. 8050 * 8051 * Note: During attach the stats are instantiated first so they are 8052 * available for attach-time routines that utilize the driver 8053 * iopath to send commands to the device. The stats are initialized 8054 * separately so data obtained during some attach-time routines is 8055 * available. 
(4362483) 8056 * 8057 * Arguments: un - driver soft state (unit) structure 8058 * instance - driver instance 8059 * 8060 * Context: Kernel thread context 8061 */ 8062 8063 static void 8064 sd_create_errstats(struct sd_lun *un, int instance) 8065 { 8066 struct sd_errstats *stp; 8067 char kstatmodule_err[KSTAT_STRLEN]; 8068 char kstatname[KSTAT_STRLEN]; 8069 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8070 8071 ASSERT(un != NULL); 8072 8073 if (un->un_errstats != NULL) { 8074 return; 8075 } 8076 8077 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8078 "%serr", sd_label); 8079 (void) snprintf(kstatname, sizeof (kstatname), 8080 "%s%d,err", sd_label, instance); 8081 8082 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8083 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8084 8085 if (un->un_errstats == NULL) { 8086 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8087 "sd_create_errstats: Failed kstat_create\n"); 8088 return; 8089 } 8090 8091 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8092 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8093 KSTAT_DATA_UINT32); 8094 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8095 KSTAT_DATA_UINT32); 8096 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8097 KSTAT_DATA_UINT32); 8098 kstat_named_init(&stp->sd_vid, "Vendor", 8099 KSTAT_DATA_CHAR); 8100 kstat_named_init(&stp->sd_pid, "Product", 8101 KSTAT_DATA_CHAR); 8102 kstat_named_init(&stp->sd_revision, "Revision", 8103 KSTAT_DATA_CHAR); 8104 kstat_named_init(&stp->sd_serial, "Serial No", 8105 KSTAT_DATA_CHAR); 8106 kstat_named_init(&stp->sd_capacity, "Size", 8107 KSTAT_DATA_ULONGLONG); 8108 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8109 KSTAT_DATA_UINT32); 8110 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8111 KSTAT_DATA_UINT32); 8112 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8113 KSTAT_DATA_UINT32); 8114 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8115 KSTAT_DATA_UINT32); 8116 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8117 KSTAT_DATA_UINT32); 8118 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8119 KSTAT_DATA_UINT32); 8120 8121 un->un_errstats->ks_private = un; 8122 un->un_errstats->ks_update = nulldev; 8123 8124 kstat_install(un->un_errstats); 8125 } 8126 8127 8128 /* 8129 * Function: sd_set_errstats 8130 * 8131 * Description: This routine sets the value of the vendor id, product id, 8132 * revision, serial number, and capacity device error stats. 8133 * 8134 * Note: During attach the stats are instantiated first so they are 8135 * available for attach-time routines that utilize the driver 8136 * iopath to send commands to the device. The stats are initialized 8137 * separately so data obtained during some attach-time routines is 8138 * available. 
(4362483) 8139 * 8140 * Arguments: un - driver soft state (unit) structure 8141 * 8142 * Context: Kernel thread context 8143 */ 8144 8145 static void 8146 sd_set_errstats(struct sd_lun *un) 8147 { 8148 struct sd_errstats *stp; 8149 8150 ASSERT(un != NULL); 8151 ASSERT(un->un_errstats != NULL); 8152 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8153 ASSERT(stp != NULL); 8154 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8155 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8156 (void) strncpy(stp->sd_revision.value.c, 8157 un->un_sd->sd_inq->inq_revision, 4); 8158 8159 /* 8160 * All the errstats are persistent across detach/attach, 8161 * so reset all the errstats here in case of the hot 8162 * replacement of disk drives, except for not changed 8163 * Sun qualified drives. 8164 */ 8165 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8166 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8167 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8168 stp->sd_softerrs.value.ui32 = 0; 8169 stp->sd_harderrs.value.ui32 = 0; 8170 stp->sd_transerrs.value.ui32 = 0; 8171 stp->sd_rq_media_err.value.ui32 = 0; 8172 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8173 stp->sd_rq_nodev_err.value.ui32 = 0; 8174 stp->sd_rq_recov_err.value.ui32 = 0; 8175 stp->sd_rq_illrq_err.value.ui32 = 0; 8176 stp->sd_rq_pfa_err.value.ui32 = 0; 8177 } 8178 8179 /* 8180 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8181 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8182 * (4376302)) 8183 */ 8184 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8185 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8186 sizeof (SD_INQUIRY(un)->inq_serial)); 8187 } 8188 8189 if (un->un_f_blockcount_is_valid != TRUE) { 8190 /* 8191 * Set capacity error stat to 0 for no media. This ensures 8192 * a valid capacity is displayed in response to 'iostat -E' 8193 * when no media is present in the device. 8194 */ 8195 stp->sd_capacity.value.ui64 = 0; 8196 } else { 8197 /* 8198 * Multiply un_blockcount by un->un_sys_blocksize to get 8199 * capacity. 8200 * 8201 * Note: for non-512 blocksize devices "un_blockcount" has been 8202 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8203 * (un_tgt_blocksize / un->un_sys_blocksize). 8204 */ 8205 stp->sd_capacity.value.ui64 = (uint64_t) 8206 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8207 } 8208 } 8209 8210 8211 /* 8212 * Function: sd_set_pstats 8213 * 8214 * Description: This routine instantiates and initializes the partition 8215 * stats for each partition with more than zero blocks. 8216 * (4363169) 8217 * 8218 * Arguments: un - driver soft state (unit) structure 8219 * 8220 * Context: Kernel thread context 8221 */ 8222 8223 static void 8224 sd_set_pstats(struct sd_lun *un) 8225 { 8226 char kstatname[KSTAT_STRLEN]; 8227 int instance; 8228 int i; 8229 diskaddr_t nblks = 0; 8230 char *partname = NULL; 8231 8232 ASSERT(un != NULL); 8233 8234 instance = ddi_get_instance(SD_DEVINFO(un)); 8235 8236 /* Note:x86: is this a VTOC8/VTOC16 difference? 
*/ 8237 for (i = 0; i < NSDMAP; i++) { 8238 8239 if (cmlb_partinfo(un->un_cmlbhandle, i, 8240 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8241 continue; 8242 mutex_enter(SD_MUTEX(un)); 8243 8244 if ((un->un_pstats[i] == NULL) && 8245 (nblks != 0)) { 8246 8247 (void) snprintf(kstatname, sizeof (kstatname), 8248 "%s%d,%s", sd_label, instance, 8249 partname); 8250 8251 un->un_pstats[i] = kstat_create(sd_label, 8252 instance, kstatname, "partition", KSTAT_TYPE_IO, 8253 1, KSTAT_FLAG_PERSISTENT); 8254 if (un->un_pstats[i] != NULL) { 8255 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8256 kstat_install(un->un_pstats[i]); 8257 } 8258 } 8259 mutex_exit(SD_MUTEX(un)); 8260 } 8261 } 8262 8263 8264 #if (defined(__fibre)) 8265 /* 8266 * Function: sd_init_event_callbacks 8267 * 8268 * Description: This routine initializes the insertion and removal event 8269 * callbacks. (fibre only) 8270 * 8271 * Arguments: un - driver soft state (unit) structure 8272 * 8273 * Context: Kernel thread context 8274 */ 8275 8276 static void 8277 sd_init_event_callbacks(struct sd_lun *un) 8278 { 8279 ASSERT(un != NULL); 8280 8281 if ((un->un_insert_event == NULL) && 8282 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8283 &un->un_insert_event) == DDI_SUCCESS)) { 8284 /* 8285 * Add the callback for an insertion event 8286 */ 8287 (void) ddi_add_event_handler(SD_DEVINFO(un), 8288 un->un_insert_event, sd_event_callback, (void *)un, 8289 &(un->un_insert_cb_id)); 8290 } 8291 8292 if ((un->un_remove_event == NULL) && 8293 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8294 &un->un_remove_event) == DDI_SUCCESS)) { 8295 /* 8296 * Add the callback for a removal event 8297 */ 8298 (void) ddi_add_event_handler(SD_DEVINFO(un), 8299 un->un_remove_event, sd_event_callback, (void *)un, 8300 &(un->un_remove_cb_id)); 8301 } 8302 } 8303 8304 8305 /* 8306 * Function: sd_event_callback 8307 * 8308 * Description: This routine handles insert/remove events (photon). The 8309 * state is changed to OFFLINE which can be used to supress 8310 * error msgs. (fibre only) 8311 * 8312 * Arguments: un - driver soft state (unit) structure 8313 * 8314 * Context: Callout thread context 8315 */ 8316 /* ARGSUSED */ 8317 static void 8318 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8319 void *bus_impldata) 8320 { 8321 struct sd_lun *un = (struct sd_lun *)arg; 8322 8323 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8324 if (event == un->un_insert_event) { 8325 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8326 mutex_enter(SD_MUTEX(un)); 8327 if (un->un_state == SD_STATE_OFFLINE) { 8328 if (un->un_last_state != SD_STATE_SUSPENDED) { 8329 un->un_state = un->un_last_state; 8330 } else { 8331 /* 8332 * We have gone through SUSPEND/RESUME while 8333 * we were offline. Restore the last state 8334 */ 8335 un->un_state = un->un_save_state; 8336 } 8337 } 8338 mutex_exit(SD_MUTEX(un)); 8339 8340 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8341 } else if (event == un->un_remove_event) { 8342 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8343 mutex_enter(SD_MUTEX(un)); 8344 /* 8345 * We need to handle an event callback that occurs during 8346 * the suspend operation, since we don't prevent it. 
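*
* (Hence the SUSPENDED check below: while suspended, only
* un_last_state is set to OFFLINE, so the offline state takes
* effect when the resume path restores the saved state.)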
8347 */ 8348 if (un->un_state != SD_STATE_OFFLINE) { 8349 if (un->un_state != SD_STATE_SUSPENDED) { 8350 New_state(un, SD_STATE_OFFLINE); 8351 } else { 8352 un->un_last_state = SD_STATE_OFFLINE; 8353 } 8354 } 8355 mutex_exit(SD_MUTEX(un)); 8356 } else { 8357 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8358 "!Unknown event\n"); 8359 } 8360 8361 } 8362 #endif 8363 8364 /* 8365 * Function: sd_cache_control() 8366 * 8367 * Description: This routine is the driver entry point for setting 8368 * read and write caching by modifying the WCE (write cache 8369 * enable) and RCD (read cache disable) bits of mode 8370 * page 8 (MODEPAGE_CACHING). 8371 * 8372 * Arguments: un - driver soft state (unit) structure 8373 * rcd_flag - flag for controlling the read cache 8374 * wce_flag - flag for controlling the write cache 8375 * 8376 * Return Code: EIO 8377 * code returned by sd_send_scsi_MODE_SENSE and 8378 * sd_send_scsi_MODE_SELECT 8379 * 8380 * Context: Kernel Thread 8381 */ 8382 8383 static int 8384 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8385 { 8386 struct mode_caching *mode_caching_page; 8387 uchar_t *header; 8388 size_t buflen; 8389 int hdrlen; 8390 int bd_len; 8391 int rval = 0; 8392 struct mode_header_grp2 *mhp; 8393 8394 ASSERT(un != NULL); 8395 8396 /* 8397 * Do a test unit ready, otherwise a mode sense may not work if this 8398 * is the first command sent to the device after boot. 8399 */ 8400 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8401 8402 if (un->un_f_cfg_is_atapi == TRUE) { 8403 hdrlen = MODE_HEADER_LENGTH_GRP2; 8404 } else { 8405 hdrlen = MODE_HEADER_LENGTH; 8406 } 8407 8408 /* 8409 * Allocate memory for the retrieved mode page and its headers. Set 8410 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8411 * we get all of the mode sense data otherwise, the mode select 8412 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8413 */ 8414 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8415 sizeof (struct mode_cache_scsi3); 8416 8417 header = kmem_zalloc(buflen, KM_SLEEP); 8418 8419 /* Get the information from the device. */ 8420 if (un->un_f_cfg_is_atapi == TRUE) { 8421 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8422 MODEPAGE_CACHING, SD_PATH_DIRECT); 8423 } else { 8424 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8425 MODEPAGE_CACHING, SD_PATH_DIRECT); 8426 } 8427 if (rval != 0) { 8428 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8429 "sd_cache_control: Mode Sense Failed\n"); 8430 kmem_free(header, buflen); 8431 return (rval); 8432 } 8433 8434 /* 8435 * Determine size of Block Descriptors in order to locate 8436 * the mode page data. ATAPI devices return 0, SCSI devices 8437 * should return MODE_BLK_DESC_LENGTH. 
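*
* The buffer parsed below is laid out as:
*
*	+-------------+-------------------+--------------------+
*	| mode header | block descriptors | caching mode page  |
*	|   hdrlen    |  bd_len (0 or 8)  |  MODEPAGE_CACHING  |
*	+-------------+-------------------+--------------------+
*
* so the page data starts at (header + hdrlen + bd_len).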
8438 */ 8439 if (un->un_f_cfg_is_atapi == TRUE) { 8440 mhp = (struct mode_header_grp2 *)header; 8441 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8442 } else { 8443 bd_len = ((struct mode_header *)header)->bdesc_length; 8444 } 8445 8446 if (bd_len > MODE_BLK_DESC_LENGTH) { 8447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8448 "sd_cache_control: Mode Sense returned invalid " 8449 "block descriptor length\n"); 8450 kmem_free(header, buflen); 8451 return (EIO); 8452 } 8453 8454 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8455 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8456 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8457 " caching page code mismatch %d\n", 8458 mode_caching_page->mode_page.code); 8459 kmem_free(header, buflen); 8460 return (EIO); 8461 } 8462 8463 /* Check the relevant bits on successful mode sense. */ 8464 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8465 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8466 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8467 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8468 8469 size_t sbuflen; 8470 uchar_t save_pg; 8471 8472 /* 8473 * Construct select buffer length based on the 8474 * length of the sense data returned. 8475 */ 8476 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8477 sizeof (struct mode_page) + 8478 (int)mode_caching_page->mode_page.length; 8479 8480 /* 8481 * Set the caching bits as requested. 8482 */ 8483 if (rcd_flag == SD_CACHE_ENABLE) 8484 mode_caching_page->rcd = 0; 8485 else if (rcd_flag == SD_CACHE_DISABLE) 8486 mode_caching_page->rcd = 1; 8487 8488 if (wce_flag == SD_CACHE_ENABLE) 8489 mode_caching_page->wce = 1; 8490 else if (wce_flag == SD_CACHE_DISABLE) 8491 mode_caching_page->wce = 0; 8492 8493 /* 8494 * Save the page if the mode sense says the 8495 * drive supports it. 8496 */ 8497 save_pg = mode_caching_page->mode_page.ps ? 8498 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8499 8500 /* Clear reserved bits before mode select. */ 8501 mode_caching_page->mode_page.ps = 0; 8502 8503 /* 8504 * Clear out mode header for mode select. 8505 * The rest of the retrieved page will be reused. 8506 */ 8507 bzero(header, hdrlen); 8508 8509 if (un->un_f_cfg_is_atapi == TRUE) { 8510 mhp = (struct mode_header_grp2 *)header; 8511 mhp->bdesc_length_hi = bd_len >> 8; 8512 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8513 } else { 8514 ((struct mode_header *)header)->bdesc_length = bd_len; 8515 } 8516 8517 /* Issue mode select to change the cache settings */ 8518 if (un->un_f_cfg_is_atapi == TRUE) { 8519 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8520 sbuflen, save_pg, SD_PATH_DIRECT); 8521 } else { 8522 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8523 sbuflen, save_pg, SD_PATH_DIRECT); 8524 } 8525 } 8526 8527 kmem_free(header, buflen); 8528 return (rval); 8529 } 8530 8531 8532 /* 8533 * Function: sd_get_write_cache_enabled() 8534 * 8535 * Description: This routine is the driver entry point for determining if 8536 * write caching is enabled. It examines the WCE (write cache 8537 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
8538 * 8539 * Arguments: un - driver soft state (unit) structure 8540 * is_enabled - pointer to int where write cache enabled state 8541 * is returned (non-zero -> write cache enabled) 8542 * 8543 * 8544 * Return Code: EIO 8545 * code returned by sd_send_scsi_MODE_SENSE 8546 * 8547 * Context: Kernel Thread 8548 * 8549 * NOTE: If ioctl is added to disable write cache, this sequence should 8550 * be followed so that no locking is required for accesses to 8551 * un->un_f_write_cache_enabled: 8552 * do mode select to clear wce 8553 * do synchronize cache to flush cache 8554 * set un->un_f_write_cache_enabled = FALSE 8555 * 8556 * Conversely, an ioctl to enable the write cache should be done 8557 * in this order: 8558 * set un->un_f_write_cache_enabled = TRUE 8559 * do mode select to set wce 8560 */ 8561 8562 static int 8563 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8564 { 8565 struct mode_caching *mode_caching_page; 8566 uchar_t *header; 8567 size_t buflen; 8568 int hdrlen; 8569 int bd_len; 8570 int rval = 0; 8571 8572 ASSERT(un != NULL); 8573 ASSERT(is_enabled != NULL); 8574 8575 /* in case of error, flag as enabled */ 8576 *is_enabled = TRUE; 8577 8578 /* 8579 * Do a test unit ready, otherwise a mode sense may not work if this 8580 * is the first command sent to the device after boot. 8581 */ 8582 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8583 8584 if (un->un_f_cfg_is_atapi == TRUE) { 8585 hdrlen = MODE_HEADER_LENGTH_GRP2; 8586 } else { 8587 hdrlen = MODE_HEADER_LENGTH; 8588 } 8589 8590 /* 8591 * Allocate memory for the retrieved mode page and its headers. Set 8592 * a pointer to the page itself. 8593 */ 8594 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8595 header = kmem_zalloc(buflen, KM_SLEEP); 8596 8597 /* Get the information from the device. */ 8598 if (un->un_f_cfg_is_atapi == TRUE) { 8599 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8600 MODEPAGE_CACHING, SD_PATH_DIRECT); 8601 } else { 8602 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8603 MODEPAGE_CACHING, SD_PATH_DIRECT); 8604 } 8605 if (rval != 0) { 8606 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8607 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8608 kmem_free(header, buflen); 8609 return (rval); 8610 } 8611 8612 /* 8613 * Determine size of Block Descriptors in order to locate 8614 * the mode page data. ATAPI devices return 0, SCSI devices 8615 * should return MODE_BLK_DESC_LENGTH. 
8616 */
8617 if (un->un_f_cfg_is_atapi == TRUE) {
8618 struct mode_header_grp2 *mhp;
8619 mhp = (struct mode_header_grp2 *)header;
8620 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
8621 } else {
8622 bd_len = ((struct mode_header *)header)->bdesc_length;
8623 }
8624
8625 if (bd_len > MODE_BLK_DESC_LENGTH) {
8626 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
8627 "sd_get_write_cache_enabled: Mode Sense returned invalid "
8628 "block descriptor length\n");
8629 kmem_free(header, buflen);
8630 return (EIO);
8631 }
8632
8633 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
8634 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
8635 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense"
8636 " caching page code mismatch %d\n",
8637 mode_caching_page->mode_page.code);
8638 kmem_free(header, buflen);
8639 return (EIO);
8640 }
8641 *is_enabled = mode_caching_page->wce;
8642
8643 kmem_free(header, buflen);
8644 return (0);
8645 }
8646
8647 /*
8648 * Function: sd_get_nv_sup()
8649 *
8650 * Description: This routine is the driver entry point for
8651 * determining whether non-volatile cache is supported. This
8652 * determination process works as follows:
8653 *
8654 * 1. sd first queries sd.conf to see whether the
8655 * suppress_cache_flush bit is set for this device.
8656 *
8657 * 2. if it is not set there, sd then queries the internal disk table.
8658 *
8659 * 3. if either sd.conf or the internal disk table specifies
8660 * that cache flushes be suppressed, we don't bother checking
8661 * the NV_SUP bit.
8662 *
8663 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
8664 * the optional INQUIRY VPD page 0x86. If the device
8665 * supports VPD page 0x86, sd examines the NV_SUP
8666 * (non-volatile cache support) bit in the INQUIRY VPD page
8667 * 0x86:
8668 * o If the NV_SUP bit is set, sd assumes the device has a
8669 * non-volatile cache and sets
8670 * un_f_sync_nv_supported to TRUE.
8671 * o Otherwise the cache is not non-volatile, and
8672 * un_f_sync_nv_supported is set to FALSE.
8673 *
8674 * Arguments: un - driver soft state (unit) structure
8675 *
8676 * Return Code:
8677 *
8678 * Context: Kernel Thread
8679 */
8680
8681 static void
8682 sd_get_nv_sup(struct sd_lun *un)
8683 {
8684 int rval = 0;
8685 uchar_t *inq86 = NULL;
8686 size_t inq86_len = MAX_INQUIRY_SIZE;
8687 size_t inq86_resid = 0;
8688 struct dk_callback *dkc;
8689
8690 ASSERT(un != NULL);
8691
8692 mutex_enter(SD_MUTEX(un));
8693
8694 /*
8695 * Be conservative about the device's support of the
8696 * SYNC_NV bit: un_f_sync_nv_supported is
8697 * initialized to FALSE.
8698 */
8699 un->un_f_sync_nv_supported = FALSE;
8700
8701 /*
8702 * If either sd.conf or the internal disk table
8703 * specifies that cache flushes be suppressed, then
8704 * we don't bother checking the NV_SUP bit.
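*
* (For reference: the probe below reads INQUIRY VPD page 0x86,
* the Extended INQUIRY Data page; NV_SUP is carried in byte 6
* of that page, which is why inq86[6] is tested against
* SD_VPD_NV_SUP.)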
8705 */
8706 if (un->un_f_suppress_cache_flush == TRUE) {
8707 mutex_exit(SD_MUTEX(un));
8708 return;
8709 }
8710
8711 if (sd_check_vpd_page_support(un) == 0 &&
8712 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
8713 mutex_exit(SD_MUTEX(un));
8714 /* collect page 86 data if available */
8715 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
8716 rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len,
8717 0x01, 0x86, &inq86_resid);
8718
8719 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
8720 SD_TRACE(SD_LOG_COMMON, un,
8721 "sd_get_nv_sup: \
8722 successfully get VPD page: %x \
8723 PAGE LENGTH: %x BYTE 6: %x\n",
8724 inq86[1], inq86[3], inq86[6]);
8725
8726 mutex_enter(SD_MUTEX(un));
8727 /*
8728 * Check the NV_SUP bit: un_f_sync_nv_supported
8729 * is set to TRUE only if the device reports
8730 * NV_SUP as 1.
8731 */
8732 if (inq86[6] & SD_VPD_NV_SUP) {
8733 un->un_f_sync_nv_supported = TRUE;
8734 }
8735 mutex_exit(SD_MUTEX(un));
8736 }
8737 kmem_free(inq86, inq86_len);
8738 } else {
8739 mutex_exit(SD_MUTEX(un));
8740 }
8741
8742 /*
8743 * Send a SYNCHRONIZE CACHE command to check whether the
8744 * SYNC_NV bit is supported; un_f_sync_nv_supported must
8745 * be set to the correct value before this command is issued.
8746 */
8747 mutex_enter(SD_MUTEX(un));
8748 if (un->un_f_sync_nv_supported) {
8749 mutex_exit(SD_MUTEX(un));
8750 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
8751 dkc->dkc_flag = FLUSH_VOLATILE;
8752 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
8753
8754 /*
8755 * Send a TEST UNIT READY command to the device. This should
8756 * clear any outstanding UNIT ATTENTION that may be present.
8757 */
8758 (void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR);
8759
8760 kmem_free(dkc, sizeof (struct dk_callback));
8761 } else {
8762 mutex_exit(SD_MUTEX(un));
8763 }
8764
8765 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
8766 un_f_suppress_cache_flush is set to %d\n",
8767 un->un_f_suppress_cache_flush);
8768 }
8769
8770 /*
8771 * Function: sd_make_device
8772 *
8773 * Description: Utility routine to return the Solaris device number from
8774 * the data in the device's dev_info structure.
8775 *
8776 * Return Code: The Solaris device number
8777 *
8778 * Context: Any
8779 */
8780
8781 static dev_t
8782 sd_make_device(dev_info_t *devi)
8783 {
8784 return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
8785 ddi_get_instance(devi) << SDUNIT_SHIFT));
8786 }
8787
8788
8789 /*
8790 * Function: sd_pm_entry
8791 *
8792 * Description: Called at the start of a new command to manage power
8793 * and busy status of a device. This includes determining whether
8794 * the current power state of the device is sufficient for
8795 * performing the command or whether it must be changed.
8796 * The PM framework is notified appropriately.
8797 * Only with a return status of DDI_SUCCESS will the
8798 * component be marked busy to the framework.
8799 *
8800 * All callers of sd_pm_entry must check the return status
8801 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
8802 * of DDI_FAILURE indicates the device failed to power up.
8803 * In this case un_pm_count has been adjusted so the result
8804 * on exit is still powered down, i.e., count is less than 0.
8805 * Calling sd_pm_exit with this count value hits an ASSERT.
8806 *
8807 * Return Code: DDI_SUCCESS or DDI_FAILURE
8808 *
8809 * Context: Kernel thread context.
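*
* A minimal caller sketch (illustrative only):
*
*	if (sd_pm_entry(un) == DDI_SUCCESS) {
*		... issue the command ...
*		sd_pm_exit(un);
*	} else {
*		... fail the request; do NOT call sd_pm_exit() ...
*	}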
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, ON in this case. If successful,
			 * un_pm_count and un_power_level will be updated
			 * appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_SPINDLE_ON);

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, i.e. count is less
				 * than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is zero.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
8914 */ 8915 8916 mutex_exit(&un->un_pm_mutex); 8917 mutex_enter(SD_MUTEX(un)); 8918 SD_TRACE(SD_LOG_IO_PM, un, 8919 "sd_pm_entry: changing uscsi_chain_type from %d\n", 8920 un->un_uscsi_chain_type); 8921 8922 if (un->un_f_non_devbsize_supported) { 8923 un->un_buf_chain_type = 8924 SD_CHAIN_INFO_RMMEDIA_NO_PM; 8925 } else { 8926 un->un_buf_chain_type = 8927 SD_CHAIN_INFO_DISK_NO_PM; 8928 } 8929 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8930 8931 SD_TRACE(SD_LOG_IO_PM, un, 8932 " changed uscsi_chain_type to %d\n", 8933 un->un_uscsi_chain_type); 8934 mutex_exit(SD_MUTEX(un)); 8935 mutex_enter(&un->un_pm_mutex); 8936 8937 if (un->un_pm_idle_timeid == NULL) { 8938 /* 300 ms. */ 8939 un->un_pm_idle_timeid = 8940 timeout(sd_pm_idletimeout_handler, un, 8941 (drv_usectohz((clock_t)300000))); 8942 /* 8943 * Include an extra call to busy which keeps the 8944 * device busy with-respect-to the PM layer 8945 * until the timer fires, at which time it'll 8946 * get the extra idle call. 8947 */ 8948 (void) pm_busy_component(SD_DEVINFO(un), 0); 8949 } 8950 } 8951 } 8952 un->un_pm_busy = FALSE; 8953 /* Next... */ 8954 cv_signal(&un->un_pm_busy_cv); 8955 8956 un->un_pm_count++; 8957 8958 SD_TRACE(SD_LOG_IO_PM, un, 8959 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 8960 8961 mutex_exit(&un->un_pm_mutex); 8962 8963 return (return_status); 8964 } 8965 8966 8967 /* 8968 * Function: sd_pm_exit 8969 * 8970 * Description: Called at the completion of a command to manage busy 8971 * status for the device. If the device becomes idle the 8972 * PM framework is notified. 8973 * 8974 * Context: Kernel thread context 8975 */ 8976 8977 static void 8978 sd_pm_exit(struct sd_lun *un) 8979 { 8980 ASSERT(!mutex_owned(SD_MUTEX(un))); 8981 ASSERT(!mutex_owned(&un->un_pm_mutex)); 8982 8983 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 8984 8985 /* 8986 * After attach the following flag is only read, so don't 8987 * take the penalty of acquiring a mutex for it. 8988 */ 8989 if (un->un_f_pm_is_enabled == TRUE) { 8990 8991 mutex_enter(&un->un_pm_mutex); 8992 un->un_pm_count--; 8993 8994 SD_TRACE(SD_LOG_IO_PM, un, 8995 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 8996 8997 ASSERT(un->un_pm_count >= 0); 8998 if (un->un_pm_count == 0) { 8999 mutex_exit(&un->un_pm_mutex); 9000 9001 SD_TRACE(SD_LOG_IO_PM, un, 9002 "sd_pm_exit: idle component\n"); 9003 9004 (void) pm_idle_component(SD_DEVINFO(un), 0); 9005 9006 } else { 9007 mutex_exit(&un->un_pm_mutex); 9008 } 9009 } 9010 9011 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9012 } 9013 9014 9015 /* 9016 * Function: sdopen 9017 * 9018 * Description: Driver's open(9e) entry point function. 
 *
 * Arguments: dev_p  - pointer to device number
 *	      flag   - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *	      otyp   - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *	      cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open (9e)
		 * fails with ENXIO (4238046).
		 */
		/*
		 * Unconditionally clearing the probe cache is fine with
		 * the separate sd/ssd binaries; on the x86 platform, where
		 * both parallel and fibre are handled in one binary, it
		 * can be an issue.
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this count
	 * we can at least keep our open and detach routines from racing
	 * against each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay  = (flag & (FNDELAY | FNONBLOCK));
	part	 = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on a writable
			 * DVD drive until sdstrategy; do not fail the open
			 * even if FWRITE is set, since whether the device is
			 * writable depends on the media, and the media can
			 * change after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un,
					    "sdopen: write to cd or write "
					    "protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return.
	 * Check if disk is ready and has a valid geometry later.
	 */
	if (!nodelay) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_ready_and_valid(un);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non-CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
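		 * (The chkd[] array scanned below shares storage with the
		 * lyropen/regopen open maps, so finding only zero bytes
		 * means no partition is currently open in any mode.)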
9227 */ 9228 cp = &un->un_ocmap.chkd[0]; 9229 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9230 if (*cp != (uchar_t)0) { 9231 break; 9232 } 9233 cp++; 9234 } 9235 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9236 mutex_exit(SD_MUTEX(un)); 9237 cmlb_invalidate(un->un_cmlbhandle, 9238 (void *)SD_PATH_DIRECT); 9239 mutex_enter(SD_MUTEX(un)); 9240 } 9241 9242 #endif 9243 } 9244 9245 if (otyp == OTYP_LYR) { 9246 un->un_ocmap.lyropen[part]++; 9247 } else { 9248 un->un_ocmap.regopen[otyp] |= partmask; 9249 } 9250 9251 /* Set up open and exclusive open flags */ 9252 if (flag & FEXCL) { 9253 un->un_exclopen |= (partmask); 9254 } 9255 9256 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9257 "open of part %d type %d\n", part, otyp); 9258 9259 mutex_exit(SD_MUTEX(un)); 9260 if (!nodelay) { 9261 sd_pm_exit(un); 9262 } 9263 9264 sema_v(&un->un_semoclose); 9265 9266 mutex_enter(&sd_detach_mutex); 9267 un->un_opens_in_progress--; 9268 mutex_exit(&sd_detach_mutex); 9269 9270 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9271 return (DDI_SUCCESS); 9272 9273 excl_open_fail: 9274 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9275 rval = EBUSY; 9276 9277 open_fail: 9278 mutex_exit(SD_MUTEX(un)); 9279 9280 /* 9281 * On a failed open we must exit the pm management. 9282 */ 9283 if (!nodelay) { 9284 sd_pm_exit(un); 9285 } 9286 open_failed_with_pm: 9287 sema_v(&un->un_semoclose); 9288 9289 mutex_enter(&sd_detach_mutex); 9290 un->un_opens_in_progress--; 9291 if (otyp == OTYP_LYR) { 9292 un->un_layer_count--; 9293 } 9294 mutex_exit(&sd_detach_mutex); 9295 9296 return (rval); 9297 } 9298 9299 9300 /* 9301 * Function: sdclose 9302 * 9303 * Description: Driver's close(9e) entry point function. 9304 * 9305 * Arguments: dev - device number 9306 * flag - file status flag, informational only 9307 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9308 * cred_p - user credential pointer 9309 * 9310 * Return Code: ENXIO 9311 * 9312 * Context: Kernel thread context 9313 */ 9314 /* ARGSUSED */ 9315 static int 9316 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9317 { 9318 struct sd_lun *un; 9319 uchar_t *cp; 9320 int part; 9321 int nodelay; 9322 int rval = 0; 9323 9324 /* Validate the open type */ 9325 if (otyp >= OTYPCNT) { 9326 return (ENXIO); 9327 } 9328 9329 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9330 return (ENXIO); 9331 } 9332 9333 part = SDPART(dev); 9334 nodelay = flag & (FNDELAY | FNONBLOCK); 9335 9336 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9337 "sdclose: close of part %d type %d\n", part, otyp); 9338 9339 /* 9340 * We use a semaphore here in order to serialize 9341 * open and close requests on the device. 9342 */ 9343 sema_p(&un->un_semoclose); 9344 9345 mutex_enter(SD_MUTEX(un)); 9346 9347 /* Don't proceed if power is being changed. 
 */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}

	/* Update the open partition map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != (uchar_t)0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

		/*
		 * We avoid persistence upon the last close, and set
		 * the throttle back to the maximum.
		 */
		un->un_throttle = un->un_saved_throttle;

		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));

		} else {
			/*
			 * Flush any outstanding writes in NVRAM cache.
			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
			 * cmd, so it may not work for non-Pluto devices.
			 * SYNCHRONIZE CACHE is not required for removables,
			 * except DVD-RAM drives.
			 *
			 * Also note: because SYNCHRONIZE CACHE is currently
			 * the only command issued here that requires the
			 * drive be powered up, only do the power up before
			 * sending the Sync Cache command. If additional
			 * commands are added which require a powered up
			 * drive, the following sequence may have to change.
			 *
			 * And finally, note that parallel SCSI on SPARC
			 * only issues a Sync Cache to DVD-RAM, a newly
			 * supported device.
			 */
#if defined(__i386) || defined(__amd64)
			if (un->un_f_sync_cache_supported ||
			    un->un_f_dvdram_writable_device == TRUE) {
#else
			if (un->un_f_dvdram_writable_device == TRUE) {
#endif
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval =
					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
					/* ignore error if not supported */
					if (rval == ENOTSUP) {
						rval = 0;
					} else if (rval != 0) {
						rval = EIO;
					}
					sd_pm_exit(un);
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * For devices which support DOOR_LOCK, send an ALLOW
			 * MEDIA REMOVAL command, but don't get upset if it
			 * fails. We need to raise the power of the drive
			 * before we can call sd_send_scsi_DOORLOCK().
			 */
			if (un->un_f_doorlock_supported) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval = sd_send_scsi_DOORLOCK(un,
					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);

					sd_pm_exit(un);
					if (ISCD(un) && (rval != 0) &&
					    (nodelay != 0)) {
						rval = ENXIO;
					}
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * If a device has removable media, invalidate all
			 * parameters related to media, such as geometry,
			 * blocksize, and blockcount.
			 */
			if (un->un_f_has_removable_media) {
				sr_ejected(un);
			}

			/*
			 * Destroy the cache (if it exists) which was
			 * allocated for the write maps since this is
			 * the last close for this media.
			 */
			if (un->un_wm_cache) {
				/*
				 * Check if there are pending commands,
				 * and if there are, give a warning and
				 * do not destroy the cache.
				 */
				if (un->un_ncmds_in_driver > 0) {
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "Unable to clean up memory "
					    "because of pending I/O\n");
				} else {
					kmem_cache_destroy(
					    un->un_wm_cache);
					un->un_wm_cache = NULL;
				}
			}
		}
	}

	mutex_exit(SD_MUTEX(un));
	sema_v(&un->un_semoclose);

	if (otyp == OTYP_LYR) {
		mutex_enter(&sd_detach_mutex);
		/*
		 * The detach routine may run when the layer count
		 * drops to zero.
		 */
		un->un_layer_count--;
		mutex_exit(&sd_detach_mutex);
	}

	return (rval);
}


/*
 * Function: sd_ready_and_valid
 *
 * Description: Test if device is ready and has a valid geometry.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_READY_VALID		ready and valid label
 *		SD_NOT_READY_VALID	not ready, no label
 *		SD_RESERVED_BY_OTHERS	reservation conflict
 *
 * Context: Never called at interrupt context.
 */

static int
sd_ready_and_valid(struct sd_lun *un)
{
	struct sd_errstats	*stp;
	uint64_t		capacity;
	uint_t			lbasize;
	int			rval = SD_READY_VALID;
	char			name_str[48];
	int			is_valid;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/*
	 * If a device has removable media, we must check if media is
	 * ready when checking if this device is ready and valid.
	 */
	if (un->un_f_has_removable_media) {
		mutex_exit(SD_MUTEX(un));
		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/* capacity has to be read every open. */
			mutex_exit(SD_MUTEX(un));
			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT) != 0) {
				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;
				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(un, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from
		 * non-CD devices.
		 */
		mutex_exit(SD_MUTEX(un));
		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non 512 block device, allocate space for
	 * the wmap cache. This is done here since this routine is
	 * called every time the media is changed, and the block
	 * size is a function of the media rather than the device.
9592 */ 9593 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 9594 if (!(un->un_wm_cache)) { 9595 (void) snprintf(name_str, sizeof (name_str), 9596 "%s%d_cache", 9597 ddi_driver_name(SD_DEVINFO(un)), 9598 ddi_get_instance(SD_DEVINFO(un))); 9599 un->un_wm_cache = kmem_cache_create( 9600 name_str, sizeof (struct sd_w_map), 9601 8, sd_wm_cache_constructor, 9602 sd_wm_cache_destructor, NULL, 9603 (void *)un, NULL, 0); 9604 if (!(un->un_wm_cache)) { 9605 rval = ENOMEM; 9606 goto done; 9607 } 9608 } 9609 } 9610 9611 if (un->un_state == SD_STATE_NORMAL) { 9612 /* 9613 * If the target is not yet ready here (defined by a TUR 9614 * failure), invalidate the geometry and print an 'offline' 9615 * message. This is a legacy message, as the state of the 9616 * target is not actually changed to SD_STATE_OFFLINE. 9617 * 9618 * If the TUR fails for EACCES (Reservation Conflict), 9619 * SD_RESERVED_BY_OTHERS will be returned to indicate 9620 * reservation conflict. If the TUR fails for other 9621 * reasons, SD_NOT_READY_VALID will be returned. 9622 */ 9623 int err; 9624 9625 mutex_exit(SD_MUTEX(un)); 9626 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 9627 mutex_enter(SD_MUTEX(un)); 9628 9629 if (err != 0) { 9630 mutex_exit(SD_MUTEX(un)); 9631 cmlb_invalidate(un->un_cmlbhandle, 9632 (void *)SD_PATH_DIRECT); 9633 mutex_enter(SD_MUTEX(un)); 9634 if (err == EACCES) { 9635 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9636 "reservation conflict\n"); 9637 rval = SD_RESERVED_BY_OTHERS; 9638 } else { 9639 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9640 "drive offline\n"); 9641 rval = SD_NOT_READY_VALID; 9642 } 9643 goto done; 9644 } 9645 } 9646 9647 if (un->un_f_format_in_progress == FALSE) { 9648 mutex_exit(SD_MUTEX(un)); 9649 if (cmlb_validate(un->un_cmlbhandle, 0, 9650 (void *)SD_PATH_DIRECT) != 0) { 9651 rval = SD_NOT_READY_VALID; 9652 mutex_enter(SD_MUTEX(un)); 9653 goto done; 9654 } 9655 if (un->un_f_pkstats_enabled) { 9656 sd_set_pstats(un); 9657 SD_TRACE(SD_LOG_IO_PARTITION, un, 9658 "sd_ready_and_valid: un:0x%p pstats created and " 9659 "set\n", un); 9660 } 9661 mutex_enter(SD_MUTEX(un)); 9662 } 9663 9664 /* 9665 * If this device supports DOOR_LOCK command, try and send 9666 * this command to PREVENT MEDIA REMOVAL, but don't get upset 9667 * if it fails. For a CD, however, it is an error 9668 */ 9669 if (un->un_f_doorlock_supported) { 9670 mutex_exit(SD_MUTEX(un)); 9671 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 9672 SD_PATH_DIRECT) != 0) && ISCD(un)) { 9673 rval = SD_NOT_READY_VALID; 9674 mutex_enter(SD_MUTEX(un)); 9675 goto done; 9676 } 9677 mutex_enter(SD_MUTEX(un)); 9678 } 9679 9680 /* The state has changed, inform the media watch routines */ 9681 un->un_mediastate = DKIO_INSERTED; 9682 cv_broadcast(&un->un_state_cv); 9683 rval = SD_READY_VALID; 9684 9685 done: 9686 9687 /* 9688 * Initialize the capacity kstat value, if no media previously 9689 * (capacity kstat is 0) and a media has been inserted 9690 * (un_blockcount > 0). 9691 */ 9692 if (un->un_errstats != NULL) { 9693 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9694 if ((stp->sd_capacity.value.ui64 == 0) && 9695 (un->un_f_blockcount_is_valid == TRUE)) { 9696 stp->sd_capacity.value.ui64 = 9697 (uint64_t)((uint64_t)un->un_blockcount * 9698 un->un_sys_blocksize); 9699 } 9700 } 9701 9702 mutex_exit(SD_MUTEX(un)); 9703 return (rval); 9704 } 9705 9706 9707 /* 9708 * Function: sdmin 9709 * 9710 * Description: Routine to limit the size of a data transfer. Used in 9711 * conjunction with physio(9F). 
 *
 * Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 * Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
	struct sd_lun	*un;
	int		instance;

	instance = SDUNIT(bp->b_edev);

	un = ddi_get_soft_state(sd_state, instance);
	ASSERT(un != NULL);

	if (bp->b_bcount > un->un_max_xfer_size) {
		bp->b_bcount = un->un_max_xfer_size;
	}
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 * Arguments: dev   - device number
 *	      uio   - structure pointer describing where data is to be stored
 *		      in user's space
 *	      cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 * Arguments: dev   - device number
 *	      uio   - structure pointer describing where data is stored in
 *		      user's space
 *	      cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 * Arguments: dev   - device number
 *	      aio   - structure pointer describing where data is to be stored
 *	      cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
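	 *
	 * For example, with the common 512-byte system block size,
	 * secmask below is 0x1FF: an 8192-byte file offset and a
	 * 1024-byte transfer length both pass, while an offset of 8200
	 * (or a 100-byte length) fails the masking check with EINVAL.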
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}
	return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 * Arguments: dev   - device number
 *	      aio   - structure pointer describing where data is stored
 *	      cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
10031 */ 10032 secmask = un->un_sys_blocksize - 1; 10033 10034 if (uio->uio_loffset & ((offset_t)(secmask))) { 10035 SD_ERROR(SD_LOG_READ_WRITE, un, 10036 "sdawrite: file offset not modulo %d\n", 10037 un->un_sys_blocksize); 10038 err = EINVAL; 10039 } else if (uio->uio_iov->iov_len & (secmask)) { 10040 SD_ERROR(SD_LOG_READ_WRITE, un, 10041 "sdawrite: transfer length not modulo %d\n", 10042 un->un_sys_blocksize); 10043 err = EINVAL; 10044 } else { 10045 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10046 } 10047 return (err); 10048 } 10049 10050 10051 10052 10053 10054 /* 10055 * Driver IO processing follows the following sequence: 10056 * 10057 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10058 * | | ^ 10059 * v v | 10060 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10061 * | | | | 10062 * v | | | 10063 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10064 * | | ^ ^ 10065 * v v | | 10066 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10067 * | | | | 10068 * +---+ | +------------+ +-------+ 10069 * | | | | 10070 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10071 * | v | | 10072 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10073 * | | ^ | 10074 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10075 * | v | | 10076 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10077 * | | ^ | 10078 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10079 * | v | | 10080 * | sd_checksum_iostart() sd_checksum_iodone() | 10081 * | | ^ | 10082 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10083 * | v | | 10084 * | sd_pm_iostart() sd_pm_iodone() | 10085 * | | ^ | 10086 * | | | | 10087 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10088 * | ^ 10089 * v | 10090 * sd_core_iostart() | 10091 * | | 10092 * | +------>(*destroypkt)() 10093 * +-> sd_start_cmds() <-+ | | 10094 * | | | v 10095 * | | | scsi_destroy_pkt(9F) 10096 * | | | 10097 * +->(*initpkt)() +- sdintr() 10098 * | | | | 10099 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10100 * | +-> scsi_setup_cdb(9F) | 10101 * | | 10102 * +--> scsi_transport(9F) | 10103 * | | 10104 * +----> SCSA ---->+ 10105 * 10106 * 10107 * This code is based upon the following presumptions: 10108 * 10109 * - iostart and iodone functions operate on buf(9S) structures. These 10110 * functions perform the necessary operations on the buf(9S) and pass 10111 * them along to the next function in the chain by using the macros 10112 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10113 * (for iodone side functions). 10114 * 10115 * - The iostart side functions may sleep. The iodone side functions 10116 * are called under interrupt context and may NOT sleep. Therefore 10117 * iodone side functions also may not call iostart side functions. 10118 * (NOTE: iostart side functions should NOT sleep for memory, as 10119 * this could result in deadlock.) 10120 * 10121 * - An iostart side function may call its corresponding iodone side 10122 * function directly (if necessary). 10123 * 10124 * - In the event of an error, an iostart side function can return a buf(9S) 10125 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10126 * b_error in the usual way of course). 10127 * 10128 * - The taskq mechanism may be used by the iodone side functions to dispatch 10129 * requests to the iostart side functions. The iostart side functions in 10130 * this case would be called under the context of a taskq thread, so it's 10131 * OK for them to block/sleep/spin in this case. 
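 *
 * - As a sketch of the pattern (illustrative only; sd_example_iostart()
 *   and its error test are hypothetical, not functions in this driver),
 *   a pass-through iostart side function has this shape:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		if (request_is_bad(bp)) {	(hypothetical check)
 *			bioerror(bp, EIO);
 *			bp->b_resid = bp->b_bcount;
 *			SD_BEGIN_IODONE(index, un, bp);
 *			return;
 *		}
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}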
 *
 * - iostart side functions may allocate "shadow" buf(9S) structs and
 *   pass them along to the next function in the chain. The corresponding
 *   iodone side functions must coalesce the "shadow" bufs and return
 *   the "original" buf to the next higher layer.
 *
 * - The b_private field of the buf(9S) struct holds a pointer to
 *   an sd_xbuf struct, which contains information needed to
 *   construct the scsi_pkt for the command.
 *
 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *   layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it currently applies only to DVD-RAM and MO
 * drives, for which performance is not the main criterion at this stage.
 * Note: it remains to be explored whether a single taskq could be used.
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: none; the allocated taskq_t pointers are stored in
 *		sd_tq and sd_wmr_tq.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	    (definitely NOT recommended for 3rd-party drivers!) :-)
 *	  - taskq_create() will block for memory, also it will panic
 *	    if it cannot create the requested number of threads.
 *	  - Currently taskq_create() creates threads that cannot be
 *	    swapped.
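 *	  - As a sketch, an iodone-side dispatch that must not sleep
 *	    would look like (func/arg are illustrative placeholders):
 *		if (taskq_dispatch(sd_tq, func, arg, TQ_NOSLEEP) == NULL)
 *			-> no preallocated entry was available; fail or
 *			   retry the command rather than blocking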
 *	  - We use TASKQ_PREPOPULATE to ensure we have an adequate
 *	    supply of taskq entries at interrupt time (ie, so that we
 *	    do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
	char	taskq_name[TASKQ_NAMELEN];

	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_drv_taskq", sd_label);
	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_rmw_taskq", sd_label);
	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));
}


/*
 * Function: sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context: Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* As was done in the past, fail new cmds if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND; we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * its power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an
	 * optimized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *	      bp - ptr to associated buf(9S)
 *	      xp - ptr to associated sd_xbuf
 *	      chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *	      pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			 initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un	= un;
	xp->xb_pktp	= NULL;
	xp->xb_pktinfo	= pktinfop;
	xp->xb_private	= bp->b_private;
	xp->xb_blkno	= (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * though for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members
	 * anyway, we save a fair number of cycles by doing explicit
	 * assignment of zero.
	 */
	xp->xb_pkt_flags	= 0;
	xp->xb_dma_resid	= 0;
	xp->xb_retry_count	= 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count	= 0;
	xp->xb_nr_retry_count	= 0;
	xp->xb_sense_bp		= NULL;
	xp->xb_sense_status	= 0;
	xp->xb_sense_state	= 0;
	xp->xb_sense_resid	= 0;

	bp->b_private	= xp;
	bp->b_flags	&= ~(B_DONE | B_ERROR);
	bp->b_resid	= 0;
	bp->av_forw	= NULL;
	bp->av_back	= NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	mutex_exit(SD_MUTEX(un));

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;

	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate an extra buf for external USCSI commands. If the
	 * application asks for more than 20 bytes of sense data via USCSI,
	 * the SCSA layer will allocate a 252-byte sense buf for that command.
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for the user (when called thru
 *		sdioctl), or for the driver.
 *
 * Arguments: dev - the dev_t for the device
 *	      incmd - ptr to a valid uscsi_cmd struct
 *	      flag - bit flag, indicating open settings, 32/64 bit type
 *	      dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Waits for command to complete. Can sleep.
 */

static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	struct sd_lun		*un;
	int	format = 0;
	int	rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SDDEBUG
	switch (dataspace) {
	case UIO_USERSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
		break;
	case UIO_SYSSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
		break;
	default:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
		break;
	}
#endif

	rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag,
	    SD_ADDRESS(un), &uscmd);
	if (rval != 0) {
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
		    "scsi_uscsi_alloc_and_copyin failed\n");
		return (rval);
	}

	if ((uscmd->uscsi_cdb != NULL) &&
	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		format = 1;
	}

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
10623 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10624 */ 10625 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10626 uip->ui_flags = path_flag; 10627 uip->ui_cmdp = uscmd; 10628 10629 /* 10630 * Commands sent with priority are intended for error recovery 10631 * situations, and do not have retries performed. 10632 */ 10633 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10634 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10635 } 10636 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10637 10638 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10639 sd_uscsi_strategy, NULL, uip); 10640 10641 #ifdef SDDEBUG 10642 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10643 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10644 uscmd->uscsi_status, uscmd->uscsi_resid); 10645 if (uscmd->uscsi_bufaddr != NULL) { 10646 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10647 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10648 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10649 if (dataspace == UIO_SYSSPACE) { 10650 SD_DUMP_MEMORY(un, SD_LOG_IO, 10651 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10652 uscmd->uscsi_buflen, SD_LOG_HEX); 10653 } 10654 } 10655 #endif 10656 10657 if (format == 1) { 10658 mutex_enter(SD_MUTEX(un)); 10659 un->un_f_format_in_progress = FALSE; 10660 mutex_exit(SD_MUTEX(un)); 10661 } 10662 10663 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10664 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10665 10666 return (rval); 10667 } 10668 10669 10670 /* 10671 * Function: sd_buf_iodone 10672 * 10673 * Description: Frees the sd_xbuf & returns the buf to its originator. 10674 * 10675 * Context: May be called from interrupt context. 10676 */ 10677 /* ARGSUSED */ 10678 static void 10679 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10680 { 10681 struct sd_xbuf *xp; 10682 10683 ASSERT(un != NULL); 10684 ASSERT(bp != NULL); 10685 ASSERT(!mutex_owned(SD_MUTEX(un))); 10686 10687 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10688 10689 xp = SD_GET_XBUF(bp); 10690 ASSERT(xp != NULL); 10691 10692 mutex_enter(SD_MUTEX(un)); 10693 10694 /* 10695 * Grab time when the cmd completed. 10696 * This is used for determining if the system has been 10697 * idle long enough to make it idle to the PM framework. 10698 * This is for lowering the overhead, and therefore improving 10699 * performance per I/O operation. 10700 */ 10701 un->un_pm_idle_time = ddi_get_time(); 10702 10703 un->un_ncmds_in_driver--; 10704 ASSERT(un->un_ncmds_in_driver >= 0); 10705 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10706 un->un_ncmds_in_driver); 10707 10708 mutex_exit(SD_MUTEX(un)); 10709 10710 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10711 biodone(bp); /* bp is gone after this */ 10712 10713 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10714 } 10715 10716 10717 /* 10718 * Function: sd_uscsi_iodone 10719 * 10720 * Description: Frees the sd_xbuf & returns the buf to its originator. 10721 * 10722 * Context: May be called from interrupt context. 
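 *
 * (Note: the kmem_free() size computation below must mirror the
 * allocation in sd_uscsi_strategy(), which allocates an enlarged
 * xbuf whenever the caller's uscsi_rqlen exceeds SENSE_LENGTH.)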
 */
/* ARGSUSED */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	bp->b_private = xp->xb_private;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
	    SENSE_LENGTH) {
		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH);
	} else {
		kmem_free(xp, sizeof (struct sd_xbuf));
	}

	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}


/*
 * Function: sd_mapblockaddr_iostart
 *
 * Description: Verify request lies within the partition limits for
 *		the indicated minor device. Issue "overrun" buf if
 *		request would exceed partition range. Converts
 *		partition-relative block address to absolute.
 *
 * Context: Can sleep
 *
 * Issues: This follows what the old code did, in terms of accessing
 *	   some of the partition info in the unit struct without holding
 *	   the mutex. This is a general issue: if the partition info
 *	   can be altered while IO is in progress... as soon as we send
 *	   a buf, its partitioning can be invalid before it gets to the
 *	   device. Probably the right fix is to move partitioning out
 *	   of the driver entirely.
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t		blocknum;	/* Block number specified by the buf */
	size_t		requested_nblocks;
	size_t		available_nblocks;
	int		partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf	*xp;


	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, or if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	if (!SD_IS_VALID_LABEL(un) &&
	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
		/*
		 * For removable devices it is possible to start an I/O
		 * without a media by opening the device in nodelay mode.
		 * Also for writable CDs there can be many scenarios where
		 * there is no geometry yet but volume manager is trying to
		 * issue a read() just because it can see TOC on the CD. So
		 * do not print a message for removables.
10829 */ 10830 if (!un->un_f_has_removable_media) { 10831 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10832 "i/o to invalid geometry\n"); 10833 } 10834 bioerror(bp, EIO); 10835 bp->b_resid = bp->b_bcount; 10836 SD_BEGIN_IODONE(index, un, bp); 10837 return; 10838 } 10839 10840 partition = SDPART(bp->b_edev); 10841 10842 nblocks = 0; 10843 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 10844 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 10845 10846 /* 10847 * blocknum is the starting block number of the request. At this 10848 * point it is still relative to the start of the minor device. 10849 */ 10850 blocknum = xp->xb_blkno; 10851 10852 /* 10853 * Legacy: If the starting block number is one past the last block 10854 * in the partition, do not set B_ERROR in the buf. 10855 */ 10856 if (blocknum == nblocks) { 10857 goto error_exit; 10858 } 10859 10860 /* 10861 * Confirm that the first block of the request lies within the 10862 * partition limits. Also the requested number of bytes must be 10863 * a multiple of the system block size. 10864 */ 10865 if ((blocknum < 0) || (blocknum >= nblocks) || 10866 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 10867 bp->b_flags |= B_ERROR; 10868 goto error_exit; 10869 } 10870 10871 /* 10872 * If the requsted # blocks exceeds the available # blocks, that 10873 * is an overrun of the partition. 10874 */ 10875 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 10876 available_nblocks = (size_t)(nblocks - blocknum); 10877 ASSERT(nblocks >= blocknum); 10878 10879 if (requested_nblocks > available_nblocks) { 10880 /* 10881 * Allocate an "overrun" buf to allow the request to proceed 10882 * for the amount of space available in the partition. The 10883 * amount not transferred will be added into the b_resid 10884 * when the operation is complete. The overrun buf 10885 * replaces the original buf here, and the original buf 10886 * is saved inside the overrun buf, for later use. 10887 */ 10888 size_t resid = SD_SYSBLOCKS2BYTES(un, 10889 (offset_t)(requested_nblocks - available_nblocks)); 10890 size_t count = bp->b_bcount - resid; 10891 /* 10892 * Note: count is an unsigned entity thus it'll NEVER 10893 * be less than 0 so ASSERT the original values are 10894 * correct. 10895 */ 10896 ASSERT(bp->b_bcount >= resid); 10897 10898 bp = sd_bioclone_alloc(bp, count, blocknum, 10899 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 10900 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 10901 ASSERT(xp != NULL); 10902 } 10903 10904 /* At this point there should be no residual for this buf. */ 10905 ASSERT(bp->b_resid == 0); 10906 10907 /* Convert the block number to an absolute address. */ 10908 xp->xb_blkno += partition_offset; 10909 10910 SD_NEXT_IOSTART(index, un, bp); 10911 10912 SD_TRACE(SD_LOG_IO_PARTITION, un, 10913 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 10914 10915 return; 10916 10917 error_exit: 10918 bp->b_resid = bp->b_bcount; 10919 SD_BEGIN_IODONE(index, un, bp); 10920 SD_TRACE(SD_LOG_IO_PARTITION, un, 10921 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 10922 } 10923 10924 10925 /* 10926 * Function: sd_mapblockaddr_iodone 10927 * 10928 * Description: Completion-side processing for partition management. 10929 * 10930 * Context: May be called under interrupt context 10931 */ 10932 10933 static void 10934 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 10935 { 10936 /* int partition; */ /* Not used, see below. 
*/ 10937 ASSERT(un != NULL); 10938 ASSERT(bp != NULL); 10939 ASSERT(!mutex_owned(SD_MUTEX(un))); 10940 10941 SD_TRACE(SD_LOG_IO_PARTITION, un, 10942 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 10943 10944 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 10945 /* 10946 * We have an "overrun" buf to deal with... 10947 */ 10948 struct sd_xbuf *xp; 10949 struct buf *obp; /* ptr to the original buf */ 10950 10951 xp = SD_GET_XBUF(bp); 10952 ASSERT(xp != NULL); 10953 10954 /* Retrieve the pointer to the original buf */ 10955 obp = (struct buf *)xp->xb_private; 10956 ASSERT(obp != NULL); 10957 10958 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 10959 bioerror(obp, bp->b_error); 10960 10961 sd_bioclone_free(bp); 10962 10963 /* 10964 * Get back the original buf. 10965 * Note that since the restoration of xb_blkno below 10966 * was removed, the sd_xbuf is not needed. 10967 */ 10968 bp = obp; 10969 /* 10970 * xp = SD_GET_XBUF(bp); 10971 * ASSERT(xp != NULL); 10972 */ 10973 } 10974 10975 /* 10976 * Convert xp->xb_blkno back to a minor-device relative value. 10977 * Note: this has been commented out, as it is not needed in the 10978 * current implementation of the driver (ie, since this function 10979 * is at the top of the layering chains, so the info will be 10980 * discarded) and it is in the "hot" IO path. 10981 * 10982 * partition = getminor(bp->b_edev) & SDPART_MASK; 10983 * xp->xb_blkno -= un->un_offset[partition]; 10984 */ 10985 10986 SD_NEXT_IODONE(index, un, bp); 10987 10988 SD_TRACE(SD_LOG_IO_PARTITION, un, 10989 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 10990 } 10991 10992 10993 /* 10994 * Function: sd_mapblocksize_iostart 10995 * 10996 * Description: Convert between system block size (un->un_sys_blocksize) 10997 * and target block size (un->un_tgt_blocksize). 10998 * 10999 * Context: Can sleep to allocate resources. 11000 * 11001 * Assumptions: A higher layer has already performed any partition validation, 11002 * and converted the xp->xb_blkno to an absolute value relative 11003 * to the start of the device. 11004 * 11005 * It is also assumed that the higher layer has implemented 11006 * an "overrun" mechanism for the case where the request would 11007 * read/write beyond the end of a partition. In this case we 11008 * assume (and ASSERT) that bp->b_resid == 0. 11009 * 11010 * Note: The implementation for this routine assumes the target 11011 * block size remains constant between allocation and transport. 11012 */ 11013 11014 static void 11015 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11016 { 11017 struct sd_mapblocksize_info *bsp; 11018 struct sd_xbuf *xp; 11019 offset_t first_byte; 11020 daddr_t start_block, end_block; 11021 daddr_t request_bytes; 11022 ushort_t is_aligned = FALSE; 11023 11024 ASSERT(un != NULL); 11025 ASSERT(bp != NULL); 11026 ASSERT(!mutex_owned(SD_MUTEX(un))); 11027 ASSERT(bp->b_resid == 0); 11028 11029 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11030 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11031 11032 /* 11033 * For a non-writable CD, a write request is an error 11034 */ 11035 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11036 (un->un_f_mmc_writable_media == FALSE)) { 11037 bioerror(bp, EIO); 11038 bp->b_resid = bp->b_bcount; 11039 SD_BEGIN_IODONE(index, un, bp); 11040 return; 11041 } 11042 11043 /* 11044 * We do not need a shadow buf if the device is using 11045 * un->un_sys_blocksize as its block size or if bcount == 0.
11046 * In this case there is no layer-private data block allocated. 11047 */ 11048 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11049 (bp->b_bcount == 0)) { 11050 goto done; 11051 } 11052 11053 #if defined(__i386) || defined(__amd64) 11054 /* We do not support non-block-aligned transfers for ROD devices */ 11055 ASSERT(!ISROD(un)); 11056 #endif 11057 11058 xp = SD_GET_XBUF(bp); 11059 ASSERT(xp != NULL); 11060 11061 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11062 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11063 un->un_tgt_blocksize, un->un_sys_blocksize); 11064 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11065 "request start block:0x%x\n", xp->xb_blkno); 11066 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11067 "request len:0x%x\n", bp->b_bcount); 11068 11069 /* 11070 * Allocate the layer-private data area for the mapblocksize layer. 11071 * Layers are allowed to use the xb_private member of the sd_xbuf 11072 * struct to store the pointer to their layer-private data block, but 11073 * each layer also has the responsibility of restoring the prior 11074 * contents of xb_private before returning the buf/xbuf to the 11075 * higher layer that sent it. 11076 * 11077 * Here we save the prior contents of xp->xb_private into the 11078 * bsp->mbs_oprivate field of our layer-private data area. This value 11079 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11080 * the layer-private area and returning the buf/xbuf to the layer 11081 * that sent it. 11082 * 11083 * Note that here we use kmem_zalloc for the allocation as there are 11084 * parts of the mapblocksize code that expect certain fields to be 11085 * zero unless explicitly set to a required value. 11086 */ 11087 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11088 bsp->mbs_oprivate = xp->xb_private; 11089 xp->xb_private = bsp; 11090 11091 /* 11092 * This treats the data on the disk (target) as an array of bytes. 11093 * first_byte is the byte offset, from the beginning of the device, 11094 * to the location of the request. This is converted from a 11095 * un->un_sys_blocksize block address to a byte offset, and then back 11096 * to a block address based upon a un->un_tgt_blocksize block size. 11097 * 11098 * xp->xb_blkno should be absolute upon entry into this function, 11099 * but it is based upon partitions that use the "system" 11100 * block size. It must be adjusted to reflect the block size of 11101 * the target. 11102 * 11103 * Note that end_block is actually the block that follows the last 11104 * block of the request, but that's what is needed for the computation. 11105 */ 11106 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11107 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11108 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11109 un->un_tgt_blocksize; 11110 11111 /* request_bytes is rounded up to a multiple of the target block size */ 11112 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11113 11114 /* 11115 * See if the starting address of the request and the request 11116 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11117 * then we do not need to allocate a shadow buf to handle the request. 11118 */ 11119 if (((first_byte % un->un_tgt_blocksize) == 0) && 11120 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11121 is_aligned = TRUE; 11122 } 11123 11124 if ((bp->b_flags & B_READ) == 0) { 11125 /* 11126 * Lock the range for a write operation.
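 * (Worked example, illustrative numbers: with un_sys_blocksize = 512,
 * un_tgt_blocksize = 2048, xb_blkno = 3 and b_bcount = 1024, the code
 * above yields first_byte = 1536, start_block = 0, end_block = 2 and
 * request_bytes = 4096; the request is unaligned since
 * 1536 % 2048 != 0.)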
An aligned request is 11127 * considered a simple write; otherwise the request must be a 11128 * read-modify-write. 11129 */ 11130 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11131 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11132 } 11133 11134 /* 11135 * Alloc a shadow buf if the request is not aligned. Also, this is 11136 * where the READ command is generated for a read-modify-write. (The 11137 * write phase is deferred until after the read completes.) 11138 */ 11139 if (is_aligned == FALSE) { 11140 11141 struct sd_mapblocksize_info *shadow_bsp; 11142 struct sd_xbuf *shadow_xp; 11143 struct buf *shadow_bp; 11144 11145 /* 11146 * Allocate the shadow buf and its associated xbuf. Note that 11147 * after this call the xb_blkno value in both the original 11148 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11149 * same: absolute (relative to the start of the device), and 11150 * adjusted for the target block size. The b_blkno in the 11151 * shadow buf will also be set to this value. We should never 11152 * change b_blkno in the original bp however. 11153 * 11154 * Note also that the shadow buf will always need to be a 11155 * READ command, regardless of whether the incoming command 11156 * is a READ or a WRITE. 11157 */ 11158 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11159 xp->xb_blkno, 11160 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11161 11162 shadow_xp = SD_GET_XBUF(shadow_bp); 11163 11164 /* 11165 * Allocate the layer-private data for the shadow buf. 11166 * (No need to preserve xb_private in the shadow xbuf.) 11167 */ 11168 shadow_xp->xb_private = shadow_bsp = 11169 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11170 11171 /* 11172 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11173 * to figure out where the start of the user data is (based upon 11174 * the system block size) in the data returned by the READ 11175 * command (which will be based upon the target blocksize). Note 11176 * that this is only really used if the request is unaligned. 11177 */ 11178 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11179 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11180 ASSERT((bsp->mbs_copy_offset >= 0) && 11181 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11182 11183 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11184 11185 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11186 11187 /* Transfer the wmap (if any) to the shadow buf */ 11188 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11189 bsp->mbs_wmp = NULL; 11190 11191 /* 11192 * The shadow buf goes on from here in place of the 11193 * original buf. 11194 */ 11195 shadow_bsp->mbs_orig_bp = bp; 11196 bp = shadow_bp; 11197 } 11198 11199 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11200 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11201 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11202 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11203 request_bytes); 11204 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11205 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 11206 11207 done: 11208 SD_NEXT_IOSTART(index, un, bp); 11209 11210 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11211 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11212 } 11213 11214 11215 /* 11216 * Function: sd_mapblocksize_iodone 11217 * 11218 * Description: Completion-side processing for block-size mapping.
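 *		For the READ phase of a read-modify-write, this routine
 *		merges the caller's data into the shadow buf and redispatches
 *		it as a WRITE via the taskq; for a plain unaligned READ it
 *		copies the shadow data back into the original buf.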
11219 * 11220 * Context: May be called under interrupt context 11221 */ 11222 11223 static void 11224 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11225 { 11226 struct sd_mapblocksize_info *bsp; 11227 struct sd_xbuf *xp; 11228 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11229 struct buf *orig_bp; /* ptr to the original buf */ 11230 offset_t shadow_end; 11231 offset_t request_end; 11232 offset_t shadow_start; 11233 ssize_t copy_offset; 11234 size_t copy_length; 11235 size_t shortfall; 11236 uint_t is_write; /* TRUE if this bp is a WRITE */ 11237 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11238 11239 ASSERT(un != NULL); 11240 ASSERT(bp != NULL); 11241 11242 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11243 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11244 11245 /* 11246 * There is no shadow buf or layer-private data if the target is 11247 * using un->un_sys_blocksize as its block size or if bcount == 0. 11248 */ 11249 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11250 (bp->b_bcount == 0)) { 11251 goto exit; 11252 } 11253 11254 xp = SD_GET_XBUF(bp); 11255 ASSERT(xp != NULL); 11256 11257 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11258 bsp = xp->xb_private; 11259 11260 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11261 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11262 11263 if (is_write) { 11264 /* 11265 * For a WRITE request we must free up the block range that 11266 * we have locked up. This holds regardless of whether this is 11267 * an aligned write request or a read-modify-write request. 11268 */ 11269 sd_range_unlock(un, bsp->mbs_wmp); 11270 bsp->mbs_wmp = NULL; 11271 } 11272 11273 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11274 /* 11275 * An aligned read or write command will have no shadow buf; 11276 * there is not much else to do with it. 11277 */ 11278 goto done; 11279 } 11280 11281 orig_bp = bsp->mbs_orig_bp; 11282 ASSERT(orig_bp != NULL); 11283 orig_xp = SD_GET_XBUF(orig_bp); 11284 ASSERT(orig_xp != NULL); 11285 ASSERT(!mutex_owned(SD_MUTEX(un))); 11286 11287 if (!is_write && has_wmap) { 11288 /* 11289 * A READ with a wmap means this is the READ phase of a 11290 * read-modify-write. If an error occurred on the READ then 11291 * we do not proceed with the WRITE phase or copy any data. 11292 * Just release the write maps and return with an error. 11293 */ 11294 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11295 orig_bp->b_resid = orig_bp->b_bcount; 11296 bioerror(orig_bp, bp->b_error); 11297 sd_range_unlock(un, bsp->mbs_wmp); 11298 goto freebuf_done; 11299 } 11300 } 11301 11302 /* 11303 * Here is where we set up to copy the data from the shadow buf 11304 * into the space associated with the original buf. 11305 * 11306 * To deal with the conversion between block sizes, these 11307 * computations treat the data as an array of bytes, with the 11308 * first byte (byte 0) corresponding to the first byte in the 11309 * first block on the disk. 11310 */ 11311 11312 /* 11313 * shadow_start and shadow_end indicate the location and size of 11314 * the data returned with the shadow IO request. 11315 */ 11316 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11317 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11318 11319 /* 11320 * copy_offset gives the offset (in bytes) from the start of the first 11321 * block of the READ request to the beginning of the data.
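 * (In the worked example above, copy_offset would be 1536.)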
We retrieve 11322 * this value from mbs_copy_offset in the xbuf's layer-private data 11323 * area, where it was saved by sd_mapblocksize_iostart(). copy_length 11324 * gives the amount of data to be copied (in bytes). 11325 */ 11326 copy_offset = bsp->mbs_copy_offset; 11327 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11328 copy_length = orig_bp->b_bcount; 11329 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11330 11331 /* 11332 * Set up the resid and error fields of orig_bp as appropriate. 11333 */ 11334 if (shadow_end >= request_end) { 11335 /* We got all the requested data; set resid to zero */ 11336 orig_bp->b_resid = 0; 11337 } else { 11338 /* 11339 * We failed to get enough data to fully satisfy the original 11340 * request. Just copy back whatever data we got and set 11341 * up the residual and error code as required. 11342 * 11343 * 'shortfall' is the amount by which the data received with the 11344 * shadow buf has "fallen short" of the requested amount. 11345 */ 11346 shortfall = (size_t)(request_end - shadow_end); 11347 11348 if (shortfall > orig_bp->b_bcount) { 11349 /* 11350 * We did not get enough data to even partially 11351 * fulfill the original request. The residual is 11352 * equal to the amount requested. 11353 */ 11354 orig_bp->b_resid = orig_bp->b_bcount; 11355 } else { 11356 /* 11357 * We did not get all the data that we requested 11358 * from the device, but we will try to return what 11359 * portion we did get. 11360 */ 11361 orig_bp->b_resid = shortfall; 11362 } 11363 ASSERT(copy_length >= orig_bp->b_resid); 11364 copy_length -= orig_bp->b_resid; 11365 } 11366 11367 /* Propagate the error code from the shadow buf to the original buf */ 11368 bioerror(orig_bp, bp->b_error); 11369 11370 if (is_write) { 11371 goto freebuf_done; /* No data copying for a WRITE */ 11372 } 11373 11374 if (has_wmap) { 11375 /* 11376 * This is a READ command from the READ phase of a 11377 * read-modify-write request. We have to copy the data given 11378 * by the user OVER the data returned by the READ command, 11379 * then convert the command from a READ to a WRITE and send 11380 * it back to the target. 11381 */ 11382 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11383 copy_length); 11384 11385 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11386 11387 /* 11388 * Dispatch the WRITE command to the taskq thread, which 11389 * will in turn send the command to the target. When the 11390 * WRITE command completes, we (sd_mapblocksize_iodone()) 11391 * will get called again as part of the iodone chain 11392 * processing for it. Note that we will still be dealing 11393 * with the shadow buf at that point. 11394 */ 11395 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11396 KM_NOSLEEP) != 0) { 11397 /* 11398 * Dispatch was successful so we are done. Return 11399 * without going any higher up the iodone chain. Do 11400 * not free up any layer-private data until after the 11401 * WRITE completes. 11402 */ 11403 return; 11404 } 11405 11406 /* 11407 * Dispatch of the WRITE command failed; set up the error 11408 * condition and send this IO back up the iodone chain. 11409 */ 11410 bioerror(orig_bp, EIO); 11411 orig_bp->b_resid = orig_bp->b_bcount; 11412 11413 } else { 11414 /* 11415 * This is a regular READ request (ie, not a RMW). Copy the 11416 * data from the shadow buf into the original buf.
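 * (Continuing the worked example: the 1024 requested bytes are copied
 * out of the 4096-byte shadow buf starting at byte offset 1536.)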
The 11417 * copy_offset compensates for any "misalignment" between the 11418 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11419 * original buf (with its un->un_sys_blocksize blocks). 11420 */ 11421 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11422 copy_length); 11423 } 11424 11425 freebuf_done: 11426 11427 /* 11428 * At this point we still have both the shadow buf AND the original 11429 * buf to deal with, as well as the layer-private data area in each. 11430 * Local variables are as follows: 11431 * 11432 * bp -- points to shadow buf 11433 * xp -- points to xbuf of shadow buf 11434 * bsp -- points to layer-private data area of shadow buf 11435 * orig_bp -- points to original buf 11436 * 11437 * First free the shadow buf and its associated xbuf, then free the 11438 * layer-private data area from the shadow buf. There is no need to 11439 * restore xb_private in the shadow xbuf. 11440 */ 11441 sd_shadow_buf_free(bp); 11442 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11443 11444 /* 11445 * Now update the local variables to point to the original buf, xbuf, 11446 * and layer-private area. 11447 */ 11448 bp = orig_bp; 11449 xp = SD_GET_XBUF(bp); 11450 ASSERT(xp != NULL); 11451 ASSERT(xp == orig_xp); 11452 bsp = xp->xb_private; 11453 ASSERT(bsp != NULL); 11454 11455 done: 11456 /* 11457 * Restore xb_private to whatever it was set to by the next higher 11458 * layer in the chain, then free the layer-private data area. 11459 */ 11460 xp->xb_private = bsp->mbs_oprivate; 11461 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11462 11463 exit: 11464 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11465 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11466 11467 SD_NEXT_IODONE(index, un, bp); 11468 } 11469 11470 11471 /* 11472 * Function: sd_checksum_iostart 11473 * 11474 * Description: A stub function for a layer that's currently not used. 11475 * For now just a placeholder. 11476 * 11477 * Context: Kernel thread context 11478 */ 11479 11480 static void 11481 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11482 { 11483 ASSERT(un != NULL); 11484 ASSERT(bp != NULL); 11485 ASSERT(!mutex_owned(SD_MUTEX(un))); 11486 SD_NEXT_IOSTART(index, un, bp); 11487 } 11488 11489 11490 /* 11491 * Function: sd_checksum_iodone 11492 * 11493 * Description: A stub function for a layer that's currently not used. 11494 * For now just a placeholder. 11495 * 11496 * Context: May be called under interrupt context 11497 */ 11498 11499 static void 11500 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11501 { 11502 ASSERT(un != NULL); 11503 ASSERT(bp != NULL); 11504 ASSERT(!mutex_owned(SD_MUTEX(un))); 11505 SD_NEXT_IODONE(index, un, bp); 11506 } 11507 11508 11509 /* 11510 * Function: sd_checksum_uscsi_iostart 11511 * 11512 * Description: A stub function for a layer that's currently not used. 11513 * For now just a placeholder. 11514 * 11515 * Context: Kernel thread context 11516 */ 11517 11518 static void 11519 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11520 { 11521 ASSERT(un != NULL); 11522 ASSERT(bp != NULL); 11523 ASSERT(!mutex_owned(SD_MUTEX(un))); 11524 SD_NEXT_IOSTART(index, un, bp); 11525 } 11526 11527 11528 /* 11529 * Function: sd_checksum_uscsi_iodone 11530 * 11531 * Description: A stub function for a layer that's currently not used. 11532 * For now just a placeholder. 
11533 * 11534 * Context: May be called under interrupt context 11535 */ 11536 11537 static void 11538 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11539 { 11540 ASSERT(un != NULL); 11541 ASSERT(bp != NULL); 11542 ASSERT(!mutex_owned(SD_MUTEX(un))); 11543 SD_NEXT_IODONE(index, un, bp); 11544 } 11545 11546 11547 /* 11548 * Function: sd_pm_iostart 11549 * 11550 * Description: iostart-side routine for Power management. 11551 * 11552 * Context: Kernel thread context 11553 */ 11554 11555 static void 11556 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11557 { 11558 ASSERT(un != NULL); 11559 ASSERT(bp != NULL); 11560 ASSERT(!mutex_owned(SD_MUTEX(un))); 11561 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11562 11563 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11564 11565 if (sd_pm_entry(un) != DDI_SUCCESS) { 11566 /* 11567 * Set up to return the failed buf back up the 'iodone' 11568 * side of the calling chain. 11569 */ 11570 bioerror(bp, EIO); 11571 bp->b_resid = bp->b_bcount; 11572 11573 SD_BEGIN_IODONE(index, un, bp); 11574 11575 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11576 return; 11577 } 11578 11579 SD_NEXT_IOSTART(index, un, bp); 11580 11581 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11582 } 11583 11584 11585 /* 11586 * Function: sd_pm_iodone 11587 * 11588 * Description: iodone-side routine for power management. 11589 * 11590 * Context: may be called from interrupt context 11591 */ 11592 11593 static void 11594 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11595 { 11596 ASSERT(un != NULL); 11597 ASSERT(bp != NULL); 11598 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11599 11600 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11601 11602 /* 11603 * After attach the following flag is only read, so don't 11604 * take the penalty of acquiring a mutex for it. 11605 */ 11606 if (un->un_f_pm_is_enabled == TRUE) { 11607 sd_pm_exit(un); 11608 } 11609 11610 SD_NEXT_IODONE(index, un, bp); 11611 11612 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11613 } 11614 11615 11616 /* 11617 * Function: sd_core_iostart 11618 * 11619 * Description: Primary driver function for enqueuing buf(9S) structs from 11620 * the system and initiating IO to the target device 11621 * 11622 * Context: Kernel thread context. Can sleep. 11623 * 11624 * Assumptions: - The given xp->xb_blkno is absolute 11625 * (ie, relative to the start of the device). 11626 * - The IO is to be done using the native blocksize of 11627 * the device, as specified in un->un_tgt_blocksize. 11628 */ 11629 /* ARGSUSED */ 11630 static void 11631 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11632 { 11633 struct sd_xbuf *xp; 11634 11635 ASSERT(un != NULL); 11636 ASSERT(bp != NULL); 11637 ASSERT(!mutex_owned(SD_MUTEX(un))); 11638 ASSERT(bp->b_resid == 0); 11639 11640 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11641 11642 xp = SD_GET_XBUF(bp); 11643 ASSERT(xp != NULL); 11644 11645 mutex_enter(SD_MUTEX(un)); 11646 11647 /* 11648 * If we are currently in the failfast state, fail any new IO 11649 * that has B_FAILFAST set, then return. 11650 */ 11651 if ((bp->b_flags & B_FAILFAST) && 11652 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11653 mutex_exit(SD_MUTEX(un)); 11654 bioerror(bp, EIO); 11655 bp->b_resid = bp->b_bcount; 11656 SD_BEGIN_IODONE(index, un, bp); 11657 return; 11658 } 11659 11660 if (SD_IS_DIRECT_PRIORITY(xp)) { 11661 /* 11662 * Priority command -- transport it immediately.
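 * An illustrative case: a recovery command issued through
 * sd_send_scsi_cmd() with SD_PATH_DIRECT_PRIORITY is transported here
 * without touching the wait queue, so it cannot queue up behind the
 * very command it is trying to recover from.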
11663 * 11664 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11665 * because all direct priority commands should be associated 11666 * with error recovery actions which we don't want to retry. 11667 */ 11668 sd_start_cmds(un, bp); 11669 } else { 11670 /* 11671 * Normal command -- add it to the wait queue, then start 11672 * transporting commands from the wait queue. 11673 */ 11674 sd_add_buf_to_waitq(un, bp); 11675 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11676 sd_start_cmds(un, NULL); 11677 } 11678 11679 mutex_exit(SD_MUTEX(un)); 11680 11681 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11682 } 11683 11684 11685 /* 11686 * Function: sd_init_cdb_limits 11687 * 11688 * Description: This is to handle scsi_pkt initialization differences 11689 * between the driver platforms. 11690 * 11691 * Legacy behaviors: 11692 * 11693 * If the block number or the sector count exceeds the 11694 * capabilities of a Group 0 command, shift over to a 11695 * Group 1 command. We don't blindly use Group 1 11696 * commands because a) some drives (CDC Wren IVs) get a 11697 * bit confused, and b) there is probably a fair amount 11698 * of speed difference for a target to receive and decode 11699 * a 10 byte command instead of a 6 byte command. 11700 * 11701 * The xfer time difference of 6 vs 10 byte CDBs is 11702 * still significant so this code is still worthwhile. 11703 * 10 byte CDBs are very inefficient with the fas HBA driver 11704 * and older disks. Each CDB byte took 1 usec with some 11705 * popular disks. 11706 * 11707 * Context: Must be called at attach time 11708 */ 11709 11710 static void 11711 sd_init_cdb_limits(struct sd_lun *un) 11712 { 11713 int hba_cdb_limit; 11714 11715 /* 11716 * Use CDB_GROUP1 commands for most devices except for 11717 * parallel SCSI fixed drives in which case we get better 11718 * performance using CDB_GROUP0 commands (where applicable). 11719 */ 11720 un->un_mincdb = SD_CDB_GROUP1; 11721 #if !defined(__fibre) 11722 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11723 !un->un_f_has_removable_media) { 11724 un->un_mincdb = SD_CDB_GROUP0; 11725 } 11726 #endif 11727 11728 /* 11729 * Try to read the max-cdb-length supported by HBA. 11730 */ 11731 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11732 if (0 >= un->un_max_hba_cdb) { 11733 un->un_max_hba_cdb = CDB_GROUP4; 11734 hba_cdb_limit = SD_CDB_GROUP4; 11735 } else if (0 < un->un_max_hba_cdb && 11736 un->un_max_hba_cdb < CDB_GROUP1) { 11737 hba_cdb_limit = SD_CDB_GROUP0; 11738 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11739 un->un_max_hba_cdb < CDB_GROUP5) { 11740 hba_cdb_limit = SD_CDB_GROUP1; 11741 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11742 un->un_max_hba_cdb < CDB_GROUP4) { 11743 hba_cdb_limit = SD_CDB_GROUP5; 11744 } else { 11745 hba_cdb_limit = SD_CDB_GROUP4; 11746 } 11747 11748 /* 11749 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11750 * commands for fixed disks unless we are building for a 32 bit 11751 * kernel. 11752 */ 11753 #ifdef _LP64 11754 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11755 min(hba_cdb_limit, SD_CDB_GROUP4); 11756 #else 11757 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11758 min(hba_cdb_limit, SD_CDB_GROUP1); 11759 #endif 11760 11761 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 11762 ? sizeof (struct scsi_arq_status) : 1); 11763 un->un_cmd_timeout = (ushort_t)sd_io_time; 11764 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 11765 } 11766 11767 11768 /* 11769 * Function: sd_initpkt_for_buf 11770 * 11771 * Description: Allocate and initialize for transport a scsi_pkt struct, 11772 * based upon the info specified in the given buf struct. 11773 * 11774 * Assumes the xb_blkno in the request is absolute (ie, 11775 * relative to the start of the device, NOT the partition!). 11776 * Also assumes that the request is using the native block 11777 * size of the device (as returned by the READ CAPACITY 11778 * command). 11779 * 11780 * Return Code: SD_PKT_ALLOC_SUCCESS 11781 * SD_PKT_ALLOC_FAILURE 11782 * SD_PKT_ALLOC_FAILURE_NO_DMA 11783 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11784 * 11785 * Context: Kernel thread and may be called from software interrupt context 11786 * as part of a sdrunout callback. This function may not block or 11787 * call routines that block 11788 */ 11789 11790 static int 11791 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 11792 { 11793 struct sd_xbuf *xp; 11794 struct scsi_pkt *pktp = NULL; 11795 struct sd_lun *un; 11796 size_t blockcount; 11797 daddr_t startblock; 11798 int rval; 11799 int cmd_flags; 11800 11801 ASSERT(bp != NULL); 11802 ASSERT(pktpp != NULL); 11803 xp = SD_GET_XBUF(bp); 11804 ASSERT(xp != NULL); 11805 un = SD_GET_UN(bp); 11806 ASSERT(un != NULL); 11807 ASSERT(mutex_owned(SD_MUTEX(un))); 11808 ASSERT(bp->b_resid == 0); 11809 11810 SD_TRACE(SD_LOG_IO_CORE, un, 11811 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 11812 11813 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11814 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 11815 /* 11816 * Already have a scsi_pkt -- just need DMA resources. 11817 * We must recompute the CDB in case the mapping returns 11818 * a nonzero pkt_resid. 11819 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 11820 * that is being retried, the unmap/remap of the DMA resources 11821 * will result in the entire transfer starting over again 11822 * from the very first block. 11823 */ 11824 ASSERT(xp->xb_pktp != NULL); 11825 pktp = xp->xb_pktp; 11826 } else { 11827 pktp = NULL; 11828 } 11829 #endif /* __i386 || __amd64 */ 11830 11831 startblock = xp->xb_blkno; /* Absolute block num. */ 11832 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 11833 11834 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11835 11836 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 11837 11838 #else 11839 11840 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 11841 11842 #endif 11843 11844 /* 11845 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 11846 * call scsi_init_pkt, and build the CDB. 11847 */ 11848 rval = sd_setup_rw_pkt(un, &pktp, bp, 11849 cmd_flags, sdrunout, (caddr_t)un, 11850 startblock, blockcount); 11851 11852 if (rval == 0) { 11853 /* 11854 * Success. 11855 * 11856 * If partial DMA is being used and required for this transfer, 11857 * set it up here.
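 * (With PKT_DMA_PARTIAL, scsi_init_pkt(9F) may bind only part of the
 * buffer and reports the unmapped remainder in pkt_resid; that
 * remainder is parked in xb_dma_resid and consumed later by
 * sd_setup_next_rw_pkt().)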
11858 */ 11859 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 11860 (pktp->pkt_resid != 0)) { 11861 11862 /* 11863 * Save the CDB length and pkt_resid for the 11864 * next xfer 11865 */ 11866 xp->xb_dma_resid = pktp->pkt_resid; 11867 11868 /* rezero resid */ 11869 pktp->pkt_resid = 0; 11870 11871 } else { 11872 xp->xb_dma_resid = 0; 11873 } 11874 11875 pktp->pkt_flags = un->un_tagflags; 11876 pktp->pkt_time = un->un_cmd_timeout; 11877 pktp->pkt_comp = sdintr; 11878 11879 pktp->pkt_private = bp; 11880 *pktpp = pktp; 11881 11882 SD_TRACE(SD_LOG_IO_CORE, un, 11883 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 11884 11885 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 11886 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 11887 #endif 11888 11889 return (SD_PKT_ALLOC_SUCCESS); 11890 11891 } 11892 11893 /* 11894 * SD_PKT_ALLOC_FAILURE is the only expected failure code 11895 * from sd_setup_rw_pkt. 11896 */ 11897 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 11898 11899 if (rval == SD_PKT_ALLOC_FAILURE) { 11900 *pktpp = NULL; 11901 /* 11902 * Set the driver state to RWAIT to indicate the driver 11903 * is waiting on resource allocations. The driver will not 11904 * suspend, pm_suspend, or detatch while the state is RWAIT. 11905 */ 11906 New_state(un, SD_STATE_RWAIT); 11907 11908 SD_ERROR(SD_LOG_IO_CORE, un, 11909 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 11910 11911 if ((bp->b_flags & B_ERROR) != 0) { 11912 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 11913 } 11914 return (SD_PKT_ALLOC_FAILURE); 11915 } else { 11916 /* 11917 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 11918 * 11919 * This should never happen. Maybe someone messed with the 11920 * kernel's minphys? 11921 */ 11922 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11923 "Request rejected: too large for CDB: " 11924 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 11925 SD_ERROR(SD_LOG_IO_CORE, un, 11926 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 11927 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 11928 11929 } 11930 } 11931 11932 11933 /* 11934 * Function: sd_destroypkt_for_buf 11935 * 11936 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 11937 * 11938 * Context: Kernel thread or interrupt context 11939 */ 11940 11941 static void 11942 sd_destroypkt_for_buf(struct buf *bp) 11943 { 11944 ASSERT(bp != NULL); 11945 ASSERT(SD_GET_UN(bp) != NULL); 11946 11947 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11948 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 11949 11950 ASSERT(SD_GET_PKTP(bp) != NULL); 11951 scsi_destroy_pkt(SD_GET_PKTP(bp)); 11952 11953 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 11954 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 11955 } 11956 11957 /* 11958 * Function: sd_setup_rw_pkt 11959 * 11960 * Description: Determines appropriate CDB group for the requested LBA 11961 * and transfer length, calls scsi_init_pkt, and builds 11962 * the CDB. Do not use for partial DMA transfers except 11963 * for the initial transfer since the CDB size must 11964 * remain constant. 11965 * 11966 * Context: Kernel thread and may be called from software interrupt 11967 * context as part of a sdrunout callback. 
This function may not * block or call routines that block 11968 */ 11969 11970 11971 11972 int 11973 sd_setup_rw_pkt(struct sd_lun *un, 11974 struct scsi_pkt **pktpp, struct buf *bp, int flags, 11975 int (*callback)(caddr_t), caddr_t callback_arg, 11976 diskaddr_t lba, uint32_t blockcount) 11977 { 11978 struct scsi_pkt *return_pktp; 11979 union scsi_cdb *cdbp; 11980 struct sd_cdbinfo *cp = NULL; 11981 int i; 11982 11983 /* 11984 * See which size CDB to use, based upon the request. 11985 */ 11986 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 11987 11988 /* 11989 * Check lba and block count against sd_cdbtab limits. 11990 * In the partial DMA case, we have to use the same size 11991 * CDB for all the transfers. Check lba + blockcount 11992 * against the max LBA so we know that segment of the 11993 * transfer can use the CDB we select. 11994 */ 11995 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 11996 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 11997 11998 /* 11999 * The command will fit into the CDB type 12000 * specified by sd_cdbtab[i]. 12001 */ 12002 cp = sd_cdbtab + i; 12003 12004 /* 12005 * Call scsi_init_pkt so we can fill in the 12006 * CDB. 12007 */ 12008 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 12009 bp, cp->sc_grpcode, un->un_status_len, 0, 12010 flags, callback, callback_arg); 12011 12012 if (return_pktp != NULL) { 12013 12014 /* 12015 * Return new value of pkt 12016 */ 12017 *pktpp = return_pktp; 12018 12019 /* 12020 * To be safe, zero the CDB ensuring there is 12021 * no leftover data from a previous command. 12022 */ 12023 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12024 12025 /* 12026 * Handle partial DMA mapping 12027 */ 12028 if (return_pktp->pkt_resid != 0) { 12029 12030 /* 12031 * Not going to xfer as many blocks as 12032 * originally expected 12033 */ 12034 blockcount -= 12035 SD_BYTES2TGTBLOCKS(un, 12036 return_pktp->pkt_resid); 12037 } 12038 12039 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12040 12041 /* 12042 * Set command byte based on the CDB 12043 * type we matched. 12044 */ 12045 cdbp->scc_cmd = cp->sc_grpmask | 12046 ((bp->b_flags & B_READ) ? 12047 SCMD_READ : SCMD_WRITE); 12048 12049 SD_FILL_SCSI1_LUN(un, return_pktp); 12050 12051 /* 12052 * Fill in LBA and length 12053 */ 12054 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12055 (cp->sc_grpcode == CDB_GROUP4) || 12056 (cp->sc_grpcode == CDB_GROUP0) || 12057 (cp->sc_grpcode == CDB_GROUP5)); 12058 12059 if (cp->sc_grpcode == CDB_GROUP1) { 12060 FORMG1ADDR(cdbp, lba); 12061 FORMG1COUNT(cdbp, blockcount); 12062 return (0); 12063 } else if (cp->sc_grpcode == CDB_GROUP4) { 12064 FORMG4LONGADDR(cdbp, lba); 12065 FORMG4COUNT(cdbp, blockcount); 12066 return (0); 12067 } else if (cp->sc_grpcode == CDB_GROUP0) { 12068 FORMG0ADDR(cdbp, lba); 12069 FORMG0COUNT(cdbp, blockcount); 12070 return (0); 12071 } else if (cp->sc_grpcode == CDB_GROUP5) { 12072 FORMG5ADDR(cdbp, lba); 12073 FORMG5COUNT(cdbp, blockcount); 12074 return (0); 12075 } 12076 12077 /* 12078 * It should be impossible to not match one 12079 * of the CDB types above, so we should never 12080 * reach this point. Set the CDB command byte 12081 * to test-unit-ready to avoid writing 12082 * to somewhere we don't intend. 12083 */ 12084 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12085 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12086 } else { 12087 /* 12088 * Couldn't get scsi_pkt 12089 */ 12090 return (SD_PKT_ALLOC_FAILURE); 12091 } 12092 } 12093 } 12094 12095 /* 12096 * None of the available CDB types were suitable.
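 * (For reference, the classic CDB limits: Group 0 carries a 21-bit
 * LBA and an 8-bit count, Group 1 a 32-bit LBA and 16-bit count,
 * Group 5 a 32-bit LBA and 32-bit count, and Group 4 a 64-bit LBA
 * and 32-bit count.)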
This really 12097 * should never happen: on a 64 bit system we support 12098 * READ16/WRITE16 which will hold an entire 64 bit disk address 12099 * and on a 32 bit system we will refuse to bind to a device 12100 * larger than 2TB so addresses will never be larger than 32 bits. 12101 */ 12102 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12103 } 12104 12105 /* 12106 * Function: sd_setup_next_rw_pkt 12107 * 12108 * Description: Setup packet for partial DMA transfers, except for the 12109 * initial transfer. sd_setup_rw_pkt should be used for 12110 * the initial transfer. 12111 * 12112 * Context: Kernel thread and may be called from interrupt context. 12113 */ 12114 12115 int 12116 sd_setup_next_rw_pkt(struct sd_lun *un, 12117 struct scsi_pkt *pktp, struct buf *bp, 12118 diskaddr_t lba, uint32_t blockcount) 12119 { 12120 uchar_t com; 12121 union scsi_cdb *cdbp; 12122 uchar_t cdb_group_id; 12123 12124 ASSERT(pktp != NULL); 12125 ASSERT(pktp->pkt_cdbp != NULL); 12126 12127 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12128 com = cdbp->scc_cmd; 12129 cdb_group_id = CDB_GROUPID(com); 12130 12131 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12132 (cdb_group_id == CDB_GROUPID_1) || 12133 (cdb_group_id == CDB_GROUPID_4) || 12134 (cdb_group_id == CDB_GROUPID_5)); 12135 12136 /* 12137 * Move pkt to the next portion of the xfer. 12138 * func is NULL_FUNC so we do not have to release 12139 * the disk mutex here. 12140 */ 12141 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12142 NULL_FUNC, NULL) == pktp) { 12143 /* Success. Handle partial DMA */ 12144 if (pktp->pkt_resid != 0) { 12145 blockcount -= 12146 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12147 } 12148 12149 cdbp->scc_cmd = com; 12150 SD_FILL_SCSI1_LUN(un, pktp); 12151 if (cdb_group_id == CDB_GROUPID_1) { 12152 FORMG1ADDR(cdbp, lba); 12153 FORMG1COUNT(cdbp, blockcount); 12154 return (0); 12155 } else if (cdb_group_id == CDB_GROUPID_4) { 12156 FORMG4LONGADDR(cdbp, lba); 12157 FORMG4COUNT(cdbp, blockcount); 12158 return (0); 12159 } else if (cdb_group_id == CDB_GROUPID_0) { 12160 FORMG0ADDR(cdbp, lba); 12161 FORMG0COUNT(cdbp, blockcount); 12162 return (0); 12163 } else if (cdb_group_id == CDB_GROUPID_5) { 12164 FORMG5ADDR(cdbp, lba); 12165 FORMG5COUNT(cdbp, blockcount); 12166 return (0); 12167 } 12168 12169 /* Unreachable */ 12170 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12171 } 12172 12173 /* 12174 * Error setting up next portion of cmd transfer. 12175 * Something is definitely very wrong and this 12176 * should not happen. 12177 */ 12178 return (SD_PKT_ALLOC_FAILURE); 12179 } 12180 12181 /* 12182 * Function: sd_initpkt_for_uscsi 12183 * 12184 * Description: Allocate and initialize for transport a scsi_pkt struct, 12185 * based upon the info specified in the given uscsi_cmd struct. 12186 * 12187 * Return Code: SD_PKT_ALLOC_SUCCESS 12188 * SD_PKT_ALLOC_FAILURE 12189 * SD_PKT_ALLOC_FAILURE_NO_DMA 12190 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12191 * 12192 * Context: Kernel thread and may be called from software interrupt context 12193 * as part of a sdrunout callback. 
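 * (sdrunout is the packet-allocation resource callback this driver
 * registers with scsi_init_pkt(9F); it fires once resources free up,
 * allowing a previously failed allocation to be retried.)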
This function may not block or 12194 * call routines that block 12195 */ 12196 12197 static int 12198 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12199 { 12200 struct uscsi_cmd *uscmd; 12201 struct sd_xbuf *xp; 12202 struct scsi_pkt *pktp; 12203 struct sd_lun *un; 12204 uint32_t flags = 0; 12205 12206 ASSERT(bp != NULL); 12207 ASSERT(pktpp != NULL); 12208 xp = SD_GET_XBUF(bp); 12209 ASSERT(xp != NULL); 12210 un = SD_GET_UN(bp); 12211 ASSERT(un != NULL); 12212 ASSERT(mutex_owned(SD_MUTEX(un))); 12213 12214 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12215 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12216 ASSERT(uscmd != NULL); 12217 12218 SD_TRACE(SD_LOG_IO_CORE, un, 12219 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12220 12221 /* 12222 * Allocate the scsi_pkt for the command. 12223 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12224 * during scsi_init_pkt time and will continue to use the 12225 * same path as long as the same scsi_pkt is used without 12226 * intervening scsi_dmafree(). Since a uscsi command does 12227 * not call scsi_dmafree() before retrying a failed command, it 12228 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 12229 * set such that scsi_vhci can use another available path for 12230 * retry. Besides, a uscsi command does not allow DMA breakup, 12231 * so there is no need to set PKT_DMA_PARTIAL flag. 12232 */ 12233 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12234 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12235 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12236 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12237 - sizeof (struct scsi_extended_sense)), 0, 12238 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12239 sdrunout, (caddr_t)un); 12240 } else { 12241 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12242 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12243 sizeof (struct scsi_arq_status), 0, 12244 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12245 sdrunout, (caddr_t)un); 12246 } 12247 12248 if (pktp == NULL) { 12249 *pktpp = NULL; 12250 /* 12251 * Set the driver state to RWAIT to indicate the driver 12252 * is waiting on resource allocations. The driver will not 12253 * suspend, pm_suspend, or detach while the state is RWAIT. 12254 */ 12255 New_state(un, SD_STATE_RWAIT); 12256 12257 SD_ERROR(SD_LOG_IO_CORE, un, 12258 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12259 12260 if ((bp->b_flags & B_ERROR) != 0) { 12261 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12262 } 12263 return (SD_PKT_ALLOC_FAILURE); 12264 } 12265 12266 /* 12267 * We do not do DMA breakup for USCSI commands, so return failure 12268 * here if all the needed DMA resources were not allocated. 12269 */ 12270 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12271 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12272 scsi_destroy_pkt(pktp); 12273 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12274 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12275 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12276 } 12277 12278 /* Init the cdb from the given uscsi struct */ 12279 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12280 uscmd->uscsi_cdb[0], 0, 0, 0); 12281 12282 SD_FILL_SCSI1_LUN(un, pktp); 12283 12284 /* 12285 * Set up the optional USCSI flags. See the uscsi (7I) man page 12286 * for listing of the supported flags.
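 *
 * Illustrative userland origin of these flags (hypothetical values):
 * the caller fills in a struct uscsi_cmd and issues
 * ioctl(fd, USCSICMD, &ucmd) with, say,
 * uscsi_flags = USCSI_READ | USCSI_SILENT | USCSI_RQENABLE;
 * those bits arrive here via xb_pktinfo and are mapped onto the
 * pkt_flags below.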
12287 */ 12288 12289 if (uscmd->uscsi_flags & USCSI_SILENT) { 12290 flags |= FLAG_SILENT; 12291 } 12292 12293 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12294 flags |= FLAG_DIAGNOSE; 12295 } 12296 12297 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12298 flags |= FLAG_ISOLATE; 12299 } 12300 12301 if (un->un_f_is_fibre == FALSE) { 12302 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12303 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12304 } 12305 } 12306 12307 /* 12308 * Set the pkt flags here so we save time later. 12309 * Note: These flags are NOT in the uscsi man page!!! 12310 */ 12311 if (uscmd->uscsi_flags & USCSI_HEAD) { 12312 flags |= FLAG_HEAD; 12313 } 12314 12315 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12316 flags |= FLAG_NOINTR; 12317 } 12318 12319 /* 12320 * For tagged queueing, things get a bit complicated. 12321 * Check first for head of queue and last for ordered queue. 12322 * If neither head nor order, use the default driver tag flags. 12323 */ 12324 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12325 if (uscmd->uscsi_flags & USCSI_HTAG) { 12326 flags |= FLAG_HTAG; 12327 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12328 flags |= FLAG_OTAG; 12329 } else { 12330 flags |= un->un_tagflags & FLAG_TAGMASK; 12331 } 12332 } 12333 12334 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12335 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12336 } 12337 12338 pktp->pkt_flags = flags; 12339 12340 /* Copy the caller's CDB into the pkt... */ 12341 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12342 12343 if (uscmd->uscsi_timeout == 0) { 12344 pktp->pkt_time = un->un_uscsi_timeout; 12345 } else { 12346 pktp->pkt_time = uscmd->uscsi_timeout; 12347 } 12348 12349 /* need it later to identify USCSI request in sdintr */ 12350 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12351 12352 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12353 12354 pktp->pkt_private = bp; 12355 pktp->pkt_comp = sdintr; 12356 *pktpp = pktp; 12357 12358 SD_TRACE(SD_LOG_IO_CORE, un, 12359 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12360 12361 return (SD_PKT_ALLOC_SUCCESS); 12362 } 12363 12364 12365 /* 12366 * Function: sd_destroypkt_for_uscsi 12367 * 12368 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12369 * IOs.. Also saves relevant info into the associated uscsi_cmd 12370 * struct. 12371 * 12372 * Context: May be called under interrupt context 12373 */ 12374 12375 static void 12376 sd_destroypkt_for_uscsi(struct buf *bp) 12377 { 12378 struct uscsi_cmd *uscmd; 12379 struct sd_xbuf *xp; 12380 struct scsi_pkt *pktp; 12381 struct sd_lun *un; 12382 12383 ASSERT(bp != NULL); 12384 xp = SD_GET_XBUF(bp); 12385 ASSERT(xp != NULL); 12386 un = SD_GET_UN(bp); 12387 ASSERT(un != NULL); 12388 ASSERT(!mutex_owned(SD_MUTEX(un))); 12389 pktp = SD_GET_PKTP(bp); 12390 ASSERT(pktp != NULL); 12391 12392 SD_TRACE(SD_LOG_IO_CORE, un, 12393 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12394 12395 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12396 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12397 ASSERT(uscmd != NULL); 12398 12399 /* Save the status and the residual into the uscsi_cmd struct */ 12400 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12401 uscmd->uscsi_resid = bp->b_resid; 12402 12403 /* 12404 * If enabled, copy any saved sense data into the area specified 12405 * by the uscsi command. 
12406 */ 12407 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12408 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12409 /* 12410 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12411 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12412 */ 12413 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12414 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12415 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12416 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12417 MAX_SENSE_LENGTH); 12418 } else { 12419 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12420 SENSE_LENGTH); 12421 } 12422 } 12423 12424 /* We are done with the scsi_pkt; free it now */ 12425 ASSERT(SD_GET_PKTP(bp) != NULL); 12426 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12427 12428 SD_TRACE(SD_LOG_IO_CORE, un, 12429 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12430 } 12431 12432 12433 /* 12434 * Function: sd_bioclone_alloc 12435 * 12436 * Description: Allocate a buf(9S) and init it as per the given buf 12437 * and the various arguments. The associated sd_xbuf 12438 * struct is (nearly) duplicated. The struct buf *bp 12439 * argument is saved in new_xp->xb_private. 12440 * 12441 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12442 * datalen - size of data area for the shadow bp 12443 * blkno - starting LBA 12444 * func - function pointer for b_iodone in the shadow buf. (May 12445 * be NULL if none.) 12446 * 12447 * Return Code: Pointer to allocates buf(9S) struct 12448 * 12449 * Context: Can sleep. 12450 */ 12451 12452 static struct buf * 12453 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12454 daddr_t blkno, int (*func)(struct buf *)) 12455 { 12456 struct sd_lun *un; 12457 struct sd_xbuf *xp; 12458 struct sd_xbuf *new_xp; 12459 struct buf *new_bp; 12460 12461 ASSERT(bp != NULL); 12462 xp = SD_GET_XBUF(bp); 12463 ASSERT(xp != NULL); 12464 un = SD_GET_UN(bp); 12465 ASSERT(un != NULL); 12466 ASSERT(!mutex_owned(SD_MUTEX(un))); 12467 12468 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12469 NULL, KM_SLEEP); 12470 12471 new_bp->b_lblkno = blkno; 12472 12473 /* 12474 * Allocate an xbuf for the shadow bp and copy the contents of the 12475 * original xbuf into it. 12476 */ 12477 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12478 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12479 12480 /* 12481 * The given bp is automatically saved in the xb_private member 12482 * of the new xbuf. Callers are allowed to depend on this. 12483 */ 12484 new_xp->xb_private = bp; 12485 12486 new_bp->b_private = new_xp; 12487 12488 return (new_bp); 12489 } 12490 12491 /* 12492 * Function: sd_shadow_buf_alloc 12493 * 12494 * Description: Allocate a buf(9S) and init it as per the given buf 12495 * and the various arguments. The associated sd_xbuf 12496 * struct is (nearly) duplicated. The struct buf *bp 12497 * argument is saved in new_xp->xb_private. 12498 * 12499 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 12500 * datalen - size of data area for the shadow bp 12501 * bflags - B_READ or B_WRITE (pseudo flag) 12502 * blkno - starting LBA 12503 * func - function pointer for b_iodone in the shadow buf. (May 12504 * be NULL if none.) 12505 * 12506 * Return Code: Pointer to allocates buf(9S) struct 12507 * 12508 * Context: Can sleep. 
12509 */ 12510 12511 static struct buf * 12512 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12513 daddr_t blkno, int (*func)(struct buf *)) 12514 { 12515 struct sd_lun *un; 12516 struct sd_xbuf *xp; 12517 struct sd_xbuf *new_xp; 12518 struct buf *new_bp; 12519 12520 ASSERT(bp != NULL); 12521 xp = SD_GET_XBUF(bp); 12522 ASSERT(xp != NULL); 12523 un = SD_GET_UN(bp); 12524 ASSERT(un != NULL); 12525 ASSERT(!mutex_owned(SD_MUTEX(un))); 12526 12527 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12528 bp_mapin(bp); 12529 } 12530 12531 bflags &= (B_READ | B_WRITE); 12532 #if defined(__i386) || defined(__amd64) 12533 new_bp = getrbuf(KM_SLEEP); 12534 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12535 new_bp->b_bcount = datalen; 12536 new_bp->b_flags = bflags | 12537 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12538 #else 12539 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12540 datalen, bflags, SLEEP_FUNC, NULL); 12541 #endif 12542 new_bp->av_forw = NULL; 12543 new_bp->av_back = NULL; 12544 new_bp->b_dev = bp->b_dev; 12545 new_bp->b_blkno = blkno; 12546 new_bp->b_iodone = func; 12547 new_bp->b_edev = bp->b_edev; 12548 new_bp->b_resid = 0; 12549 12550 /* We need to preserve the B_FAILFAST flag */ 12551 if (bp->b_flags & B_FAILFAST) { 12552 new_bp->b_flags |= B_FAILFAST; 12553 } 12554 12555 /* 12556 * Allocate an xbuf for the shadow bp and copy the contents of the 12557 * original xbuf into it. 12558 */ 12559 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12560 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12561 12562 /* Need later to copy data between the shadow buf & original buf! */ 12563 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12564 12565 /* 12566 * The given bp is automatically saved in the xb_private member 12567 * of the new xbuf. Callers are allowed to depend on this. 12568 */ 12569 new_xp->xb_private = bp; 12570 12571 new_bp->b_private = new_xp; 12572 12573 return (new_bp); 12574 } 12575 12576 /* 12577 * Function: sd_bioclone_free 12578 * 12579 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12580 * in the larger than partition operation. 12581 * 12582 * Context: May be called under interrupt context 12583 */ 12584 12585 static void 12586 sd_bioclone_free(struct buf *bp) 12587 { 12588 struct sd_xbuf *xp; 12589 12590 ASSERT(bp != NULL); 12591 xp = SD_GET_XBUF(bp); 12592 ASSERT(xp != NULL); 12593 12594 /* 12595 * Call bp_mapout() before freeing the buf, in case a lower 12596 * layer or HBA had done a bp_mapin(). we must do this here 12597 * as we are the "originator" of the shadow buf. 12598 */ 12599 bp_mapout(bp); 12600 12601 /* 12602 * Null out b_iodone before freeing the bp, to ensure that the driver 12603 * never gets confused by a stale value in this field. (Just a little 12604 * extra defensiveness here.) 12605 */ 12606 bp->b_iodone = NULL; 12607 12608 freerbuf(bp); 12609 12610 kmem_free(xp, sizeof (struct sd_xbuf)); 12611 } 12612 12613 /* 12614 * Function: sd_shadow_buf_free 12615 * 12616 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12617 * 12618 * Context: May be called under interrupt context 12619 */ 12620 12621 static void 12622 sd_shadow_buf_free(struct buf *bp) 12623 { 12624 struct sd_xbuf *xp; 12625 12626 ASSERT(bp != NULL); 12627 xp = SD_GET_XBUF(bp); 12628 ASSERT(xp != NULL); 12629 12630 #if defined(__sparc) 12631 /* 12632 * Call bp_mapout() before freeing the buf, in case a lower 12633 * layer or HBA had done a bp_mapin(). 
We must do this here 12634 * as we are the "originator" of the shadow buf. 12635 */ 12636 bp_mapout(bp); 12637 #endif 12638 12639 /* 12640 * Null out b_iodone before freeing the bp, to ensure that the driver 12641 * never gets confused by a stale value in this field. (Just a little 12642 * extra defensiveness here.) 12643 */ 12644 bp->b_iodone = NULL; 12645 12646 #if defined(__i386) || defined(__amd64) 12647 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12648 freerbuf(bp); 12649 #else 12650 scsi_free_consistent_buf(bp); 12651 #endif 12652 12653 kmem_free(xp, sizeof (struct sd_xbuf)); 12654 } 12655 12656 12657 /* 12658 * Function: sd_print_transport_rejected_message 12659 * 12660 * Description: This implements the ludicrously complex rules for printing 12661 * a "transport rejected" message. This is to address the 12662 * specific problem of having a flood of this error message 12663 * produced when a failover occurs. 12664 * 12665 * Context: Any. 12666 */ 12667 12668 static void 12669 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12670 int code) 12671 { 12672 ASSERT(un != NULL); 12673 ASSERT(mutex_owned(SD_MUTEX(un))); 12674 ASSERT(xp != NULL); 12675 12676 /* 12677 * Print the "transport rejected" message under the following 12678 * conditions: 12679 * 12680 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12681 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12682 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12683 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12684 * scsi_transport(9F) (which indicates that the target might have 12685 * gone off-line). This uses the un->un_tran_fatal_count 12686 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 12687 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12688 * from scsi_transport(). 12689 * 12690 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12691 * the preceding cases in order for the message to be printed. 12692 */ 12693 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12694 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12695 (code != TRAN_FATAL_ERROR) || 12696 (un->un_tran_fatal_count == 1)) { 12697 switch (code) { 12698 case TRAN_BADPKT: 12699 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12700 "transport rejected bad packet\n"); 12701 break; 12702 case TRAN_FATAL_ERROR: 12703 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12704 "transport rejected fatal error\n"); 12705 break; 12706 default: 12707 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12708 "transport rejected (%d)\n", code); 12709 break; 12710 } 12711 } 12712 } 12713 } 12714 12715 12716 /* 12717 * Function: sd_add_buf_to_waitq 12718 * 12719 * Description: Add the given buf(9S) struct to the wait queue for the 12720 * instance. If sorting is enabled, then the buf is added 12721 * to the queue via an elevator sort algorithm (a la 12722 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12723 * If sorting is not enabled, then the buf is just added 12724 * to the end of the wait queue. 12725 * 12726 * Return Code: void 12727 * 12728 * Context: Does not sleep/block, therefore technically can be called 12729 * from any context. However if sorting is enabled then the 12730 * execution time is indeterminate, and may take long if 12731 * the wait queue grows large.
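 *
 * Worked example (illustrative block numbers): suppose the waitq
 * currently holds blocks 100 -> 250 -> 400 -> 30 -> 60, i.e. an
 * ascending first list (100, 250, 400) followed by a wrapped
 * second list (30, 60). A new buf for block 300 sorts into the
 * first list between 250 and 400; one for block 45 sorts into the
 * second list between 30 and 60; and one for block 10 becomes the
 * new head of the second list, directly after 400.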
12732 */ 12733 12734 static void 12735 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12736 { 12737 struct buf *ap; 12738 12739 ASSERT(bp != NULL); 12740 ASSERT(un != NULL); 12741 ASSERT(mutex_owned(SD_MUTEX(un))); 12742 12743 /* If the queue is empty, add the buf as the only entry & return. */ 12744 if (un->un_waitq_headp == NULL) { 12745 ASSERT(un->un_waitq_tailp == NULL); 12746 un->un_waitq_headp = un->un_waitq_tailp = bp; 12747 bp->av_forw = NULL; 12748 return; 12749 } 12750 12751 ASSERT(un->un_waitq_tailp != NULL); 12752 12753 /* 12754 * If sorting is disabled, just add the buf to the tail end of 12755 * the wait queue and return. 12756 */ 12757 if (un->un_f_disksort_disabled) { 12758 un->un_waitq_tailp->av_forw = bp; 12759 un->un_waitq_tailp = bp; 12760 bp->av_forw = NULL; 12761 return; 12762 } 12763 12764 /* 12765 * Sort through the list of requests currently on the wait queue 12766 * and add the new buf request at the appropriate position. 12767 * 12768 * The un->un_waitq_headp is an activity chain pointer on which 12769 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 12770 * first queue holds those requests which are positioned after 12771 * the current SD_GET_BLKNO() (in the first request); the second holds 12772 * requests which came in after their SD_GET_BLKNO() number was passed. 12773 * Thus we implement a one way scan, retracting after reaching 12774 * the end of the drive to the first request on the second 12775 * queue, at which time it becomes the first queue. 12776 * A one-way scan is natural because of the way UNIX read-ahead 12777 * blocks are allocated. 12778 * 12779 * If we lie after the first request, then we must locate the 12780 * second request list and add ourselves to it. 12781 */ 12782 ap = un->un_waitq_headp; 12783 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 12784 while (ap->av_forw != NULL) { 12785 /* 12786 * Look for an "inversion" in the (normally 12787 * ascending) block numbers. This indicates 12788 * the start of the second request list. 12789 */ 12790 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 12791 /* 12792 * Search the second request list for the 12793 * first request at a larger block number. 12794 * We go before that; however if there is 12795 * no such request, we go at the end. 12796 */ 12797 do { 12798 if (SD_GET_BLKNO(bp) < 12799 SD_GET_BLKNO(ap->av_forw)) { 12800 goto insert; 12801 } 12802 ap = ap->av_forw; 12803 } while (ap->av_forw != NULL); 12804 goto insert; /* after last */ 12805 } 12806 ap = ap->av_forw; 12807 } 12808 12809 /* 12810 * No inversions... we will go after the last, and 12811 * be the first request in the second request list. 12812 */ 12813 goto insert; 12814 } 12815 12816 /* 12817 * Request is at/after the current request... 12818 * sort in the first request list. 12819 */ 12820 while (ap->av_forw != NULL) { 12821 /* 12822 * We want to go after the current request (1) if 12823 * there is an inversion after it (i.e. it is the end 12824 * of the first request list), or (2) if the next 12825 * request is a larger block no. than our request. 12826 */ 12827 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 12828 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 12829 goto insert; 12830 } 12831 ap = ap->av_forw; 12832 } 12833 12834 /* 12835 * Neither a second list nor a larger request, therefore 12836 * we go at the end of the first list (which is the same 12837 * as the end of the whole shebang).
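 *
 * (Illustrative: if the queue is 100 -> 250 -> 400 with no wrapped
 * second list, a request for block 500 scans off the end of the
 * loop above and is linked in here, after 400.)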
12838 */ 12839 insert: 12840 bp->av_forw = ap->av_forw; 12841 ap->av_forw = bp; 12842 12843 /* 12844 * If we inserted onto the tail end of the waitq, make sure the 12845 * tail pointer is updated. 12846 */ 12847 if (ap == un->un_waitq_tailp) { 12848 un->un_waitq_tailp = bp; 12849 } 12850 } 12851 12852 12853 /* 12854 * Function: sd_start_cmds 12855 * 12856 * Description: Remove and transport cmds from the driver queues. 12857 * 12858 * Arguments: un - pointer to the unit (soft state) struct for the target. 12859 * 12860 * immed_bp - ptr to a buf to be transported immediately. Only 12861 * the immed_bp is transported; bufs on the waitq are not 12862 * processed and the un_retry_bp is not checked. If immed_bp is 12863 * NULL, then normal queue processing is performed. 12864 * 12865 * Context: May be called from kernel thread context, interrupt context, 12866 * or runout callback context. This function may not block or 12867 * call routines that block. 12868 */ 12869 12870 static void 12871 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 12872 { 12873 struct sd_xbuf *xp; 12874 struct buf *bp; 12875 void (*statp)(kstat_io_t *); 12876 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12877 void (*saved_statp)(kstat_io_t *); 12878 #endif 12879 int rval; 12880 12881 ASSERT(un != NULL); 12882 ASSERT(mutex_owned(SD_MUTEX(un))); 12883 ASSERT(un->un_ncmds_in_transport >= 0); 12884 ASSERT(un->un_throttle >= 0); 12885 12886 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 12887 12888 do { 12889 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12890 saved_statp = NULL; 12891 #endif 12892 12893 /* 12894 * If we are syncing or dumping, fail the command to 12895 * avoid recursively calling back into scsi_transport(). 12896 * The dump I/O itself uses a separate code path so this 12897 * only prevents non-dump I/O from being sent while dumping. 12898 * File system sync takes place before dumping begins. 12899 * During panic, filesystem I/O is allowed provided 12900 * un_in_callback is <= 1. This is to prevent recursion 12901 * such as sd_start_cmds -> scsi_transport -> sdintr -> 12902 * sd_start_cmds and so on. See panic.c for more information 12903 * about the states the system can be in during panic. 12904 */ 12905 if ((un->un_state == SD_STATE_DUMPING) || 12906 (ddi_in_panic() && (un->un_in_callback > 1))) { 12907 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12908 "sd_start_cmds: panicking\n"); 12909 goto exit; 12910 } 12911 12912 if ((bp = immed_bp) != NULL) { 12913 /* 12914 * We have a bp that must be transported immediately. 12915 * It's OK to transport the immed_bp here without doing 12916 * the throttle limit check because the immed_bp is 12917 * always used in a retry/recovery case. This means 12918 * that we know we are not at the throttle limit by 12919 * virtue of the fact that to get here we must have 12920 * already gotten a command back via sdintr(). This also 12921 * relies on (1) the command on un_retry_bp preventing 12922 * further commands from the waitq from being issued; 12923 * and (2) the code in sd_retry_command checking the 12924 * throttle limit before issuing a delayed or immediate 12925 * retry. This holds even if the throttle limit is 12926 * currently ratcheted down from its maximum value. 
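 *
 * Illustrative timeline (simplified): sdintr() completes a failed
 * command, sd_retry_command() parks it on un_retry_bp, and the
 * timeout(9F) handler sd_start_retry_command() later re-enters this
 * function with immed_bp == un_retry_bp; at most one such command
 * is outstanding, so the throttle limit cannot be exceeded here.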
12927 */ 12928 statp = kstat_runq_enter; 12929 if (bp == un->un_retry_bp) { 12930 ASSERT((un->un_retry_statp == NULL) || 12931 (un->un_retry_statp == kstat_waitq_enter) || 12932 (un->un_retry_statp == 12933 kstat_runq_back_to_waitq)); 12934 /* 12935 * If the waitq kstat was incremented when 12936 * sd_set_retry_bp() queued this bp for a retry, 12937 * then we must set up statp so that the waitq 12938 * count will get decremented correctly below. 12939 * Also we must clear un->un_retry_statp to 12940 * ensure that we do not act on a stale value 12941 * in this field. 12942 */ 12943 if ((un->un_retry_statp == kstat_waitq_enter) || 12944 (un->un_retry_statp == 12945 kstat_runq_back_to_waitq)) { 12946 statp = kstat_waitq_to_runq; 12947 } 12948 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12949 saved_statp = un->un_retry_statp; 12950 #endif 12951 un->un_retry_statp = NULL; 12952 12953 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 12954 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 12955 "un_throttle:%d un_ncmds_in_transport:%d\n", 12956 un, un->un_retry_bp, un->un_throttle, 12957 un->un_ncmds_in_transport); 12958 } else { 12959 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 12960 "processing priority bp:0x%p\n", bp); 12961 } 12962 12963 } else if ((bp = un->un_waitq_headp) != NULL) { 12964 /* 12965 * A command on the waitq is ready to go, but do not 12966 * send it if: 12967 * 12968 * (1) the throttle limit has been reached, or 12969 * (2) a retry is pending, or 12970 * (3) a START_STOP_UNIT callback is pending, or 12971 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 12972 * command is pending. 12973 * 12974 * For all of these conditions, IO processing will 12975 * restart after the condition is cleared. 12976 */ 12977 if (un->un_ncmds_in_transport >= un->un_throttle) { 12978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12979 "sd_start_cmds: exiting, " 12980 "throttle limit reached!\n"); 12981 goto exit; 12982 } 12983 if (un->un_retry_bp != NULL) { 12984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12985 "sd_start_cmds: exiting, retry pending!\n"); 12986 goto exit; 12987 } 12988 if (un->un_startstop_timeid != NULL) { 12989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12990 "sd_start_cmds: exiting, " 12991 "START_STOP pending!\n"); 12992 goto exit; 12993 } 12994 if (un->un_direct_priority_timeid != NULL) { 12995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 12996 "sd_start_cmds: exiting, " 12997 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 12998 goto exit; 12999 } 13000 13001 /* Dequeue the command */ 13002 un->un_waitq_headp = bp->av_forw; 13003 if (un->un_waitq_headp == NULL) { 13004 un->un_waitq_tailp = NULL; 13005 } 13006 bp->av_forw = NULL; 13007 statp = kstat_waitq_to_runq; 13008 SD_TRACE(SD_LOG_IO_CORE, un, 13009 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 13010 13011 } else { 13012 /* No work to do so bail out now */ 13013 SD_TRACE(SD_LOG_IO_CORE, un, 13014 "sd_start_cmds: no more work, exiting!\n"); 13015 goto exit; 13016 } 13017 13018 /* 13019 * Reset the state to normal. This is the mechanism by which 13020 * the state transitions from either SD_STATE_RWAIT or 13021 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13022 * If state is SD_STATE_PM_CHANGING then this command is 13023 * part of the device power control and the state must 13024 * not be put back to normal. Doing so would 13025 * allow new commands to proceed when they shouldn't, 13026 * as the device may be going off.
13027 */ 13028 if ((un->un_state != SD_STATE_SUSPENDED) && 13029 (un->un_state != SD_STATE_PM_CHANGING)) { 13030 New_state(un, SD_STATE_NORMAL); 13031 } 13032 13033 xp = SD_GET_XBUF(bp); 13034 ASSERT(xp != NULL); 13035 13036 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13037 /* 13038 * Allocate the scsi_pkt if we need one, or attach DMA 13039 * resources if we have a scsi_pkt that needs them. The 13040 * latter should only occur for commands that are being 13041 * retried. 13042 */ 13043 if ((xp->xb_pktp == NULL) || 13044 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13045 #else 13046 if (xp->xb_pktp == NULL) { 13047 #endif 13048 /* 13049 * There is no scsi_pkt allocated for this buf. Call 13050 * the initpkt function to allocate & init one. 13051 * 13052 * The scsi_init_pkt runout callback functionality is 13053 * implemented as follows: 13054 * 13055 * 1) The initpkt function always calls 13056 * scsi_init_pkt(9F) with sdrunout specified as the 13057 * callback routine. 13058 * 2) A successful packet allocation is initialized and 13059 * the I/O is transported. 13060 * 3) The I/O associated with an allocation resource 13061 * failure is left on its queue to be retried via 13062 * runout or the next I/O. 13063 * 4) The I/O associated with a DMA error is removed 13064 * from the queue and failed with EIO. Processing of 13065 * the transport queues is also halted to be 13066 * restarted via runout or the next I/O. 13067 * 5) The I/O associated with a CDB size or packet 13068 * size error is removed from the queue and failed 13069 * with EIO. Processing of the transport queues is 13070 * continued. 13071 * 13072 * Note: there is no interface for canceling a runout 13073 * callback. To prevent the driver from detaching or 13074 * suspending while a runout is pending, the driver 13075 * state is set to SD_STATE_RWAIT. 13076 * 13077 * Note: using the scsi_init_pkt callback facility can 13078 * result in an I/O request persisting at the head of 13079 * the list which cannot be satisfied even after 13080 * multiple retries. In the future the driver may 13081 * implement some kind of maximum runout count before 13082 * failing an I/O. 13083 * 13084 * Note: the use of funcp below may seem superfluous, 13085 * but it helps warlock figure out the correct 13086 * initpkt function calls (see [s]sd.wlcmd). 13087 */ 13088 struct scsi_pkt *pktp; 13089 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13090 13091 ASSERT(bp != un->un_rqs_bp); 13092 13093 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13094 switch ((*funcp)(bp, &pktp)) { 13095 case SD_PKT_ALLOC_SUCCESS: 13096 xp->xb_pktp = pktp; 13097 SD_TRACE(SD_LOG_IO_CORE, un, 13098 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13099 pktp); 13100 goto got_pkt; 13101 13102 case SD_PKT_ALLOC_FAILURE: 13103 /* 13104 * Temporary (hopefully) resource depletion. 13105 * Since retries and RQS commands always have a 13106 * scsi_pkt allocated, these cases should never 13107 * get here. So the only cases this needs to 13108 * handle are a bp from the waitq (which we put 13109 * back onto the waitq for sdrunout), or a bp 13110 * sent as an immed_bp (which we just fail). 13111 */ 13112 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13113 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13114 13115 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13116 13117 if (bp == immed_bp) { 13118 /* 13119 * If SD_XB_DMA_FREED is clear, then 13120 * this is a failure to allocate a 13121 * scsi_pkt, and we must fail the 13122 * command.
13123 */ 13124 if ((xp->xb_pkt_flags & 13125 SD_XB_DMA_FREED) == 0) { 13126 break; 13127 } 13128 13129 /* 13130 * If this immediate command is NOT our 13131 * un_retry_bp, then we must fail it. 13132 */ 13133 if (bp != un->un_retry_bp) { 13134 break; 13135 } 13136 13137 /* 13138 * We get here if this cmd is our 13139 * un_retry_bp that was DMAFREED, but 13140 * scsi_init_pkt() failed to reallocate 13141 * DMA resources when we attempted to 13142 * retry it. This can happen when an 13143 * mpxio failover is in progress, but 13144 * we don't want to just fail the 13145 * command in this case. 13146 * 13147 * Use timeout(9F) to restart it after 13148 * a 100ms delay. We don't want to 13149 * let sdrunout() restart it, because 13150 * sdrunout() is just supposed to start 13151 * commands that are sitting on the 13152 * wait queue. The un_retry_bp stays 13153 * set until the command completes, but 13154 * sdrunout can be called many times 13155 * before that happens. Since sdrunout 13156 * cannot tell if the un_retry_bp is 13157 * already in the transport, it could 13158 * end up calling scsi_transport() for 13159 * the un_retry_bp multiple times. 13160 * 13161 * Also: don't schedule the callback 13162 * if some other callback is already 13163 * pending. 13164 */ 13165 if (un->un_retry_statp == NULL) { 13166 /* 13167 * restore the kstat pointer to 13168 * keep kstat counts coherent 13169 * when we do retry the command. 13170 */ 13171 un->un_retry_statp = 13172 saved_statp; 13173 } 13174 13175 if ((un->un_startstop_timeid == NULL) && 13176 (un->un_retry_timeid == NULL) && 13177 (un->un_direct_priority_timeid == 13178 NULL)) { 13179 13180 un->un_retry_timeid = 13181 timeout( 13182 sd_start_retry_command, 13183 un, SD_RESTART_TIMEOUT); 13184 } 13185 goto exit; 13186 } 13187 13188 #else 13189 if (bp == immed_bp) { 13190 break; /* Just fail the command */ 13191 } 13192 #endif 13193 13194 /* Add the buf back to the head of the waitq */ 13195 bp->av_forw = un->un_waitq_headp; 13196 un->un_waitq_headp = bp; 13197 if (un->un_waitq_tailp == NULL) { 13198 un->un_waitq_tailp = bp; 13199 } 13200 goto exit; 13201 13202 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13203 /* 13204 * HBA DMA resource failure. Fail the command 13205 * and continue processing of the queues. 13206 */ 13207 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13208 "sd_start_cmds: " 13209 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13210 break; 13211 13212 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13213 /* 13214 * Note:x86: Partial DMA mapping not supported 13215 * for USCSI commands, and all the needed DMA 13216 * resources were not allocated. 13217 */ 13218 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13219 "sd_start_cmds: " 13220 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13221 break; 13222 13223 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13224 /* 13225 * Note:x86: Request cannot fit into CDB based 13226 * on lba and len. 13227 */ 13228 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13229 "sd_start_cmds: " 13230 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13231 break; 13232 13233 default: 13234 /* Should NEVER get here! */ 13235 panic("scsi_initpkt error"); 13236 /*NOTREACHED*/ 13237 } 13238 13239 /* 13240 * Fatal error in allocating a scsi_pkt for this buf. 13241 * Update kstats & return the buf with an error code. 13242 * We must use sd_return_failed_command_no_restart() to 13243 * avoid a recursive call back into sd_start_cmds(). 13244 * However this also means that we must keep processing 13245 * the waitq here in order to avoid stalling. 
13246 */ 13247 if (statp == kstat_waitq_to_runq) { 13248 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13249 } 13250 sd_return_failed_command_no_restart(un, bp, EIO); 13251 if (bp == immed_bp) { 13252 /* immed_bp is gone by now, so clear this */ 13253 immed_bp = NULL; 13254 } 13255 continue; 13256 } 13257 got_pkt: 13258 if (bp == immed_bp) { 13259 /* goto the head of the class.... */ 13260 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13261 } 13262 13263 un->un_ncmds_in_transport++; 13264 SD_UPDATE_KSTATS(un, statp, bp); 13265 13266 /* 13267 * Call scsi_transport() to send the command to the target. 13268 * According to SCSA architecture, we must drop the mutex here 13269 * before calling scsi_transport() in order to avoid deadlock. 13270 * Note that the scsi_pkt's completion routine can be executed 13271 * (from interrupt context) even before the call to 13272 * scsi_transport() returns. 13273 */ 13274 SD_TRACE(SD_LOG_IO_CORE, un, 13275 "sd_start_cmds: calling scsi_transport()\n"); 13276 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13277 13278 mutex_exit(SD_MUTEX(un)); 13279 rval = scsi_transport(xp->xb_pktp); 13280 mutex_enter(SD_MUTEX(un)); 13281 13282 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13283 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13284 13285 switch (rval) { 13286 case TRAN_ACCEPT: 13287 /* Clear this with every pkt accepted by the HBA */ 13288 un->un_tran_fatal_count = 0; 13289 break; /* Success; try the next cmd (if any) */ 13290 13291 case TRAN_BUSY: 13292 un->un_ncmds_in_transport--; 13293 ASSERT(un->un_ncmds_in_transport >= 0); 13294 13295 /* 13296 * Don't retry request sense, the sense data 13297 * is lost when another request is sent. 13298 * Free up the rqs buf and retry 13299 * the original failed cmd. Update kstat. 13300 */ 13301 if (bp == un->un_rqs_bp) { 13302 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13303 bp = sd_mark_rqs_idle(un, xp); 13304 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13305 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 13306 kstat_waitq_enter); 13307 goto exit; 13308 } 13309 13310 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13311 /* 13312 * Free the DMA resources for the scsi_pkt. This will 13313 * allow mpxio to select another path the next time 13314 * we call scsi_transport() with this scsi_pkt. 13315 * See sdintr() for the rationalization behind this. 13316 */ 13317 if ((un->un_f_is_fibre == TRUE) && 13318 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13319 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13320 scsi_dmafree(xp->xb_pktp); 13321 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13322 } 13323 #endif 13324 13325 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13326 /* 13327 * Commands that are SD_PATH_DIRECT_PRIORITY 13328 * are for error recovery situations. These do 13329 * not use the normal command waitq, so if they 13330 * get a TRAN_BUSY we cannot put them back onto 13331 * the waitq for later retry. One possible 13332 * problem is that there could already be some 13333 * other command on un_retry_bp that is waiting 13334 * for this one to complete, so we would be 13335 * deadlocked if we put this command back onto 13336 * the waitq for later retry (since un_retry_bp 13337 * must complete before the driver gets back to 13338 * commands on the waitq). 13339 * 13340 * To avoid deadlock we must schedule a callback 13341 * that will restart this command after a set 13342 * interval. 
This should keep retrying for as 13343 * long as the underlying transport keeps 13344 * returning TRAN_BUSY (just like for other 13345 * commands). Use the same timeout interval as 13346 * for the ordinary TRAN_BUSY retry. 13347 */ 13348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13349 "sd_start_cmds: scsi_transport() returned " 13350 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13351 13352 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13353 un->un_direct_priority_timeid = 13354 timeout(sd_start_direct_priority_command, 13355 bp, SD_BSY_TIMEOUT / 500); 13356 13357 goto exit; 13358 } 13359 13360 /* 13361 * For TRAN_BUSY, we want to reduce the throttle value, 13362 * unless we are retrying a command. 13363 */ 13364 if (bp != un->un_retry_bp) { 13365 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13366 } 13367 13368 /* 13369 * Set up the bp to be tried again 10 ms later. 13370 * Note:x86: Is there a timeout value in the sd_lun 13371 * for this condition? 13372 */ 13373 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 13374 kstat_runq_back_to_waitq); 13375 goto exit; 13376 13377 case TRAN_FATAL_ERROR: 13378 un->un_tran_fatal_count++; 13379 /* FALLTHRU */ 13380 13381 case TRAN_BADPKT: 13382 default: 13383 un->un_ncmds_in_transport--; 13384 ASSERT(un->un_ncmds_in_transport >= 0); 13385 13386 /* 13387 * If this is our REQUEST SENSE command with a 13388 * transport error, we must get back the pointers 13389 * to the original buf, and mark the REQUEST 13390 * SENSE command as "available". 13391 */ 13392 if (bp == un->un_rqs_bp) { 13393 bp = sd_mark_rqs_idle(un, xp); 13394 xp = SD_GET_XBUF(bp); 13395 } else { 13396 /* 13397 * Legacy behavior: do not update transport 13398 * error count for request sense commands. 13399 */ 13400 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13401 } 13402 13403 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13404 sd_print_transport_rejected_message(un, xp, rval); 13405 13406 /* 13407 * We must use sd_return_failed_command_no_restart() to 13408 * avoid a recursive call back into sd_start_cmds(). 13409 * However this also means that we must keep processing 13410 * the waitq here in order to avoid stalling. 13411 */ 13412 sd_return_failed_command_no_restart(un, bp, EIO); 13413 13414 /* 13415 * Notify any threads waiting in sd_ddi_suspend() that 13416 * a command completion has occurred. 13417 */ 13418 if (un->un_state == SD_STATE_SUSPENDED) { 13419 cv_broadcast(&un->un_disk_busy_cv); 13420 } 13421 13422 if (bp == immed_bp) { 13423 /* immed_bp is gone by now, so clear this */ 13424 immed_bp = NULL; 13425 } 13426 break; 13427 } 13428 13429 } while (immed_bp == NULL); 13430 13431 exit: 13432 ASSERT(mutex_owned(SD_MUTEX(un))); 13433 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13434 } 13435 13436 13437 /* 13438 * Function: sd_return_command 13439 * 13440 * Description: Returns a command to its originator (with or without an 13441 * error). Also starts commands waiting to be transported 13442 * to the target. 
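 *
 * Simplified completion flow (a sketch drawn from the body below,
 * not additional behavior):
 *
 *	clear un_failfast_bp / un_retry_bp bookkeeping
 *	SD_UPDATE_RDWR_STATS() / SD_UPDATE_PARTITION_STATS()
 *	sd_start_cmds(un, NULL)		(unless suspended)
 *	mutex_exit(SD_MUTEX(un))
 *	(*sd_destroypkt_map[xp->xb_chain_iodone])(bp)
 *	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp)
 *	mutex_enter(SD_MUTEX(un))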
13443 * 13444 * Context: May be called from interrupt, kernel, or timeout context 13445 */ 13446 13447 static void 13448 sd_return_command(struct sd_lun *un, struct buf *bp) 13449 { 13450 struct sd_xbuf *xp; 13451 struct scsi_pkt *pktp; 13452 13453 ASSERT(bp != NULL); 13454 ASSERT(un != NULL); 13455 ASSERT(mutex_owned(SD_MUTEX(un))); 13456 ASSERT(bp != un->un_rqs_bp); 13457 xp = SD_GET_XBUF(bp); 13458 ASSERT(xp != NULL); 13459 13460 pktp = SD_GET_PKTP(bp); 13461 13462 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13463 13464 /* 13465 * Note: check for the "sdrestart failed" case. 13466 */ 13467 if ((un->un_partial_dma_supported == 1) && 13468 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13469 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13470 (xp->xb_pktp->pkt_resid == 0)) { 13471 13472 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13473 /* 13474 * Successfully set up next portion of cmd 13475 * transfer, try sending it 13476 */ 13477 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13478 NULL, NULL, 0, (clock_t)0, NULL); 13479 sd_start_cmds(un, NULL); 13480 return; /* Note:x86: need a return here? */ 13481 } 13482 } 13483 13484 /* 13485 * If this is the failfast bp, clear it from un_failfast_bp. This 13486 * can happen if upon being re-tried the failfast bp either 13487 * succeeded or encountered another error (possibly even a different 13488 * error than the one that precipitated the failfast state, but in 13489 * that case it would have had to exhaust retries as well). Regardless, 13490 * this should not occur whenever the instance is in the active 13491 * failfast state. 13492 */ 13493 if (bp == un->un_failfast_bp) { 13494 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13495 un->un_failfast_bp = NULL; 13496 } 13497 13498 /* 13499 * Clear the failfast state upon successful completion of ANY cmd. 13500 */ 13501 if (bp->b_error == 0) { 13502 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13503 } 13504 13505 /* 13506 * This is used if the command was retried one or more times. Show that 13507 * we are done with it, and allow processing of the waitq to resume. 13508 */ 13509 if (bp == un->un_retry_bp) { 13510 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13511 "sd_return_command: un:0x%p: " 13512 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13513 un->un_retry_bp = NULL; 13514 un->un_retry_statp = NULL; 13515 } 13516 13517 SD_UPDATE_RDWR_STATS(un, bp); 13518 SD_UPDATE_PARTITION_STATS(un, bp); 13519 13520 switch (un->un_state) { 13521 case SD_STATE_SUSPENDED: 13522 /* 13523 * Notify any threads waiting in sd_ddi_suspend() that 13524 * a command completion has occurred. 13525 */ 13526 cv_broadcast(&un->un_disk_busy_cv); 13527 break; 13528 default: 13529 sd_start_cmds(un, NULL); 13530 break; 13531 } 13532 13533 /* Return this command up the iodone chain to its originator. */ 13534 mutex_exit(SD_MUTEX(un)); 13535 13536 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13537 xp->xb_pktp = NULL; 13538 13539 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13540 13541 ASSERT(!mutex_owned(SD_MUTEX(un))); 13542 mutex_enter(SD_MUTEX(un)); 13543 13544 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13545 } 13546 13547 13548 /* 13549 * Function: sd_return_failed_command 13550 * 13551 * Description: Command completion when an error occurred. 
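 *
 * For example (illustrative): sd_return_failed_command(un, bp, EIO)
 * stamps the buf with EIO via SD_BIOERROR() and then hands it to
 * sd_return_command() for the normal completion path; any partial
 * b_resid already on the buf is left intact.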
13552 * 13553 * Context: May be called from interrupt context 13554 */ 13555 13556 static void 13557 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13558 { 13559 ASSERT(bp != NULL); 13560 ASSERT(un != NULL); 13561 ASSERT(mutex_owned(SD_MUTEX(un))); 13562 13563 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13564 "sd_return_failed_command: entry\n"); 13565 13566 /* 13567 * b_resid could already be nonzero due to a partial data 13568 * transfer, so do not change it here. 13569 */ 13570 SD_BIOERROR(bp, errcode); 13571 13572 sd_return_command(un, bp); 13573 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13574 "sd_return_failed_command: exit\n"); 13575 } 13576 13577 13578 /* 13579 * Function: sd_return_failed_command_no_restart 13580 * 13581 * Description: Same as sd_return_failed_command, but ensures that no 13582 * call back into sd_start_cmds will be issued. 13583 * 13584 * Context: May be called from interrupt context 13585 */ 13586 13587 static void 13588 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13589 int errcode) 13590 { 13591 struct sd_xbuf *xp; 13592 13593 ASSERT(bp != NULL); 13594 ASSERT(un != NULL); 13595 ASSERT(mutex_owned(SD_MUTEX(un))); 13596 xp = SD_GET_XBUF(bp); 13597 ASSERT(xp != NULL); 13598 ASSERT(errcode != 0); 13599 13600 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13601 "sd_return_failed_command_no_restart: entry\n"); 13602 13603 /* 13604 * b_resid could already be nonzero due to a partial data 13605 * transfer, so do not change it here. 13606 */ 13607 SD_BIOERROR(bp, errcode); 13608 13609 /* 13610 * If this is the failfast bp, clear it. This can happen if the 13611 * failfast bp encountered a fatal error when we attempted to 13612 * re-try it (such as a scsi_transport(9F) failure). However 13613 * we should NOT be in an active failfast state if the failfast 13614 * bp is not NULL. 13615 */ 13616 if (bp == un->un_failfast_bp) { 13617 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13618 un->un_failfast_bp = NULL; 13619 } 13620 13621 if (bp == un->un_retry_bp) { 13622 /* 13623 * This command was retried one or more times. Show that we are 13624 * done with it, and allow processing of the waitq to resume. 13625 */ 13626 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13627 "sd_return_failed_command_no_restart: " 13628 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13629 un->un_retry_bp = NULL; 13630 un->un_retry_statp = NULL; 13631 } 13632 13633 SD_UPDATE_RDWR_STATS(un, bp); 13634 SD_UPDATE_PARTITION_STATS(un, bp); 13635 13636 mutex_exit(SD_MUTEX(un)); 13637 13638 if (xp->xb_pktp != NULL) { 13639 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13640 xp->xb_pktp = NULL; 13641 } 13642 13643 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13644 13645 mutex_enter(SD_MUTEX(un)); 13646 13647 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13648 "sd_return_failed_command_no_restart: exit\n"); 13649 } 13650 13651 13652 /* 13653 * Function: sd_retry_command 13654 * 13655 * Description: Queue up a command for retry, or (optionally) fail it 13656 * if retry counts are exhausted. 13657 * 13658 * Arguments: un - Pointer to the sd_lun struct for the target. 13659 * 13660 * bp - Pointer to the buf for the command to be retried. 13661 * 13662 * retry_check_flag - Flag to see which (if any) of the retry 13663 * counts should be decremented/checked. If the indicated 13664 * retry count is exhausted, then the command will not be 13665 * retried; it will be failed instead.
This should use a 13666 * value equal to one of the following: 13667 * 13668 * SD_RETRIES_NOCHECK 13669 * SD_RETRIES_STANDARD 13670 * SD_RETRIES_VICTIM 13671 * 13672 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13673 * if the check should be made to see if FLAG_ISOLATE is set 13674 * in the pkt. If FLAG_ISOLATE is set, then the command is 13675 * not retried, it is simply failed. 13676 * 13677 * user_funcp - Ptr to function to call before dispatching the 13678 * command. May be NULL if no action needs to be performed. 13679 * (Primarily intended for printing messages.) 13680 * 13681 * user_arg - Optional argument to be passed along to 13682 * the user_funcp call. 13683 * 13684 * failure_code - errno return code to set in the bp if the 13685 * command is going to be failed. 13686 * 13687 * retry_delay - Retry delay interval in (clock_t) units. May 13688 * be zero which indicates that the command should be retried 13689 * immediately (ie, without an intervening delay). 13690 * 13691 * statp - Ptr to kstat function to be updated if the command 13692 * is queued for a delayed retry. May be NULL if no kstat 13693 * update is desired. 13694 * 13695 * Context: May be called from interrupt context. 13696 */ 13697 13698 static void 13699 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13700 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13701 code), void *user_arg, int failure_code, clock_t retry_delay, 13702 void (*statp)(kstat_io_t *)) 13703 { 13704 struct sd_xbuf *xp; 13705 struct scsi_pkt *pktp; 13706 13707 ASSERT(un != NULL); 13708 ASSERT(mutex_owned(SD_MUTEX(un))); 13709 ASSERT(bp != NULL); 13710 xp = SD_GET_XBUF(bp); 13711 ASSERT(xp != NULL); 13712 pktp = SD_GET_PKTP(bp); 13713 ASSERT(pktp != NULL); 13714 13715 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13716 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13717 13718 /* 13719 * If we are syncing or dumping, fail the command to avoid 13720 * recursively calling back into scsi_transport(). 13721 */ 13722 if (ddi_in_panic()) { 13723 goto fail_command_no_log; 13724 } 13725 13726 /* 13727 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13728 * log an error and fail the command. 13729 */ 13730 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13731 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13732 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13733 sd_dump_memory(un, SD_LOG_IO, "CDB", 13734 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13735 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13736 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13737 goto fail_command; 13738 } 13739 13740 /* 13741 * If we are suspended, then put the command onto the head of the 13742 * wait queue since we don't want to start more commands, and 13743 * clear the un_retry_bp. When we are resumed, we will 13744 * handle the commands in the wait queue.
13745 */ 13746 switch (un->un_state) { 13747 case SD_STATE_SUSPENDED: 13748 case SD_STATE_DUMPING: 13749 bp->av_forw = un->un_waitq_headp; 13750 un->un_waitq_headp = bp; 13751 if (un->un_waitq_tailp == NULL) { 13752 un->un_waitq_tailp = bp; 13753 } 13754 if (bp == un->un_retry_bp) { 13755 un->un_retry_bp = NULL; 13756 un->un_retry_statp = NULL; 13757 } 13758 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13759 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 13760 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 13761 return; 13762 default: 13763 break; 13764 } 13765 13766 /* 13767 * If the caller wants us to check FLAG_ISOLATE, then see if that 13768 * is set; if it is then we do not want to retry the command. 13769 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 13770 */ 13771 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 13772 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 13773 goto fail_command; 13774 } 13775 } 13776 13777 13778 /* 13779 * If SD_RETRIES_FAILFAST is set, it indicates that either a 13780 * command timeout or a selection timeout has occurred. This means 13781 * that we were unable to establish any kind of communication with 13782 * the target, and subsequent retries and/or commands are likely 13783 * to encounter similar results and take a long time to complete. 13784 * 13785 * If this is a failfast error condition, we need to update the 13786 * failfast state, even if this bp does not have B_FAILFAST set. 13787 */ 13788 if (retry_check_flag & SD_RETRIES_FAILFAST) { 13789 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 13790 ASSERT(un->un_failfast_bp == NULL); 13791 /* 13792 * If we are already in the active failfast state, and 13793 * another failfast error condition has been detected, 13794 * then fail this command if it has B_FAILFAST set. 13795 * If B_FAILFAST is clear, then maintain the legacy 13796 * behavior of retrying heroically, even though this will 13797 * take a lot more time to fail the command. 13798 */ 13799 if (bp->b_flags & B_FAILFAST) { 13800 goto fail_command; 13801 } 13802 } else { 13803 /* 13804 * We're not in the active failfast state, but we 13805 * have a failfast error condition, so we must begin 13806 * transition to the next state. We do this regardless 13807 * of whether or not this bp has B_FAILFAST set. 13808 */ 13809 if (un->un_failfast_bp == NULL) { 13810 /* 13811 * This is the first bp to meet a failfast 13812 * condition so save it on un_failfast_bp & 13813 * do normal retry processing. Do not enter 13814 * active failfast state yet. This marks 13815 * entry into the "failfast pending" state. 13816 */ 13817 un->un_failfast_bp = bp; 13818 13819 } else if (un->un_failfast_bp == bp) { 13820 /* 13821 * This is the second time *this* bp has 13822 * encountered a failfast error condition, 13823 * so enter active failfast state & flush 13824 * queues as appropriate. 13825 */ 13826 un->un_failfast_state = SD_FAILFAST_ACTIVE; 13827 un->un_failfast_bp = NULL; 13828 sd_failfast_flushq(un); 13829 13830 /* 13831 * Fail this bp now if B_FAILFAST set; 13832 * otherwise continue with retries. (It would 13833 * be pretty ironic if this bp succeeded on a 13834 * subsequent retry after we just flushed all 13835 * the queues).
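 *
 * Failfast walk-through (illustrative): the first
 * qualifying error on bp A records it in un_failfast_bp
 * ("pending"); a second qualifying error on the same bp A
 * transitions to SD_FAILFAST_ACTIVE and flushes the
 * queues, as above; any buf carrying B_FAILFAST then
 * fails promptly until a command completes cleanly and
 * the state drops back to SD_FAILFAST_INACTIVE (see
 * sd_return_command()).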
13836 */ 13837 if (bp->b_flags & B_FAILFAST) { 13838 goto fail_command; 13839 } 13840 13841 #if !defined(lint) && !defined(__lint) 13842 } else { 13843 /* 13844 * If neither of the preceding conditionals 13845 * was true, it means that there is some 13846 * *other* bp that has met an initial failfast 13847 * condition and is currently either being 13848 * retried or is waiting to be retried. In 13849 * that case we should perform normal retry 13850 * processing on *this* bp, since there is a 13851 * chance that the current failfast condition 13852 * is transient and recoverable. If that does 13853 * not turn out to be the case, then retries 13854 * will be cleared when the wait queue is 13855 * flushed anyway. 13856 */ 13857 #endif 13858 } 13859 } 13860 } else { 13861 /* 13862 * SD_RETRIES_FAILFAST is clear, which indicates that we 13863 * likely were able to at least establish some level of 13864 * communication with the target and subsequent commands 13865 * and/or retries are likely to get through to the target. 13866 * In this case we want to be aggressive about clearing 13867 * the failfast state. Note that this does not affect 13868 * the "failfast pending" condition. 13869 */ 13870 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13871 } 13872 13873 13874 /* 13875 * Check the specified retry count to see if we can still do 13876 * any retries with this pkt before we should fail it. 13877 */ 13878 switch (retry_check_flag & SD_RETRIES_MASK) { 13879 case SD_RETRIES_VICTIM: 13880 /* 13881 * Check the victim retry count. If exhausted, then fall 13882 * through and check against the standard retry count. 13883 */ 13884 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 13885 /* Increment count & proceed with the retry */ 13886 xp->xb_victim_retry_count++; 13887 break; 13888 } 13889 /* Victim retries exhausted, fall back to std. retries... */ 13890 /* FALLTHRU */ 13891 13892 case SD_RETRIES_STANDARD: 13893 if (xp->xb_retry_count >= un->un_retry_count) { 13894 /* Retries exhausted, fail the command */ 13895 SD_TRACE(SD_LOG_IO_CORE, un, 13896 "sd_retry_command: retries exhausted!\n"); 13897 /* 13898 * Update b_resid for failed SCMD_READ & SCMD_WRITE 13899 * commands with nonzero pkt_resid. 13900 */ 13901 if ((pktp->pkt_reason == CMD_CMPLT) && 13902 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 13903 (pktp->pkt_resid != 0)) { 13904 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 13905 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 13906 SD_UPDATE_B_RESID(bp, pktp); 13907 } 13908 } 13909 goto fail_command; 13910 } 13911 xp->xb_retry_count++; 13912 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13913 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13914 break; 13915 13916 case SD_RETRIES_UA: 13917 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 13918 /* Retries exhausted, fail the command */ 13919 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13920 "Unit Attention retries exhausted. 
" 13921 "Check the target.\n"); 13922 goto fail_command; 13923 } 13924 xp->xb_ua_retry_count++; 13925 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13926 "sd_retry_command: retry count:%d\n", 13927 xp->xb_ua_retry_count); 13928 break; 13929 13930 case SD_RETRIES_BUSY: 13931 if (xp->xb_retry_count >= un->un_busy_retry_count) { 13932 /* Retries exhausted, fail the command */ 13933 SD_TRACE(SD_LOG_IO_CORE, un, 13934 "sd_retry_command: retries exhausted!\n"); 13935 goto fail_command; 13936 } 13937 xp->xb_retry_count++; 13938 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13939 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 13940 break; 13941 13942 case SD_RETRIES_NOCHECK: 13943 default: 13944 /* No retry count to check. Just proceed with the retry */ 13945 break; 13946 } 13947 13948 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13949 13950 /* 13951 * If we were given a zero timeout, we must attempt to retry the 13952 * command immediately (ie, without a delay). 13953 */ 13954 if (retry_delay == 0) { 13955 /* 13956 * Check some limiting conditions to see if we can actually 13957 * do the immediate retry. If we cannot, then we must 13958 * fall back to queueing up a delayed retry. 13959 */ 13960 if (un->un_ncmds_in_transport >= un->un_throttle) { 13961 /* 13962 * We are at the throttle limit for the target, 13963 * fall back to delayed retry. 13964 */ 13965 retry_delay = SD_BSY_TIMEOUT; 13966 statp = kstat_waitq_enter; 13967 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13968 "sd_retry_command: immed. retry hit " 13969 "throttle!\n"); 13970 } else { 13971 /* 13972 * We're clear to proceed with the immediate retry. 13973 * First call the user-provided function (if any) 13974 */ 13975 if (user_funcp != NULL) { 13976 (*user_funcp)(un, bp, user_arg, 13977 SD_IMMEDIATE_RETRY_ISSUED); 13978 #ifdef __lock_lint 13979 sd_print_incomplete_msg(un, bp, user_arg, 13980 SD_IMMEDIATE_RETRY_ISSUED); 13981 sd_print_cmd_incomplete_msg(un, bp, user_arg, 13982 SD_IMMEDIATE_RETRY_ISSUED); 13983 sd_print_sense_failed_msg(un, bp, user_arg, 13984 SD_IMMEDIATE_RETRY_ISSUED); 13985 #endif 13986 } 13987 13988 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13989 "sd_retry_command: issuing immediate retry\n"); 13990 13991 /* 13992 * Call sd_start_cmds() to transport the command to 13993 * the target. 13994 */ 13995 sd_start_cmds(un, bp); 13996 13997 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13998 "sd_retry_command exit\n"); 13999 return; 14000 } 14001 } 14002 14003 /* 14004 * Set up to retry the command after a delay. 14005 * First call the user-provided function (if any) 14006 */ 14007 if (user_funcp != NULL) { 14008 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 14009 } 14010 14011 sd_set_retry_bp(un, bp, retry_delay, statp); 14012 14013 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14014 return; 14015 14016 fail_command: 14017 14018 if (user_funcp != NULL) { 14019 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14020 } 14021 14022 fail_command_no_log: 14023 14024 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14025 "sd_retry_command: returning failed command\n"); 14026 14027 sd_return_failed_command(un, bp, failure_code); 14028 14029 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14030 } 14031 14032 14033 /* 14034 * Function: sd_set_retry_bp 14035 * 14036 * Description: Set up the given bp for retry. 
14037 * 14038 * Arguments: un - ptr to associated softstate 14039 * bp - ptr to buf(9S) for the command 14040 * retry_delay - time interval before issuing retry (may be 0) 14041 * statp - optional pointer to kstat function 14042 * 14043 * Context: May be called under interrupt context 14044 */ 14045 14046 static void 14047 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14048 void (*statp)(kstat_io_t *)) 14049 { 14050 ASSERT(un != NULL); 14051 ASSERT(mutex_owned(SD_MUTEX(un))); 14052 ASSERT(bp != NULL); 14053 14054 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14055 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14056 14057 /* 14058 * Indicate that the command is being retried. This will not allow any 14059 * other commands on the wait queue to be transported to the target 14060 * until this command has been completed (success or failure). The 14061 * "retry command" is not transported to the target until the given 14062 * time delay expires, unless the user specified a 0 retry_delay. 14063 * 14064 * Note: the timeout(9F) callback routine is what actually calls 14065 * sd_start_cmds() to transport the command, with the exception of a 14066 * zero retry_delay. The only current implementor of a zero retry delay 14067 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14068 */ 14069 if (un->un_retry_bp == NULL) { 14070 ASSERT(un->un_retry_statp == NULL); 14071 un->un_retry_bp = bp; 14072 14073 /* 14074 * If the user has not specified a delay the command should 14075 * be queued and no timeout should be scheduled. 14076 */ 14077 if (retry_delay == 0) { 14078 /* 14079 * Save the kstat pointer that will be used in the 14080 * call to SD_UPDATE_KSTATS() below, so that 14081 * sd_start_cmds() can correctly decrement the waitq 14082 * count when it is time to transport this command. 14083 */ 14084 un->un_retry_statp = statp; 14085 goto done; 14086 } 14087 } 14088 14089 if (un->un_retry_bp == bp) { 14090 /* 14091 * Save the kstat pointer that will be used in the call to 14092 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14093 * correctly decrement the waitq count when it is time to 14094 * transport this command. 14095 */ 14096 un->un_retry_statp = statp; 14097 14098 /* 14099 * Schedule a timeout if: 14100 * 1) The user has specified a delay. 14101 * 2) There is not a START_STOP_UNIT callback pending. 14102 * 14103 * If no delay has been specified, then it is up to the caller 14104 * to ensure that IO processing continues without stalling. 14105 * Effectively, this means that the caller will issue the 14106 * required call to sd_start_cmds(). The START_STOP_UNIT 14107 * callback does this after the START STOP UNIT command has 14108 * completed. In either of these cases we should not schedule 14109 * a timeout callback here. Also don't schedule the timeout if 14110 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14111 */ 14112 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14113 (un->un_direct_priority_timeid == NULL)) { 14114 un->un_retry_timeid = 14115 timeout(sd_start_retry_command, un, retry_delay); 14116 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14117 "sd_set_retry_bp: setting timeout: un: 0x%p" 14118 " bp:0x%p un_retry_timeid:0x%p\n", 14119 un, bp, un->un_retry_timeid); 14120 } 14121 } else { 14122 /* 14123 * We only get in here if there is already another command 14124 * waiting to be retried. 
In this case, we just put the 14125 * given command onto the wait queue, so it can be transported 14126 * after the current retry command has completed. 14127 * 14128 * Also we have to make sure that if the command at the head 14129 * of the wait queue is the un_failfast_bp, we do not 14130 * put ahead of it any other commands that are to be retried. 14131 */ 14132 if ((un->un_failfast_bp != NULL) && 14133 (un->un_failfast_bp == un->un_waitq_headp)) { 14134 /* 14135 * Enqueue this command AFTER the first command on 14136 * the wait queue (which is also un_failfast_bp). 14137 */ 14138 bp->av_forw = un->un_waitq_headp->av_forw; 14139 un->un_waitq_headp->av_forw = bp; 14140 if (un->un_waitq_headp == un->un_waitq_tailp) { 14141 un->un_waitq_tailp = bp; 14142 } 14143 } else { 14144 /* Enqueue this command at the head of the waitq. */ 14145 bp->av_forw = un->un_waitq_headp; 14146 un->un_waitq_headp = bp; 14147 if (un->un_waitq_tailp == NULL) { 14148 un->un_waitq_tailp = bp; 14149 } 14150 } 14151 14152 if (statp == NULL) { 14153 statp = kstat_waitq_enter; 14154 } 14155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14156 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14157 } 14158 14159 done: 14160 if (statp != NULL) { 14161 SD_UPDATE_KSTATS(un, statp, bp); 14162 } 14163 14164 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14165 "sd_set_retry_bp: exit un:0x%p\n", un); 14166 } 14167 14168 14169 /* 14170 * Function: sd_start_retry_command 14171 * 14172 * Description: Start the command that has been waiting on the target's 14173 * retry queue. Called from timeout(9F) context after the 14174 * retry delay interval has expired. 14175 * 14176 * Arguments: arg - pointer to associated softstate for the device. 14177 * 14178 * Context: timeout(9F) thread context. May not sleep. 14179 */ 14180 14181 static void 14182 sd_start_retry_command(void *arg) 14183 { 14184 struct sd_lun *un = arg; 14185 14186 ASSERT(un != NULL); 14187 ASSERT(!mutex_owned(SD_MUTEX(un))); 14188 14189 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14190 "sd_start_retry_command: entry\n"); 14191 14192 mutex_enter(SD_MUTEX(un)); 14193 14194 un->un_retry_timeid = NULL; 14195 14196 if (un->un_retry_bp != NULL) { 14197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14198 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14199 un, un->un_retry_bp); 14200 sd_start_cmds(un, un->un_retry_bp); 14201 } 14202 14203 mutex_exit(SD_MUTEX(un)); 14204 14205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14206 "sd_start_retry_command: exit\n"); 14207 } 14208 14209 14210 /* 14211 * Function: sd_start_direct_priority_command 14212 * 14213 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14214 * received TRAN_BUSY when we called scsi_transport() to send it 14215 * to the underlying HBA. This function is called from timeout(9F) 14216 * context after the delay interval has expired. 14217 * 14218 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14219 * 14220 * Context: timeout(9F) thread context. May not sleep.
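 *
 * (This callback is armed in sd_start_cmds() when a
 * SD_PATH_DIRECT_PRIORITY command draws TRAN_BUSY from
 * scsi_transport(); see the timeout() call there.)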
14221 */ 14222 14223 static void 14224 sd_start_direct_priority_command(void *arg) 14225 { 14226 struct buf *priority_bp = arg; 14227 struct sd_lun *un; 14228 14229 ASSERT(priority_bp != NULL); 14230 un = SD_GET_UN(priority_bp); 14231 ASSERT(un != NULL); 14232 ASSERT(!mutex_owned(SD_MUTEX(un))); 14233 14234 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14235 "sd_start_direct_priority_command: entry\n"); 14236 14237 mutex_enter(SD_MUTEX(un)); 14238 un->un_direct_priority_timeid = NULL; 14239 sd_start_cmds(un, priority_bp); 14240 mutex_exit(SD_MUTEX(un)); 14241 14242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14243 "sd_start_direct_priority_command: exit\n"); 14244 } 14245 14246 14247 /* 14248 * Function: sd_send_request_sense_command 14249 * 14250 * Description: Sends a REQUEST SENSE command to the target 14251 * 14252 * Context: May be called from interrupt context. 14253 */ 14254 14255 static void 14256 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14257 struct scsi_pkt *pktp) 14258 { 14259 ASSERT(bp != NULL); 14260 ASSERT(un != NULL); 14261 ASSERT(mutex_owned(SD_MUTEX(un))); 14262 14263 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14264 "entry: buf:0x%p\n", bp); 14265 14266 /* 14267 * If we are syncing or dumping, then fail the command to avoid a 14268 * recursive callback into scsi_transport(). Also fail the command 14269 * if we are suspended (legacy behavior). 14270 */ 14271 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14272 (un->un_state == SD_STATE_DUMPING)) { 14273 sd_return_failed_command(un, bp, EIO); 14274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14275 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14276 return; 14277 } 14278 14279 /* 14280 * Retry the failed command and don't issue the request sense if: 14281 * 1) the sense buf is busy 14282 * 2) we have 1 or more outstanding commands on the target 14283 * (the sense data will be cleared or invalidated anyway) 14284 * 14285 * Note: There could be an issue with not checking a retry limit here; 14286 * the problem is determining which retry limit to check. 14287 */ 14288 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14289 /* Don't retry if the command is flagged as non-retryable */ 14290 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14291 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14292 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 14293 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14294 "sd_send_request_sense_command: " 14295 "at full throttle, retrying exit\n"); 14296 } else { 14297 sd_return_failed_command(un, bp, EIO); 14298 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14299 "sd_send_request_sense_command: " 14300 "at full throttle, non-retryable exit\n"); 14301 } 14302 return; 14303 } 14304 14305 sd_mark_rqs_busy(un, bp); 14306 sd_start_cmds(un, un->un_rqs_bp); 14307 14308 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14309 "sd_send_request_sense_command: exit\n"); 14310 } 14311 14312 14313 /* 14314 * Function: sd_mark_rqs_busy 14315 * 14316 * Description: Indicate that the request sense bp for this instance is 14317 * in use.
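 *
 * Pairing note (drawn from the code below): this routine stashes
 * the failed command in sense_xp->xb_sense_bp and sets FLAG_SENSING
 * on that command's pkt; sd_mark_rqs_idle() reverses both, which is
 * how the original bp is recovered at interrupt time.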
14318 * 14319 * Context: May be called under interrupt context 14320 */ 14321 14322 static void 14323 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14324 { 14325 struct sd_xbuf *sense_xp; 14326 14327 ASSERT(un != NULL); 14328 ASSERT(bp != NULL); 14329 ASSERT(mutex_owned(SD_MUTEX(un))); 14330 ASSERT(un->un_sense_isbusy == 0); 14331 14332 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14333 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14334 14335 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14336 ASSERT(sense_xp != NULL); 14337 14338 SD_INFO(SD_LOG_IO, un, 14339 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14340 14341 ASSERT(sense_xp->xb_pktp != NULL); 14342 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14343 == (FLAG_SENSING | FLAG_HEAD)); 14344 14345 un->un_sense_isbusy = 1; 14346 un->un_rqs_bp->b_resid = 0; 14347 sense_xp->xb_pktp->pkt_resid = 0; 14348 sense_xp->xb_pktp->pkt_reason = 0; 14349 14350 /* So we can get back the bp at interrupt time! */ 14351 sense_xp->xb_sense_bp = bp; 14352 14353 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14354 14355 /* 14356 * Mark this buf as awaiting sense data. (This is already set in 14357 * the pkt_flags for the RQS packet.) 14358 */ 14359 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14360 14361 sense_xp->xb_retry_count = 0; 14362 sense_xp->xb_victim_retry_count = 0; 14363 sense_xp->xb_ua_retry_count = 0; 14364 sense_xp->xb_nr_retry_count = 0; 14365 sense_xp->xb_dma_resid = 0; 14366 14367 /* Clean up the fields for auto-request sense */ 14368 sense_xp->xb_sense_status = 0; 14369 sense_xp->xb_sense_state = 0; 14370 sense_xp->xb_sense_resid = 0; 14371 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14372 14373 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14374 } 14375 14376 14377 /* 14378 * Function: sd_mark_rqs_idle 14379 * 14380 * Description: SD_MUTEX must be held continuously through this routine 14381 * to prevent reuse of the rqs struct before the caller can 14382 * complete its processing. 14383 * 14384 * Return Code: Pointer to the RQS buf 14385 * 14386 * Context: May be called under interrupt context 14387 */ 14388 14389 static struct buf * 14390 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14391 { 14392 struct buf *bp; 14393 ASSERT(un != NULL); 14394 ASSERT(sense_xp != NULL); 14395 ASSERT(mutex_owned(SD_MUTEX(un))); 14396 ASSERT(un->un_sense_isbusy != 0); 14397 14398 un->un_sense_isbusy = 0; 14399 bp = sense_xp->xb_sense_bp; 14400 sense_xp->xb_sense_bp = NULL; 14401 14402 /* This pkt is no longer interested in getting sense data */ 14403 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14404 14405 return (bp); 14406 } 14407 14408 14409 14410 /* 14411 * Function: sd_alloc_rqs 14412 * 14413 * Description: Set up the unit to receive auto request sense data 14414 * 14415 * Return Code: DDI_SUCCESS or DDI_FAILURE 14416 * 14417 * Context: Called under attach(9E) context 14418 */ 14419 14420 static int 14421 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14422 { 14423 struct sd_xbuf *xp; 14424 14425 ASSERT(un != NULL); 14426 ASSERT(!mutex_owned(SD_MUTEX(un))); 14427 ASSERT(un->un_rqs_bp == NULL); 14428 ASSERT(un->un_rqs_pktp == NULL); 14429 14430 /* 14431 * First allocate the required buf and scsi_pkt structs, then set up 14432 * the CDB in the scsi_pkt for a REQUEST SENSE command.
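 *
 * For reference, REQUEST SENSE is a Group 0 (6-byte) CDB with
 * opcode 0x03 (SCMD_REQUEST_SENSE) and the allocation length in
 * byte 4; the scsi_setup_cdb() call below fills in exactly that,
 * using MAX_SENSE_LENGTH as the allocation length.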
14433 */ 14434 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14435 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14436 if (un->un_rqs_bp == NULL) { 14437 return (DDI_FAILURE); 14438 } 14439 14440 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14441 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14442 14443 if (un->un_rqs_pktp == NULL) { 14444 sd_free_rqs(un); 14445 return (DDI_FAILURE); 14446 } 14447 14448 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14449 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14450 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14451 14452 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14453 14454 /* Set up the other needed members in the ARQ scsi_pkt. */ 14455 un->un_rqs_pktp->pkt_comp = sdintr; 14456 un->un_rqs_pktp->pkt_time = sd_io_time; 14457 un->un_rqs_pktp->pkt_flags |= 14458 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14459 14460 /* 14461 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14462 * provide any intpkt, destroypkt routines as we take care of 14463 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14464 */ 14465 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14466 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14467 xp->xb_pktp = un->un_rqs_pktp; 14468 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14469 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14470 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14471 14472 /* 14473 * Save the pointer to the request sense private bp so it can 14474 * be retrieved in sdintr. 14475 */ 14476 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14477 ASSERT(un->un_rqs_bp->b_private == xp); 14478 14479 /* 14480 * See if the HBA supports auto-request sense for the specified 14481 * target/lun. If it does, then try to enable it (if not already 14482 * enabled). 14483 * 14484 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14485 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14486 * return success. However, in both of these cases ARQ is always 14487 * enabled and scsi_ifgetcap will always return true. The best approach 14488 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14489 * 14490 * The third case is an HBA (adp) that always returns enabled on 14491 * scsi_ifgetcap even when ARQ is not enabled; the best approach there 14492 * is to issue a scsi_ifsetcap followed by a scsi_ifgetcap. 14493 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14494 */ 14495 14496 if (un->un_f_is_fibre == TRUE) { 14497 un->un_f_arq_enabled = TRUE; 14498 } else { 14499 #if defined(__i386) || defined(__amd64) 14500 /* 14501 * Circumvent the Adaptec bug, remove this code when 14502 * the bug is fixed 14503 */ 14504 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14505 #endif 14506 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14507 case 0: 14508 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14509 "sd_alloc_rqs: HBA supports ARQ\n"); 14510 /* 14511 * ARQ is supported by this HBA but currently is not 14512 * enabled. Attempt to enable it and if successful then 14513 * mark this instance as ARQ enabled.
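 * (Hedged note: scsi_ifsetcap(9F) returns 1 when the capability is
 * successfully set, which is exactly what the check below tests.)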
14514 */ 14515 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14516 == 1) { 14517 /* Successfully enabled ARQ in the HBA */ 14518 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14519 "sd_alloc_rqs: ARQ enabled\n"); 14520 un->un_f_arq_enabled = TRUE; 14521 } else { 14522 /* Could not enable ARQ in the HBA */ 14523 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14524 "sd_alloc_rqs: failed ARQ enable\n"); 14525 un->un_f_arq_enabled = FALSE; 14526 } 14527 break; 14528 case 1: 14529 /* 14530 * ARQ is supported by this HBA and is already enabled. 14531 * Just mark ARQ as enabled for this instance. 14532 */ 14533 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14534 "sd_alloc_rqs: ARQ already enabled\n"); 14535 un->un_f_arq_enabled = TRUE; 14536 break; 14537 default: 14538 /* 14539 * ARQ is not supported by this HBA; disable it for this 14540 * instance. 14541 */ 14542 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14543 "sd_alloc_rqs: HBA does not support ARQ\n"); 14544 un->un_f_arq_enabled = FALSE; 14545 break; 14546 } 14547 } 14548 14549 return (DDI_SUCCESS); 14550 } 14551 14552 14553 /* 14554 * Function: sd_free_rqs 14555 * 14556 * Description: Cleanup for the per-instance RQS command. 14557 * 14558 * Context: Kernel thread context 14559 */ 14560 14561 static void 14562 sd_free_rqs(struct sd_lun *un) 14563 { 14564 ASSERT(un != NULL); 14565 14566 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14567 14568 /* 14569 * If consistent memory is bound to a scsi_pkt, the pkt 14570 * has to be destroyed *before* freeing the consistent memory. 14571 * Don't change the sequence of these operations. 14572 * scsi_destroy_pkt() might otherwise access memory that has 14573 * already been freed by scsi_free_consistent_buf(). 14574 */ 14575 if (un->un_rqs_pktp != NULL) { 14576 scsi_destroy_pkt(un->un_rqs_pktp); 14577 un->un_rqs_pktp = NULL; 14578 } 14579 14580 if (un->un_rqs_bp != NULL) { 14581 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14582 if (xp != NULL) { 14583 kmem_free(xp, sizeof (struct sd_xbuf)); 14584 } 14585 scsi_free_consistent_buf(un->un_rqs_bp); 14586 un->un_rqs_bp = NULL; 14587 } 14588 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14589 } 14590 14591 14592 14593 /* 14594 * Function: sd_reduce_throttle 14595 * 14596 * Description: Reduces the maximum # of outstanding commands on a 14597 * target to the current number of outstanding commands. 14598 * Queues a timeout(9F) callback to restore the limit 14599 * after a specified interval has elapsed. 14600 * Typically used when we get a TRAN_BUSY return code 14601 * back from scsi_transport().
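 *
 * Worked example (illustrative numbers only): with un_throttle at
 * 32 and seven commands in transport, a TRAN_BUSY saves 32 in
 * un_busy_throttle and drops un_throttle to 7; a QFULL instead
 * clears un_busy_throttle, so sd_restore_throttle() later ramps
 * the limit back up incrementally rather than in one step.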
14602 * 14603 * Arguments: un - ptr to the sd_lun softstate struct 14604 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14605 * 14606 * Context: May be called from interrupt context 14607 */ 14608 14609 static void 14610 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14611 { 14612 ASSERT(un != NULL); 14613 ASSERT(mutex_owned(SD_MUTEX(un))); 14614 ASSERT(un->un_ncmds_in_transport >= 0); 14615 14616 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14617 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14618 un, un->un_throttle, un->un_ncmds_in_transport); 14619 14620 if (un->un_throttle > 1) { 14621 if (un->un_f_use_adaptive_throttle == TRUE) { 14622 switch (throttle_type) { 14623 case SD_THROTTLE_TRAN_BUSY: 14624 if (un->un_busy_throttle == 0) { 14625 un->un_busy_throttle = un->un_throttle; 14626 } 14627 break; 14628 case SD_THROTTLE_QFULL: 14629 un->un_busy_throttle = 0; 14630 break; 14631 default: 14632 ASSERT(FALSE); 14633 } 14634 14635 if (un->un_ncmds_in_transport > 0) { 14636 un->un_throttle = un->un_ncmds_in_transport; 14637 } 14638 14639 } else { 14640 if (un->un_ncmds_in_transport == 0) { 14641 un->un_throttle = 1; 14642 } else { 14643 un->un_throttle = un->un_ncmds_in_transport; 14644 } 14645 } 14646 } 14647 14648 /* Reschedule the timeout if none is currently active */ 14649 if (un->un_reset_throttle_timeid == NULL) { 14650 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14651 un, SD_THROTTLE_RESET_INTERVAL); 14652 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14653 "sd_reduce_throttle: timeout scheduled!\n"); 14654 } 14655 14656 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14657 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14658 } 14659 14660 14661 14662 /* 14663 * Function: sd_restore_throttle 14664 * 14665 * Description: Callback function for timeout(9F). Resets the current 14666 * value of un->un_throttle to its default. 14667 * 14668 * Arguments: arg - pointer to associated softstate for the device. 14669 * 14670 * Context: May be called from interrupt context 14671 */ 14672 14673 static void 14674 sd_restore_throttle(void *arg) 14675 { 14676 struct sd_lun *un = arg; 14677 14678 ASSERT(un != NULL); 14679 ASSERT(!mutex_owned(SD_MUTEX(un))); 14680 14681 mutex_enter(SD_MUTEX(un)); 14682 14683 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14684 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14685 14686 un->un_reset_throttle_timeid = NULL; 14687 14688 if (un->un_f_use_adaptive_throttle == TRUE) { 14689 /* 14690 * If un_busy_throttle is nonzero, then it contains the 14691 * value that un_throttle was when we got a TRAN_BUSY back 14692 * from scsi_transport(). We want to revert to this 14693 * value. 14694 * 14695 * In the QFULL case, the throttle limit will incrementally 14696 * increase until it reaches max throttle. 14697 */ 14698 if (un->un_busy_throttle > 0) { 14699 un->un_throttle = un->un_busy_throttle; 14700 un->un_busy_throttle = 0; 14701 } else { 14702 /* 14703 * Increase the throttle by 10% to open the gate 14704 * slowly; schedule another restore if the saved 14705 * throttle has not been reached. 14706 */ 14707 short throttle; 14708 if (sd_qfull_throttle_enable) { 14709 throttle = un->un_throttle + 14710 max((un->un_throttle / 10), 1); 14711 un->un_throttle = 14712 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 14714 if (un->un_throttle < un->un_saved_throttle) { 14715 un->un_reset_throttle_timeid = 14716 timeout(sd_restore_throttle, 14717 un, 14718 SD_QFULL_THROTTLE_RESET_INTERVAL); 14719 } 14720 } 14721 } 14722 14723 /* 14724 * If un_throttle has fallen below the low-water mark, we 14725 * restore the maximum value here (and allow it to ratchet 14726 * down again if necessary). 14727 */ 14728 if (un->un_throttle < un->un_min_throttle) { 14729 un->un_throttle = un->un_saved_throttle; 14730 } 14731 } else { 14732 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14733 "restoring limit from 0x%x to 0x%x\n", 14734 un->un_throttle, un->un_saved_throttle); 14735 un->un_throttle = un->un_saved_throttle; 14736 } 14737 14738 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14739 "sd_restore_throttle: calling sd_start_cmds!\n"); 14740 14741 sd_start_cmds(un, NULL); 14742 14743 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14744 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14745 un, un->un_throttle); 14746 14747 mutex_exit(SD_MUTEX(un)); 14748 14749 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14750 } 14751 14752 /* 14753 * Function: sdrunout 14754 * 14755 * Description: Callback routine for scsi_init_pkt when a resource allocation 14756 * fails. 14757 * 14758 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 14759 * soft state instance. 14760 * 14761 * Return Code: The scsi_init_pkt routine allows for the callback function to 14762 * return a 0 indicating the callback should be rescheduled or a 1 14763 * indicating not to reschedule. This routine always returns 1 14764 * because the driver always provides a callback function to 14765 * scsi_init_pkt. This results in a callback always being scheduled 14766 * (via the scsi_init_pkt callback implementation) if a resource 14767 * failure occurs. 14768 * 14769 * Context: This callback function may not block or call routines that block 14770 * 14771 * Note: Using the scsi_init_pkt callback facility can result in an I/O 14772 * request persisting at the head of the list which cannot be 14773 * satisfied even after multiple retries. In the future the driver 14774 * may implement some type of maximum runout count before failing 14775 * an I/O. 14776 */ 14777 14778 static int 14779 sdrunout(caddr_t arg) 14780 { 14781 struct sd_lun *un = (struct sd_lun *)arg; 14782 14783 ASSERT(un != NULL); 14784 ASSERT(!mutex_owned(SD_MUTEX(un))); 14785 14786 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 14787 14788 mutex_enter(SD_MUTEX(un)); 14789 sd_start_cmds(un, NULL); 14790 mutex_exit(SD_MUTEX(un)); 14791 /* 14792 * This callback routine always returns 1 (i.e. do not reschedule) 14793 * because we always specify sdrunout as the callback handler for 14794 * scsi_init_pkt inside the call to sd_start_cmds. 14795 */ 14796 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 14797 return (1); 14798 } 14799 14800 14801 /* 14802 * Function: sdintr 14803 * 14804 * Description: Completion callback routine for scsi_pkt(9S) structs 14805 * sent to the HBA driver via scsi_transport(9F).
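 *
 * (Illustrative wiring, as done for the RQS packet in sd_alloc_rqs()
 * above:
 *	pktp->pkt_comp = sdintr;
 *	(void) scsi_transport(pktp);
 * The HBA driver then invokes sdintr() when the command completes.)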
14806 * 14807 * Context: Interrupt context 14808 */ 14809 14810 static void 14811 sdintr(struct scsi_pkt *pktp) 14812 { 14813 struct buf *bp; 14814 struct sd_xbuf *xp; 14815 struct sd_lun *un; 14816 size_t actual_len; 14817 14818 ASSERT(pktp != NULL); 14819 bp = (struct buf *)pktp->pkt_private; 14820 ASSERT(bp != NULL); 14821 xp = SD_GET_XBUF(bp); 14822 ASSERT(xp != NULL); 14823 ASSERT(xp->xb_pktp != NULL); 14824 un = SD_GET_UN(bp); 14825 ASSERT(un != NULL); 14826 ASSERT(!mutex_owned(SD_MUTEX(un))); 14827 14828 #ifdef SD_FAULT_INJECTION 14829 14830 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 14831 /* SD FaultInjection */ 14832 sd_faultinjection(pktp); 14833 14834 #endif /* SD_FAULT_INJECTION */ 14835 14836 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 14837 " xp:0x%p, un:0x%p\n", bp, xp, un); 14838 14839 mutex_enter(SD_MUTEX(un)); 14840 14841 /* Reduce the count of the #commands currently in transport */ 14842 un->un_ncmds_in_transport--; 14843 ASSERT(un->un_ncmds_in_transport >= 0); 14844 14845 /* Increment counter to indicate that the callback routine is active */ 14846 un->un_in_callback++; 14847 14848 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14849 14850 #ifdef SDDEBUG 14851 if (bp == un->un_retry_bp) { 14852 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 14853 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 14854 un, un->un_retry_bp, un->un_ncmds_in_transport); 14855 } 14856 #endif 14857 14858 /* 14859 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 14860 * state if needed. 14861 */ 14862 if (pktp->pkt_reason == CMD_DEV_GONE) { 14863 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14864 "Command failed to complete...Device is gone\n"); 14865 if (un->un_mediastate != DKIO_DEV_GONE) { 14866 un->un_mediastate = DKIO_DEV_GONE; 14867 cv_broadcast(&un->un_state_cv); 14868 } 14869 sd_return_failed_command(un, bp, EIO); 14870 goto exit; 14871 } 14872 14873 if (pktp->pkt_state & STATE_XARQ_DONE) { 14874 SD_TRACE(SD_LOG_COMMON, un, 14875 "sdintr: extra sense data received. pkt=%p\n", pktp); 14876 } 14877 14878 /* 14879 * First see if the pkt has auto-request sense data with it.... 14880 * Look at the packet state first so we don't take a performance 14881 * hit looking at the arq enabled flag unless absolutely necessary. 14882 */ 14883 if ((pktp->pkt_state & STATE_ARQ_DONE) && 14884 (un->un_f_arq_enabled == TRUE)) { 14885 /* 14886 * The HBA did an auto request sense for this command so check 14887 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 14888 * driver command that should not be retried. 14889 */ 14890 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14891 /* 14892 * Save the relevant sense info into the xp for the 14893 * original cmd. 
14894 */ 14895 struct scsi_arq_status *asp; 14896 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 14897 xp->xb_sense_status = 14898 *((uchar_t *)(&(asp->sts_rqpkt_status))); 14899 xp->xb_sense_state = asp->sts_rqpkt_state; 14900 xp->xb_sense_resid = asp->sts_rqpkt_resid; 14901 if (pktp->pkt_state & STATE_XARQ_DONE) { 14902 actual_len = MAX_SENSE_LENGTH - 14903 xp->xb_sense_resid; 14904 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14905 MAX_SENSE_LENGTH); 14906 } else { 14907 if (xp->xb_sense_resid > SENSE_LENGTH) { 14908 actual_len = MAX_SENSE_LENGTH - 14909 xp->xb_sense_resid; 14910 } else { 14911 actual_len = SENSE_LENGTH - 14912 xp->xb_sense_resid; 14913 } 14914 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 14915 if ((((struct uscsi_cmd *) 14916 (xp->xb_pktinfo))->uscsi_rqlen) > 14917 actual_len) { 14918 xp->xb_sense_resid = 14919 (((struct uscsi_cmd *) 14920 (xp->xb_pktinfo))-> 14921 uscsi_rqlen) - actual_len; 14922 } else { 14923 xp->xb_sense_resid = 0; 14924 } 14925 } 14926 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 14927 SENSE_LENGTH); 14928 } 14929 14930 /* fail the command */ 14931 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14932 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 14933 sd_return_failed_command(un, bp, EIO); 14934 goto exit; 14935 } 14936 14937 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 14938 /* 14939 * We want to either retry or fail this command, so free 14940 * the DMA resources here. If we retry the command then 14941 * the DMA resources will be reallocated in sd_start_cmds(). 14942 * Note that when PKT_DMA_PARTIAL is used, this reallocation 14943 * causes the *entire* transfer to start over again from the 14944 * beginning of the request, even for PARTIAL chunks that 14945 * have already transferred successfully. 14946 */ 14947 if ((un->un_f_is_fibre == TRUE) && 14948 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14949 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 14950 scsi_dmafree(pktp); 14951 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14952 } 14953 #endif 14954 14955 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14956 "sdintr: arq done, sd_handle_auto_request_sense\n"); 14957 14958 sd_handle_auto_request_sense(un, bp, xp, pktp); 14959 goto exit; 14960 } 14961 14962 /* Next see if this is the REQUEST SENSE pkt for the instance */ 14963 if (pktp->pkt_flags & FLAG_SENSING) { 14964 /* This pktp is from the unit's REQUEST_SENSE command */ 14965 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14966 "sdintr: sd_handle_request_sense\n"); 14967 sd_handle_request_sense(un, bp, xp, pktp); 14968 goto exit; 14969 } 14970 14971 /* 14972 * Check to see if the command successfully completed as requested; 14973 * this is the most common case (and also the hot performance path). 14974 * 14975 * Requirements for successful completion are: 14976 * pkt_reason is CMD_CMPLT and packet status is status good. 14977 * In addition: 14978 * - A residual of zero indicates successful completion no matter what 14979 * the command is. 14980 * - If the residual is not zero and the command is not a read or 14981 * write, then it's still defined as successful completion. In other 14982 * words, if the command is a read or write the residual must be 14983 * zero for successful completion. 14984 * - If the residual is not zero and the command is a read or 14985 * write, and it's a USCSICMD, then it's still defined as 14986 * successful completion. 
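 *
 * Summary of the checks below (illustrative):
 *	resid == 0			-> success, any command
 *	resid != 0, not READ/WRITE	-> success, b_resid updated
 *	resid != 0, READ/WRITE, USCSI	-> success, b_resid updated
 *	resid != 0, READ/WRITE, buf I/O	-> not successful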
14987 */ 14988 if ((pktp->pkt_reason == CMD_CMPLT) && 14989 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 14990 14991 /* 14992 * Since this command is returned with a good status, we 14993 * can reset the count for Sonoma failover. 14994 */ 14995 un->un_sonoma_failure_count = 0; 14996 14997 /* 14998 * Return all USCSI commands on good status 14999 */ 15000 if (pktp->pkt_resid == 0) { 15001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15002 "sdintr: returning command for resid == 0\n"); 15003 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15004 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15005 SD_UPDATE_B_RESID(bp, pktp); 15006 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15007 "sdintr: returning command for resid != 0\n"); 15008 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15009 SD_UPDATE_B_RESID(bp, pktp); 15010 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15011 "sdintr: returning uscsi command\n"); 15012 } else { 15013 goto not_successful; 15014 } 15015 sd_return_command(un, bp); 15016 15017 /* 15018 * Decrement counter to indicate that the callback routine 15019 * is done. 15020 */ 15021 un->un_in_callback--; 15022 ASSERT(un->un_in_callback >= 0); 15023 mutex_exit(SD_MUTEX(un)); 15024 15025 return; 15026 } 15027 15028 not_successful: 15029 15030 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15031 /* 15032 * The following is based upon knowledge of the underlying transport 15033 * and its use of DMA resources. This code should be removed when 15034 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15035 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15036 * and sd_start_cmds(). 15037 * 15038 * Free any DMA resources associated with this command if there 15039 * is a chance it could be retried or enqueued for later retry. 15040 * If we keep the DMA binding then mpxio cannot reissue the 15041 * command on another path whenever a path failure occurs. 15042 * 15043 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15044 * causes the *entire* transfer to start over again from the 15045 * beginning of the request, even for PARTIAL chunks that 15046 * have already transferred successfully. 15047 * 15048 * This is only done for non-uscsi commands (and also skipped for the 15049 * driver's internal RQS command). Also just do this for Fibre Channel 15050 * devices as these are the only ones that support mpxio. 15051 */ 15052 if ((un->un_f_is_fibre == TRUE) && 15053 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15054 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15055 scsi_dmafree(pktp); 15056 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15057 } 15058 #endif 15059 15060 /* 15061 * The command did not successfully complete as requested so check 15062 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15063 * driver command that should not be retried so just return. If 15064 * FLAG_DIAGNOSE is not set the error will be processed below. 15065 */ 15066 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15067 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15068 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15069 /* 15070 * Issue a request sense if a check condition caused the error 15071 * (we handle the auto request sense case above), otherwise 15072 * just fail the command. 
15073 */ 15074 if ((pktp->pkt_reason == CMD_CMPLT) && 15075 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15076 sd_send_request_sense_command(un, bp, pktp); 15077 } else { 15078 sd_return_failed_command(un, bp, EIO); 15079 } 15080 goto exit; 15081 } 15082 15083 /* 15084 * The command did not successfully complete as requested so process 15085 * the error, retry, and/or attempt recovery. 15086 */ 15087 switch (pktp->pkt_reason) { 15088 case CMD_CMPLT: 15089 switch (SD_GET_PKT_STATUS(pktp)) { 15090 case STATUS_GOOD: 15091 /* 15092 * The command completed successfully with a non-zero 15093 * residual 15094 */ 15095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15096 "sdintr: STATUS_GOOD \n"); 15097 sd_pkt_status_good(un, bp, xp, pktp); 15098 break; 15099 15100 case STATUS_CHECK: 15101 case STATUS_TERMINATED: 15102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15103 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15104 sd_pkt_status_check_condition(un, bp, xp, pktp); 15105 break; 15106 15107 case STATUS_BUSY: 15108 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15109 "sdintr: STATUS_BUSY\n"); 15110 sd_pkt_status_busy(un, bp, xp, pktp); 15111 break; 15112 15113 case STATUS_RESERVATION_CONFLICT: 15114 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15115 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15116 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15117 break; 15118 15119 case STATUS_QFULL: 15120 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15121 "sdintr: STATUS_QFULL\n"); 15122 sd_pkt_status_qfull(un, bp, xp, pktp); 15123 break; 15124 15125 case STATUS_MET: 15126 case STATUS_INTERMEDIATE: 15127 case STATUS_SCSI2: 15128 case STATUS_INTERMEDIATE_MET: 15129 case STATUS_ACA_ACTIVE: 15130 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15131 "Unexpected SCSI status received: 0x%x\n", 15132 SD_GET_PKT_STATUS(pktp)); 15133 sd_return_failed_command(un, bp, EIO); 15134 break; 15135 15136 default: 15137 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15138 "Invalid SCSI status received: 0x%x\n", 15139 SD_GET_PKT_STATUS(pktp)); 15140 sd_return_failed_command(un, bp, EIO); 15141 break; 15142 15143 } 15144 break; 15145 15146 case CMD_INCOMPLETE: 15147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15148 "sdintr: CMD_INCOMPLETE\n"); 15149 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15150 break; 15151 case CMD_TRAN_ERR: 15152 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15153 "sdintr: CMD_TRAN_ERR\n"); 15154 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15155 break; 15156 case CMD_RESET: 15157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15158 "sdintr: CMD_RESET \n"); 15159 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15160 break; 15161 case CMD_ABORTED: 15162 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15163 "sdintr: CMD_ABORTED \n"); 15164 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15165 break; 15166 case CMD_TIMEOUT: 15167 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15168 "sdintr: CMD_TIMEOUT\n"); 15169 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15170 break; 15171 case CMD_UNX_BUS_FREE: 15172 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15173 "sdintr: CMD_UNX_BUS_FREE \n"); 15174 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15175 break; 15176 case CMD_TAG_REJECT: 15177 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15178 "sdintr: CMD_TAG_REJECT\n"); 15179 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15180 break; 15181 default: 15182 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15183 "sdintr: default\n"); 15184 sd_pkt_reason_default(un, bp, xp, pktp); 15185 break; 15186 } 15187 15188 exit: 15189 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15190 15191 /* Decrement counter to indicate that the callback routine is done. */ 15192 un->un_in_callback--; 15193 ASSERT(un->un_in_callback >= 0); 15194 15195 /* 15196 * At this point, the pkt has been dispatched, ie, it is either 15197 * being re-tried or has been returned to its caller and should 15198 * not be referenced. 15199 */ 15200 15201 mutex_exit(SD_MUTEX(un)); 15202 } 15203 15204 15205 /* 15206 * Function: sd_print_incomplete_msg 15207 * 15208 * Description: Prints the error message for a CMD_INCOMPLETE error. 15209 * 15210 * Arguments: un - ptr to associated softstate for the device. 15211 * bp - ptr to the buf(9S) for the command. 15212 * arg - message string ptr 15213 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15214 * or SD_NO_RETRY_ISSUED. 15215 * 15216 * Context: May be called under interrupt context 15217 */ 15218 15219 static void 15220 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15221 { 15222 struct scsi_pkt *pktp; 15223 char *msgp; 15224 char *cmdp = arg; 15225 15226 ASSERT(un != NULL); 15227 ASSERT(mutex_owned(SD_MUTEX(un))); 15228 ASSERT(bp != NULL); 15229 ASSERT(arg != NULL); 15230 pktp = SD_GET_PKTP(bp); 15231 ASSERT(pktp != NULL); 15232 15233 switch (code) { 15234 case SD_DELAYED_RETRY_ISSUED: 15235 case SD_IMMEDIATE_RETRY_ISSUED: 15236 msgp = "retrying"; 15237 break; 15238 case SD_NO_RETRY_ISSUED: 15239 default: 15240 msgp = "giving up"; 15241 break; 15242 } 15243 15244 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15245 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15246 "incomplete %s- %s\n", cmdp, msgp); 15247 } 15248 } 15249 15250 15251 15252 /* 15253 * Function: sd_pkt_status_good 15254 * 15255 * Description: Processing for a STATUS_GOOD code in pkt_status. 15256 * 15257 * Context: May be called under interrupt context 15258 */ 15259 15260 static void 15261 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15262 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15263 { 15264 char *cmdp; 15265 15266 ASSERT(un != NULL); 15267 ASSERT(mutex_owned(SD_MUTEX(un))); 15268 ASSERT(bp != NULL); 15269 ASSERT(xp != NULL); 15270 ASSERT(pktp != NULL); 15271 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15272 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15273 ASSERT(pktp->pkt_resid != 0); 15274 15275 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15276 15277 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15278 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15279 case SCMD_READ: 15280 cmdp = "read"; 15281 break; 15282 case SCMD_WRITE: 15283 cmdp = "write"; 15284 break; 15285 default: 15286 SD_UPDATE_B_RESID(bp, pktp); 15287 sd_return_command(un, bp); 15288 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15289 return; 15290 } 15291 15292 /* 15293 * See if we can retry the read/write, preferably immediately. 15294 * If retries are exhausted, then sd_retry_command() will update 15295 * the b_resid count. 15296 */ 15297 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15298 cmdp, EIO, (clock_t)0, NULL); 15299 15300 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15301 } 15302 15303 15304 15305 15306 15307 /* 15308 * Function: sd_handle_request_sense 15309 * 15310 * Description: Processing for non-auto Request Sense command.
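 * (The RQS buf carries a pointer back to the original command in
 * sense_xp->xb_sense_bp, recorded earlier by sd_mark_rqs_busy();
 * that is how the sense data is re-associated with the command
 * that triggered the REQUEST SENSE.)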
15311 * 15312 * Arguments: un - ptr to associated softstate 15313 * sense_bp - ptr to buf(9S) for the RQS command 15314 * sense_xp - ptr to the sd_xbuf for the RQS command 15315 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15316 * 15317 * Context: May be called under interrupt context 15318 */ 15319 15320 static void 15321 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15322 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15323 { 15324 struct buf *cmd_bp; /* buf for the original command */ 15325 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15326 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15327 size_t actual_len; /* actual sense data length */ 15328 15329 ASSERT(un != NULL); 15330 ASSERT(mutex_owned(SD_MUTEX(un))); 15331 ASSERT(sense_bp != NULL); 15332 ASSERT(sense_xp != NULL); 15333 ASSERT(sense_pktp != NULL); 15334 15335 /* 15336 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15337 * RQS command and not the original command. 15338 */ 15339 ASSERT(sense_pktp == un->un_rqs_pktp); 15340 ASSERT(sense_bp == un->un_rqs_bp); 15341 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15342 (FLAG_SENSING | FLAG_HEAD)); 15343 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15344 FLAG_SENSING) == FLAG_SENSING); 15345 15346 /* These are the bp, xp, and pktp for the original command */ 15347 cmd_bp = sense_xp->xb_sense_bp; 15348 cmd_xp = SD_GET_XBUF(cmd_bp); 15349 cmd_pktp = SD_GET_PKTP(cmd_bp); 15350 15351 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15352 /* 15353 * The REQUEST SENSE command failed. Release the REQUEST 15354 * SENSE command for re-use, get back the bp for the original 15355 * command, and attempt to re-try the original command if 15356 * FLAG_DIAGNOSE is not set in the original packet. 15357 */ 15358 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15359 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15360 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15361 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15362 NULL, NULL, EIO, (clock_t)0, NULL); 15363 return; 15364 } 15365 } 15366 15367 /* 15368 * Save the relevant sense info into the xp for the original cmd. 15369 * 15370 * Note: if the request sense failed the state info will be zero 15371 * as set in sd_mark_rqs_busy() 15372 */ 15373 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15374 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15375 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15376 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15377 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15378 SENSE_LENGTH)) { 15379 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15380 MAX_SENSE_LENGTH); 15381 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15382 } else { 15383 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15384 SENSE_LENGTH); 15385 if (actual_len < SENSE_LENGTH) { 15386 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15387 } else { 15388 cmd_xp->xb_sense_resid = 0; 15389 } 15390 } 15391 15392 /* 15393 * Free up the RQS command.... 15394 * NOTE: 15395 * Must do this BEFORE calling sd_validate_sense_data! 15396 * sd_validate_sense_data may return the original command in 15397 * which case the pkt will be freed and the flags can no 15398 * longer be touched. 15399 * SD_MUTEX is held through this process until the command 15400 * is dispatched based upon the sense data, so there are 15401 * no race conditions. 
15402 */ 15403 (void) sd_mark_rqs_idle(un, sense_xp); 15404 15405 /* 15406 * For a retryable command see if we have valid sense data, if so then 15407 * turn it over to sd_decode_sense() to figure out the right course of 15408 * action. Just fail a non-retryable command. 15409 */ 15410 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15411 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15412 SD_SENSE_DATA_IS_VALID) { 15413 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15414 } 15415 } else { 15416 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15417 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15418 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15419 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15420 sd_return_failed_command(un, cmd_bp, EIO); 15421 } 15422 } 15423 15424 15425 15426 15427 /* 15428 * Function: sd_handle_auto_request_sense 15429 * 15430 * Description: Processing for auto-request sense information. 15431 * 15432 * Arguments: un - ptr to associated softstate 15433 * bp - ptr to buf(9S) for the command 15434 * xp - ptr to the sd_xbuf for the command 15435 * pktp - ptr to the scsi_pkt(9S) for the command 15436 * 15437 * Context: May be called under interrupt context 15438 */ 15439 15440 static void 15441 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15442 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15443 { 15444 struct scsi_arq_status *asp; 15445 size_t actual_len; 15446 15447 ASSERT(un != NULL); 15448 ASSERT(mutex_owned(SD_MUTEX(un))); 15449 ASSERT(bp != NULL); 15450 ASSERT(xp != NULL); 15451 ASSERT(pktp != NULL); 15452 ASSERT(pktp != un->un_rqs_pktp); 15453 ASSERT(bp != un->un_rqs_bp); 15454 15455 /* 15456 * For auto-request sense, we get a scsi_arq_status back from 15457 * the HBA, with the sense data in the sts_sensedata member. 15458 * The pkt_scbp of the packet points to this scsi_arq_status. 15459 */ 15460 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15461 15462 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15463 /* 15464 * The auto REQUEST SENSE failed; see if we can re-try 15465 * the original command. 15466 */ 15467 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15468 "auto request sense failed (reason=%s)\n", 15469 scsi_rname(asp->sts_rqpkt_reason)); 15470 15471 sd_reset_target(un, pktp); 15472 15473 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15474 NULL, NULL, EIO, (clock_t)0, NULL); 15475 return; 15476 } 15477 15478 /* Save the relevant sense info into the xp for the original cmd. 
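 * (Worked example with assumed constants: if SENSE_LENGTH were 20
 * and MAX_SENSE_LENGTH 255, an XARQ completion with a residual of
 * 235 would yield 255 - 235 = 20 valid bytes, while a non-XARQ
 * completion with a residual of 2 would yield 20 - 2 = 18. The
 * values are illustrative only; see sddef.h for the real ones.)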
*/ 15479 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15480 xp->xb_sense_state = asp->sts_rqpkt_state; 15481 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15482 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15483 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15484 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15485 MAX_SENSE_LENGTH); 15486 } else { 15487 if (xp->xb_sense_resid > SENSE_LENGTH) { 15488 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15489 } else { 15490 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15491 } 15492 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15493 if ((((struct uscsi_cmd *) 15494 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15495 xp->xb_sense_resid = (((struct uscsi_cmd *) 15496 (xp->xb_pktinfo))->uscsi_rqlen) - 15497 actual_len; 15498 } else { 15499 xp->xb_sense_resid = 0; 15500 } 15501 } 15502 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15503 } 15504 15505 /* 15506 * See if we have valid sense data, if so then turn it over to 15507 * sd_decode_sense() to figure out the right course of action. 15508 */ 15509 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15510 SD_SENSE_DATA_IS_VALID) { 15511 sd_decode_sense(un, bp, xp, pktp); 15512 } 15513 } 15514 15515 15516 /* 15517 * Function: sd_print_sense_failed_msg 15518 * 15519 * Description: Print log message when RQS has failed. 15520 * 15521 * Arguments: un - ptr to associated softstate 15522 * bp - ptr to buf(9S) for the command 15523 * arg - generic message string ptr 15524 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15525 * or SD_NO_RETRY_ISSUED 15526 * 15527 * Context: May be called from interrupt context 15528 */ 15529 15530 static void 15531 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15532 int code) 15533 { 15534 char *msgp = arg; 15535 15536 ASSERT(un != NULL); 15537 ASSERT(mutex_owned(SD_MUTEX(un))); 15538 ASSERT(bp != NULL); 15539 15540 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15542 } 15543 } 15544 15545 15546 /* 15547 * Function: sd_validate_sense_data 15548 * 15549 * Description: Check the given sense data for validity. 15550 * If the sense data is not valid, the command will 15551 * be either failed or retried! 15552 * 15553 * Return Code: SD_SENSE_DATA_IS_INVALID 15554 * SD_SENSE_DATA_IS_VALID 15555 * 15556 * Context: May be called from interrupt context 15557 */ 15558 15559 static int 15560 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15561 size_t actual_len) 15562 { 15563 struct scsi_extended_sense *esp; 15564 struct scsi_pkt *pktp; 15565 char *msgp = NULL; 15566 15567 ASSERT(un != NULL); 15568 ASSERT(mutex_owned(SD_MUTEX(un))); 15569 ASSERT(bp != NULL); 15570 ASSERT(bp != un->un_rqs_bp); 15571 ASSERT(xp != NULL); 15572 15573 pktp = SD_GET_PKTP(bp); 15574 ASSERT(pktp != NULL); 15575 15576 /* 15577 * Check the status of the RQS command (auto or manual). 
15578 */ 15579 switch (xp->xb_sense_status & STATUS_MASK) { 15580 case STATUS_GOOD: 15581 break; 15582 15583 case STATUS_RESERVATION_CONFLICT: 15584 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15585 return (SD_SENSE_DATA_IS_INVALID); 15586 15587 case STATUS_BUSY: 15588 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15589 "Busy Status on REQUEST SENSE\n"); 15590 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15591 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15592 return (SD_SENSE_DATA_IS_INVALID); 15593 15594 case STATUS_QFULL: 15595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15596 "QFULL Status on REQUEST SENSE\n"); 15597 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15598 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 15599 return (SD_SENSE_DATA_IS_INVALID); 15600 15601 case STATUS_CHECK: 15602 case STATUS_TERMINATED: 15603 msgp = "Check Condition on REQUEST SENSE\n"; 15604 goto sense_failed; 15605 15606 default: 15607 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15608 goto sense_failed; 15609 } 15610 15611 /* 15612 * See if we got the minimum required amount of sense data. 15613 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15614 * or less. 15615 */ 15616 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15617 (actual_len == 0)) { 15618 msgp = "Request Sense couldn't get sense data\n"; 15619 goto sense_failed; 15620 } 15621 15622 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15623 msgp = "Not enough sense information\n"; 15624 goto sense_failed; 15625 } 15626 15627 /* 15628 * We require the extended sense data 15629 */ 15630 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15631 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15632 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15633 static char tmp[8]; 15634 static char buf[148]; 15635 char *p = (char *)(xp->xb_sense_data); 15636 int i; 15637 15638 mutex_enter(&sd_sense_mutex); 15639 (void) strcpy(buf, "undecodable sense information:"); 15640 for (i = 0; i < actual_len; i++) { 15641 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15642 (void) strcpy(&buf[strlen(buf)], tmp); 15643 } 15644 i = strlen(buf); 15645 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15646 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15647 mutex_exit(&sd_sense_mutex); 15648 } 15649 /* Note: Legacy behavior, fail the command with no retry */ 15650 sd_return_failed_command(un, bp, EIO); 15651 return (SD_SENSE_DATA_IS_INVALID); 15652 } 15653 15654 /* 15655 * Check that es_code is valid (es_class concatenated with es_code 15656 * makes up the "response code" field; es_class will always be 7, so 15657 * make sure es_code is 0, 1, 2, 3 or 0xf). es_code indicates the 15658 * sense data format. 15659 */ 15660 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15661 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15662 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15663 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15664 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15665 goto sense_failed; 15666 } 15667 15668 return (SD_SENSE_DATA_IS_VALID); 15669 15670 sense_failed: 15671 /* 15672 * If the request sense failed (for whatever reason), attempt 15673 * to retry the original command. 15674 */ 15675 #if defined(__i386) || defined(__amd64) 15676 /* 15677 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15678 * sddef.h for the SPARC platform, while x86 uses one binary 15679 * for both SCSI/FC.
The SD_RETRY_DELAY value needs to be adjusted here 15681 * whenever SD_RETRY_DELAY changes in sddef.h 15682 */ 15683 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15684 sd_print_sense_failed_msg, msgp, EIO, 15685 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15686 #else 15687 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15688 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15689 #endif 15690 15691 return (SD_SENSE_DATA_IS_INVALID); 15692 } 15693 15694 15695 15696 /* 15697 * Function: sd_decode_sense 15698 * 15699 * Description: Take recovery action(s) when SCSI Sense Data is received. 15700 * 15701 * Context: Interrupt context. 15702 */ 15703 15704 static void 15705 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15706 struct scsi_pkt *pktp) 15707 { 15708 uint8_t sense_key; 15709 15710 ASSERT(un != NULL); 15711 ASSERT(mutex_owned(SD_MUTEX(un))); 15712 ASSERT(bp != NULL); 15713 ASSERT(bp != un->un_rqs_bp); 15714 ASSERT(xp != NULL); 15715 ASSERT(pktp != NULL); 15716 15717 sense_key = scsi_sense_key(xp->xb_sense_data); 15718 15719 switch (sense_key) { 15720 case KEY_NO_SENSE: 15721 sd_sense_key_no_sense(un, bp, xp, pktp); 15722 break; 15723 case KEY_RECOVERABLE_ERROR: 15724 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15725 bp, xp, pktp); 15726 break; 15727 case KEY_NOT_READY: 15728 sd_sense_key_not_ready(un, xp->xb_sense_data, 15729 bp, xp, pktp); 15730 break; 15731 case KEY_MEDIUM_ERROR: 15732 case KEY_HARDWARE_ERROR: 15733 sd_sense_key_medium_or_hardware_error(un, 15734 xp->xb_sense_data, bp, xp, pktp); 15735 break; 15736 case KEY_ILLEGAL_REQUEST: 15737 sd_sense_key_illegal_request(un, bp, xp, pktp); 15738 break; 15739 case KEY_UNIT_ATTENTION: 15740 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15741 bp, xp, pktp); 15742 break; 15743 case KEY_WRITE_PROTECT: 15744 case KEY_VOLUME_OVERFLOW: 15745 case KEY_MISCOMPARE: 15746 sd_sense_key_fail_command(un, bp, xp, pktp); 15747 break; 15748 case KEY_BLANK_CHECK: 15749 sd_sense_key_blank_check(un, bp, xp, pktp); 15750 break; 15751 case KEY_ABORTED_COMMAND: 15752 sd_sense_key_aborted_command(un, bp, xp, pktp); 15753 break; 15754 case KEY_VENDOR_UNIQUE: 15755 case KEY_COPY_ABORTED: 15756 case KEY_EQUAL: 15757 case KEY_RESERVED: 15758 default: 15759 sd_sense_key_default(un, xp->xb_sense_data, 15760 bp, xp, pktp); 15761 break; 15762 } 15763 } 15764 15765 15766 /* 15767 * Function: sd_dump_memory 15768 * 15769 * Description: Debug logging routine to print the contents of a user provided 15770 * buffer. The output of the buffer is broken up into 256 byte 15771 * segments due to a size constraint of the scsi_log 15772 * implementation.
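 *
 * (Usage sketch, as it appears later in this file:
 *	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
 *	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);)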
15773 * 15774 * Arguments: un - ptr to softstate 15775 * comp - component mask 15776 * title - "title" string to precede data when printed 15777 * data - ptr to data block to be printed 15778 * len - size of data block to be printed 15779 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 15780 * 15781 * Context: May be called from interrupt context 15782 */ 15783 15784 #define SD_DUMP_MEMORY_BUF_SIZE 256 15785 15786 static char *sd_dump_format_string[] = { 15787 " 0x%02x", 15788 " %c" 15789 }; 15790 15791 static void 15792 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 15793 int len, int fmt) 15794 { 15795 int i, j; 15796 int avail_count; 15797 int start_offset; 15798 int end_offset; 15799 size_t entry_len; 15800 char *bufp; 15801 char *local_buf; 15802 char *format_string; 15803 15804 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 15805 15806 /* 15807 * In the debug version of the driver, this function is called from a 15808 * number of places which are NOPs in the release driver. 15809 * The debug driver therefore has additional methods of filtering 15810 * debug output. 15811 */ 15812 #ifdef SDDEBUG 15813 /* 15814 * In the debug version of the driver we can reduce the amount of debug 15815 * messages by setting sd_error_level to something other than 15816 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 15817 * sd_component_mask. 15818 */ 15819 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 15820 (sd_error_level != SCSI_ERR_ALL)) { 15821 return; 15822 } 15823 if (((sd_component_mask & comp) == 0) || 15824 (sd_error_level != SCSI_ERR_ALL)) { 15825 return; 15826 } 15827 #else 15828 if (sd_error_level != SCSI_ERR_ALL) { 15829 return; 15830 } 15831 #endif 15832 15833 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 15834 bufp = local_buf; 15835 /* 15836 * Available length is the length of local_buf[], minus the 15837 * length of the title string, minus one for the ":", minus 15838 * one for the newline, minus one for the NULL terminator. 15839 * This gives the #bytes available for holding the printed 15840 * values from the given data buffer. 15841 */ 15842 if (fmt == SD_LOG_HEX) { 15843 format_string = sd_dump_format_string[0]; 15844 } else /* SD_LOG_CHAR */ { 15845 format_string = sd_dump_format_string[1]; 15846 } 15847 /* 15848 * Available count is the number of elements from the given 15849 * data buffer that we can fit into the available length. 15850 * This is based upon the size of the format string used. 15851 * Make one entry and find its size. 15852 */ 15853 (void) sprintf(bufp, format_string, data[0]); 15854 entry_len = strlen(bufp); 15855 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 15856 15857 j = 0; 15858 while (j < len) { 15859 bufp = local_buf; 15860 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 15861 start_offset = j; 15862 15863 end_offset = start_offset + avail_count; 15864 15865 (void) sprintf(bufp, "%s:", title); 15866 bufp += strlen(bufp); 15867 for (i = start_offset; ((i < end_offset) && (j < len)); 15868 i++, j++) { 15869 (void) sprintf(bufp, format_string, data[i]); 15870 bufp += entry_len; 15871 } 15872 (void) sprintf(bufp, "\n"); 15873 15874 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 15875 } 15876 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 15877 } 15878 15879 /* 15880 * Function: sd_print_sense_msg 15881 * 15882 * Description: Log a message based upon the given sense data.
15883 * 15884 * Arguments: un - ptr to associated softstate 15885 * bp - ptr to buf(9S) for the command 15886 * arg - ptr to associate sd_sense_info struct 15887 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15888 * or SD_NO_RETRY_ISSUED 15889 * 15890 * Context: May be called from interrupt context 15891 */ 15892 15893 static void 15894 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15895 { 15896 struct sd_xbuf *xp; 15897 struct scsi_pkt *pktp; 15898 uint8_t *sensep; 15899 daddr_t request_blkno; 15900 diskaddr_t err_blkno; 15901 int severity; 15902 int pfa_flag; 15903 extern struct scsi_key_strings scsi_cmds[]; 15904 15905 ASSERT(un != NULL); 15906 ASSERT(mutex_owned(SD_MUTEX(un))); 15907 ASSERT(bp != NULL); 15908 xp = SD_GET_XBUF(bp); 15909 ASSERT(xp != NULL); 15910 pktp = SD_GET_PKTP(bp); 15911 ASSERT(pktp != NULL); 15912 ASSERT(arg != NULL); 15913 15914 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 15915 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 15916 15917 if ((code == SD_DELAYED_RETRY_ISSUED) || 15918 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 15919 severity = SCSI_ERR_RETRYABLE; 15920 } 15921 15922 /* Use absolute block number for the request block number */ 15923 request_blkno = xp->xb_blkno; 15924 15925 /* 15926 * Now try to get the error block number from the sense data 15927 */ 15928 sensep = xp->xb_sense_data; 15929 15930 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 15931 (uint64_t *)&err_blkno)) { 15932 /* 15933 * We retrieved the error block number from the information 15934 * portion of the sense data. 15935 * 15936 * For USCSI commands we are better off using the error 15937 * block no. as the requested block no. (This is the best 15938 * we can estimate.) 15939 */ 15940 if ((SD_IS_BUFIO(xp) == FALSE) && 15941 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 15942 request_blkno = err_blkno; 15943 } 15944 } else { 15945 /* 15946 * Without the es_valid bit set (for fixed format) or an 15947 * information descriptor (for descriptor format) we cannot 15948 * be certain of the error blkno, so just use the 15949 * request_blkno. 15950 */ 15951 err_blkno = (diskaddr_t)request_blkno; 15952 } 15953 15954 /* 15955 * The following will log the buffer contents for the release driver 15956 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 15957 * level is set to verbose. 
15958 */ 15959 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 15960 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15961 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15962 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 15963 15964 if (pfa_flag == FALSE) { 15965 /* This is normally only set for USCSI */ 15966 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 15967 return; 15968 } 15969 15970 if ((SD_IS_BUFIO(xp) == TRUE) && 15971 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 15972 (severity < sd_error_level))) { 15973 return; 15974 } 15975 } 15976 15977 /* 15978 * Check for Sonoma Failover and keep a count of how many failed I/O's 15979 */ 15980 if ((SD_IS_LSI(un)) && 15981 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 15982 (scsi_sense_asc(sensep) == 0x94) && 15983 (scsi_sense_ascq(sensep) == 0x01)) { 15984 un->un_sonoma_failure_count++; 15985 if (un->un_sonoma_failure_count > 1) { 15986 return; 15987 } 15988 } 15989 15990 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 15991 request_blkno, err_blkno, scsi_cmds, 15992 (struct scsi_extended_sense *)sensep, 15993 un->un_additional_codes, NULL); 15994 } 15995 15996 /* 15997 * Function: sd_sense_key_no_sense 15998 * 15999 * Description: Recovery action when sense data was not received. 16000 * 16001 * Context: May be called from interrupt context 16002 */ 16003 16004 static void 16005 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16006 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16007 { 16008 struct sd_sense_info si; 16009 16010 ASSERT(un != NULL); 16011 ASSERT(mutex_owned(SD_MUTEX(un))); 16012 ASSERT(bp != NULL); 16013 ASSERT(xp != NULL); 16014 ASSERT(pktp != NULL); 16015 16016 si.ssi_severity = SCSI_ERR_FATAL; 16017 si.ssi_pfa_flag = FALSE; 16018 16019 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16020 16021 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16022 &si, EIO, (clock_t)0, NULL); 16023 } 16024 16025 16026 /* 16027 * Function: sd_sense_key_recoverable_error 16028 * 16029 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16030 * 16031 * Context: May be called from interrupt context 16032 */ 16033 16034 static void 16035 sd_sense_key_recoverable_error(struct sd_lun *un, 16036 uint8_t *sense_datap, 16037 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16038 { 16039 struct sd_sense_info si; 16040 uint8_t asc = scsi_sense_asc(sense_datap); 16041 16042 ASSERT(un != NULL); 16043 ASSERT(mutex_owned(SD_MUTEX(un))); 16044 ASSERT(bp != NULL); 16045 ASSERT(xp != NULL); 16046 ASSERT(pktp != NULL); 16047 16048 /* 16049 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16050 */ 16051 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16052 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16053 si.ssi_severity = SCSI_ERR_INFO; 16054 si.ssi_pfa_flag = TRUE; 16055 } else { 16056 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16057 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16058 si.ssi_severity = SCSI_ERR_RECOVERED; 16059 si.ssi_pfa_flag = FALSE; 16060 } 16061 16062 if (pktp->pkt_resid == 0) { 16063 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16064 sd_return_command(un, bp); 16065 return; 16066 } 16067 16068 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16069 &si, EIO, (clock_t)0, NULL); 16070 } 16071 16072 16073 16074 16075 /* 16076 * Function: sd_sense_key_not_ready 16077 * 16078 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
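 * (For orientation, the ASC/ASCQ pairs handled below include
 * 0x04/0x01 "LUN is in process of becoming ready", which is
 * retried, and 0x3A "medium not present", which fails immediately;
 * see the switch statement for the full set.)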
16079 * 16080 * Context: May be called from interrupt context 16081 */ 16082 16083 static void 16084 sd_sense_key_not_ready(struct sd_lun *un, 16085 uint8_t *sense_datap, 16086 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16087 { 16088 struct sd_sense_info si; 16089 uint8_t asc = scsi_sense_asc(sense_datap); 16090 uint8_t ascq = scsi_sense_ascq(sense_datap); 16091 16092 ASSERT(un != NULL); 16093 ASSERT(mutex_owned(SD_MUTEX(un))); 16094 ASSERT(bp != NULL); 16095 ASSERT(xp != NULL); 16096 ASSERT(pktp != NULL); 16097 16098 si.ssi_severity = SCSI_ERR_FATAL; 16099 si.ssi_pfa_flag = FALSE; 16100 16101 /* 16102 * Update error stats after first NOT READY error. Disks may have 16103 * been powered down and may need to be restarted. For CDROMs, 16104 * report NOT READY errors only if media is present. 16105 */ 16106 if ((ISCD(un) && (asc == 0x3A)) || 16107 (xp->xb_nr_retry_count > 0)) { 16108 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16109 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16110 } 16111 16112 /* 16113 * Just fail if the "not ready" retry limit has been reached. 16114 */ 16115 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16116 /* Special check for error message printing for removables. */ 16117 if (un->un_f_has_removable_media && (asc == 0x04) && 16118 (ascq >= 0x04)) { 16119 si.ssi_severity = SCSI_ERR_ALL; 16120 } 16121 goto fail_command; 16122 } 16123 16124 /* 16125 * Check the ASC and ASCQ in the sense data as needed, to determine 16126 * what to do. 16127 */ 16128 switch (asc) { 16129 case 0x04: /* LOGICAL UNIT NOT READY */ 16130 /* 16131 * Disk drives that don't spin up result in a very long delay 16132 * in format without warning messages. We will log a message 16133 * if the error level is set to verbose. 16134 */ 16135 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16136 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16137 "logical unit not ready, resetting disk\n"); 16138 } 16139 16140 /* 16141 * There are different requirements for CDROMs and disks for 16142 * the number of retries. If a CD-ROM is giving this, it is 16143 * probably reading TOC and is in the process of getting 16144 * ready, so we should keep on trying for a long time to make 16145 * sure that all types of media are taken into account (for 16146 * some media the drive takes a long time to read TOC). For 16147 * disks we do not want to retry this too many times as this 16148 * can cause a long hang in format when the drive refuses to 16149 * spin up (a very common failure). 16150 */ 16151 switch (ascq) { 16152 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16153 /* 16154 * Disk drives frequently refuse to spin up which 16155 * results in a very long hang in format without 16156 * warning messages. 16157 * 16158 * Note: This code preserves the legacy behavior of 16159 * comparing xb_nr_retry_count against zero for fibre 16160 * channel targets instead of comparing against the 16161 * un_reset_retry_count value. The reason for this 16162 * discrepancy has been so utterly lost beneath the 16163 * Sands of Time that even Indiana Jones could not 16164 * find it.
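 *
 * (Sketch of the legacy check preserved below, hedged:
 *	fibre:    xb_nr_retry_count > 0
 *	parallel: xb_nr_retry_count > un_reset_retry_count
 * Either alternative, or SD_LOGMASK_DIAG being set, triggers the
 * reset, provided no START/STOP restart is already pending.)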
16165 */ 16166 if (un->un_f_is_fibre == TRUE) { 16167 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16168 (xp->xb_nr_retry_count > 0)) && 16169 (un->un_startstop_timeid == NULL)) { 16170 scsi_log(SD_DEVINFO(un), sd_label, 16171 CE_WARN, "logical unit not ready, " 16172 "resetting disk\n"); 16173 sd_reset_target(un, pktp); 16174 } 16175 } else { 16176 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16177 (xp->xb_nr_retry_count > 16178 un->un_reset_retry_count)) && 16179 (un->un_startstop_timeid == NULL)) { 16180 scsi_log(SD_DEVINFO(un), sd_label, 16181 CE_WARN, "logical unit not ready, " 16182 "resetting disk\n"); 16183 sd_reset_target(un, pktp); 16184 } 16185 } 16186 break; 16187 16188 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16189 /* 16190 * If the target is in the process of becoming 16191 * ready, just proceed with the retry. This can 16192 * happen with CD-ROMs that take a long time to 16193 * read TOC after a power cycle or reset. 16194 */ 16195 goto do_retry; 16196 16197 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16198 break; 16199 16200 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16201 /* 16202 * Retries cannot help here so just fail right away. 16203 */ 16204 goto fail_command; 16205 16206 case 0x88: 16207 /* 16208 * Vendor-unique code for T3/T4: it indicates a 16209 * path problem in a multipathed config, but as far as 16210 * the target driver is concerned it equates to a fatal 16211 * error, so we should just fail the command right away 16212 * (without printing anything to the console). If this 16213 * is not a T3/T4, fall thru to the default recovery 16214 * action. 16215 * T3/T4 is FC only, so there is no need to check is_fibre. 16216 */ 16217 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16218 sd_return_failed_command(un, bp, EIO); 16219 return; 16220 } 16221 /* FALLTHRU */ 16222 16223 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16224 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16225 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16226 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16227 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16228 default: /* Possible future codes in SCSI spec? */ 16229 /* 16230 * For removable-media devices, do not retry if 16231 * ASCQ > 2 as these result mostly from USCSI commands 16232 * on MMC devices issued to check status of an 16233 * operation initiated in immediate mode. Also for 16234 * ASCQ >= 4 do not print console messages as these 16235 * mainly represent a user-initiated operation 16236 * instead of a system failure. 16237 */ 16238 if (un->un_f_has_removable_media) { 16239 si.ssi_severity = SCSI_ERR_ALL; 16240 goto fail_command; 16241 } 16242 break; 16243 } 16244 16245 /* 16246 * As part of our recovery attempt for the NOT READY 16247 * condition, we issue a START STOP UNIT command. However 16248 * we want to wait for a short delay before attempting this 16249 * as there may still be more commands coming back from the 16250 * target with the check condition. To do this we use 16251 * timeout(9F) to call sd_start_stop_unit_callback() after 16252 * the delay interval expires. (sd_start_stop_unit_callback() 16253 * dispatches sd_start_stop_unit_task(), which will issue 16254 * the actual START STOP UNIT command.) The delay interval 16255 * is one-half of the delay that we will use to retry the 16256 * command that generated the NOT READY condition.
16257 		 *
16258 		 * Note that we could just dispatch sd_start_stop_unit_task()
16259 		 * from here and allow it to sleep for the delay interval,
16260 		 * but then we would be tying up the taskq thread
16261 		 * unnecessarily for the duration of the delay.
16262 		 *
16263 		 * Do not issue the START STOP UNIT if the current command
16264 		 * is already a START STOP UNIT.
16265 		 */
16266 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
16267 			break;
16268 		}
16269 
16270 		/*
16271 		 * Do not schedule the timeout if one is already pending.
16272 		 */
16273 		if (un->un_startstop_timeid != NULL) {
16274 			SD_INFO(SD_LOG_ERROR, un,
16275 			    "sd_sense_key_not_ready: restart already issued to"
16276 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
16277 			    ddi_get_instance(SD_DEVINFO(un)));
16278 			break;
16279 		}
16280 
16281 		/*
16282 		 * Schedule the START STOP UNIT command, then queue the command
16283 		 * for a retry.
16284 		 *
16285 		 * Note: A timeout is not scheduled for this retry because we
16286 		 * want the retry to be serial with the START_STOP_UNIT. The
16287 		 * retry will be started when the START_STOP_UNIT is completed
16288 		 * in sd_start_stop_unit_task.
16289 		 */
16290 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
16291 		    un, SD_BSY_TIMEOUT / 2);
16292 		xp->xb_nr_retry_count++;
16293 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
16294 		return;
16295 
16296 	case 0x05:  /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
16297 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
16298 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16299 			    "unit does not respond to selection\n");
16300 		}
16301 		break;
16302 
16303 	case 0x3A:  /* MEDIUM NOT PRESENT */
16304 		if (sd_error_level >= SCSI_ERR_FATAL) {
16305 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16306 			    "Caddy not inserted in drive\n");
16307 		}
16308 
16309 		sr_ejected(un);
16310 		un->un_mediastate = DKIO_EJECTED;
16311 		/* The state has changed, inform the media watch routines */
16312 		cv_broadcast(&un->un_state_cv);
16313 		/* Just fail if no media is present in the drive. */
16314 		goto fail_command;
16315 
16316 	default:
16317 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
16318 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
16319 			    "Unit not Ready. Additional sense code 0x%x\n",
16320 			    asc);
16321 		}
16322 		break;
16323 	}
16324 
16325 do_retry:
16326 
16327 	/*
16328 	 * Retry the command, as some targets may report NOT READY for
16329 	 * several seconds after being reset.
16330 	 */
16331 	xp->xb_nr_retry_count++;
16332 	si.ssi_severity = SCSI_ERR_RETRYABLE;
16333 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
16334 	    &si, EIO, SD_BSY_TIMEOUT, NULL);
16335 
16336 	return;
16337 
16338 fail_command:
16339 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16340 	sd_return_failed_command(un, bp, EIO);
16341 }
16342 
16343 
16344 
16345 /*
16346  * Function: sd_sense_key_medium_or_hardware_error
16347  *
16348  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
16349  *		sense key.
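 *
 *		When the retry count reaches un_reset_retry_count (and bus/
 *		device resets are allowed), a reset is attempted: LUN first,
 *		then target.  The exception is an LSI array reporting ASC
 *		0x84 (LUN dead), where the reset is skipped so the other
 *		LUNs on that target are not victimized.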
16350  *
16351  * Context: May be called from interrupt context
16352  */
16353 
16354 static void
16355 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
16356 	uint8_t *sense_datap,
16357 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16358 {
16359 	struct sd_sense_info	si;
16360 	uint8_t sense_key = scsi_sense_key(sense_datap);
16361 	uint8_t asc = scsi_sense_asc(sense_datap);
16362 
16363 	ASSERT(un != NULL);
16364 	ASSERT(mutex_owned(SD_MUTEX(un)));
16365 	ASSERT(bp != NULL);
16366 	ASSERT(xp != NULL);
16367 	ASSERT(pktp != NULL);
16368 
16369 	si.ssi_severity = SCSI_ERR_FATAL;
16370 	si.ssi_pfa_flag = FALSE;
16371 
16372 	if (sense_key == KEY_MEDIUM_ERROR) {
16373 		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
16374 	}
16375 
16376 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16377 
16378 	if ((un->un_reset_retry_count != 0) &&
16379 	    (xp->xb_retry_count == un->un_reset_retry_count)) {
16380 		mutex_exit(SD_MUTEX(un));
16381 		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
16382 		if (un->un_f_allow_bus_device_reset == TRUE) {
16383 
16384 			boolean_t try_resetting_target = B_TRUE;
16385 
16386 			/*
16387 			 * We need to handle specific ASCs when we are
16388 			 * handling a KEY_HARDWARE_ERROR. In particular,
16389 			 * taking the default action of resetting the target
16390 			 * may not be the appropriate way to attempt recovery.
16391 			 * Resetting a target because of a single LUN failure
16392 			 * victimizes all LUNs on that target.
16393 			 *
16394 			 * This is true for LSI arrays: if an LSI
16395 			 * array controller returns an ASC of 0x84 (LUN Dead),
16396 			 * we should trust it.
16397 			 */
16398 
16399 			if (sense_key == KEY_HARDWARE_ERROR) {
16400 				switch (asc) {
16401 				case 0x84:
16402 					if (SD_IS_LSI(un)) {
16403 						try_resetting_target = B_FALSE;
16404 					}
16405 					break;
16406 				default:
16407 					break;
16408 				}
16409 			}
16410 
16411 			if (try_resetting_target == B_TRUE) {
16412 				int reset_retval = 0;
16413 				if (un->un_f_lun_reset_enabled == TRUE) {
16414 					SD_TRACE(SD_LOG_IO_CORE, un,
16415 					    "sd_sense_key_medium_or_hardware_"
16416 					    "error: issuing RESET_LUN\n");
16417 					reset_retval =
16418 					    scsi_reset(SD_ADDRESS(un),
16419 					    RESET_LUN);
16420 				}
16421 				if (reset_retval == 0) {
16422 					SD_TRACE(SD_LOG_IO_CORE, un,
16423 					    "sd_sense_key_medium_or_hardware_"
16424 					    "error: issuing RESET_TARGET\n");
16425 					(void) scsi_reset(SD_ADDRESS(un),
16426 					    RESET_TARGET);
16427 				}
16428 			}
16429 		}
16430 		mutex_enter(SD_MUTEX(un));
16431 	}
16432 
16433 	/*
16434 	 * This really ought to be a fatal error, but we will retry anyway
16435 	 * as some drives report this as a spurious error.
16436 	 */
16437 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
16438 	    &si, EIO, (clock_t)0, NULL);
16439 }
16440 
16441 
16442 
16443 /*
16444  * Function: sd_sense_key_illegal_request
16445  *
16446  * Description: Recovery actions for a SCSI "Illegal Request" sense key.
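 *
 *		An illegal request is never retried: the target has rejected
 *		the command itself, so it is failed immediately with EIO
 *		after logging at SCSI_ERR_INFO severity.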
16447  *
16448  * Context: May be called from interrupt context
16449  */
16450 
16451 static void
16452 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
16453 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16454 {
16455 	struct sd_sense_info	si;
16456 
16457 	ASSERT(un != NULL);
16458 	ASSERT(mutex_owned(SD_MUTEX(un)));
16459 	ASSERT(bp != NULL);
16460 	ASSERT(xp != NULL);
16461 	ASSERT(pktp != NULL);
16462 
16463 	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
16464 
16465 	si.ssi_severity = SCSI_ERR_INFO;
16466 	si.ssi_pfa_flag = FALSE;
16467 
16468 	/* Pointless to retry if the target thinks it's an illegal request */
16469 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16470 	sd_return_failed_command(un, bp, EIO);
16471 }
16472 
16473 
16474 
16475 
16476 /*
16477  * Function: sd_sense_key_unit_attention
16478  *
16479  * Description: Recovery actions for a SCSI "Unit Attention" sense key.
16480  *
16481  * Context: May be called from interrupt context
16482  */
16483 
16484 static void
16485 sd_sense_key_unit_attention(struct sd_lun *un,
16486 	uint8_t *sense_datap,
16487 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
16488 {
16489 	/*
16490 	 * For UNIT ATTENTION we allow retries for one minute. Devices
16491 	 * like Sonoma can keep returning UNIT ATTENTION for close to
16492 	 * a minute under certain conditions.
16493 	 */
16494 	int	retry_check_flag = SD_RETRIES_UA;
16495 	boolean_t	kstat_updated = B_FALSE;
16496 	struct	sd_sense_info		si;
16497 	uint8_t asc = scsi_sense_asc(sense_datap);
16498 
16499 	ASSERT(un != NULL);
16500 	ASSERT(mutex_owned(SD_MUTEX(un)));
16501 	ASSERT(bp != NULL);
16502 	ASSERT(xp != NULL);
16503 	ASSERT(pktp != NULL);
16504 
16505 	si.ssi_severity = SCSI_ERR_INFO;
16506 	si.ssi_pfa_flag = FALSE;
16507 
16508 
16509 	switch (asc) {
16510 	case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
16511 		if (sd_report_pfa != 0) {
16512 			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
16513 			si.ssi_pfa_flag = TRUE;
16514 			retry_check_flag = SD_RETRIES_STANDARD;
16515 			goto do_retry;
16516 		}
16517 
16518 		break;
16519 
16520 	case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
16521 		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
16522 			un->un_resvd_status |=
16523 			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
16524 		}
16525 #ifdef _LP64
16526 		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
16527 			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
16528 			    un, KM_NOSLEEP) == 0) {
16529 				/*
16530 				 * If we can't dispatch the task we'll just
16531 				 * live without descriptor sense.  We can
16532 				 * try again on the next "unit attention".
16533 				 */
16534 				SD_ERROR(SD_LOG_ERROR, un,
16535 				    "sd_sense_key_unit_attention: "
16536 				    "Could not dispatch "
16537 				    "sd_reenable_dsense_task\n");
16538 			}
16539 		}
16540 #endif /* _LP64 */
16541 		/* FALLTHRU */
16542 
16543 	case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
16544 		if (!un->un_f_has_removable_media) {
16545 			break;
16546 		}
16547 
16548 		/*
16549 		 * When we get a unit attention from a removable-media device,
16550 		 * it may be in a state that will take a long time to recover
16551 		 * (e.g., from a reset).  Since we are executing in interrupt
16552 		 * context here, we cannot wait around for the device to come
16553 		 * back. So hand this command off to sd_media_change_task()
16554 		 * for deferred processing under taskq thread context. (Note
16555 		 * that the command still may be failed if a problem is
16556 		 * encountered at a later time.)
16557 		 */
16558 		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
16559 		    KM_NOSLEEP) == 0) {
16560 			/*
16561 			 * Cannot dispatch the request so fail the command.
16562 			 */
16563 			SD_UPDATE_ERRSTATS(un, sd_harderrs);
16564 			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16565 			si.ssi_severity = SCSI_ERR_FATAL;
16566 			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16567 			sd_return_failed_command(un, bp, EIO);
16568 		}
16569 
16570 		/*
16571 		 * If the dispatch of sd_media_change_task() failed, we have
16572 		 * already updated the kstats above.  If it succeeded, the
16573 		 * task will update the kstats itself if it encounters an
16574 		 * error later.  Either way, set the kstat_updated flag here.
16575 		 */
16576 		kstat_updated = B_TRUE;
16577 
16578 		/*
16579 		 * Either the command has been successfully dispatched to a
16580 		 * task Q for retrying, or the dispatch failed. In either case
16581 		 * do NOT retry again by calling sd_retry_command. This sets up
16582 		 * two retries of the same command and when one completes and
16583 		 * frees the resources the other will access freed memory,
16584 		 * a bad thing.
16585 		 */
16586 		return;
16587 
16588 	default:
16589 		break;
16590 	}
16591 
16592 	/*
16593 	 * Update kstat if we haven't done that.
16594 	 */
16595 	if (!kstat_updated) {
16596 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
16597 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
16598 	}
16599 
16600 do_retry:
16601 	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
16602 	    EIO, SD_UA_RETRY_DELAY, NULL);
16603 }
16604 
16605 
16606 
16607 /*
16608  * Function: sd_sense_key_fail_command
16609  *
16610  * Description: Used to fail a command when we don't like the sense key that
16611  *		was returned.
16612  *
16613  * Context: May be called from interrupt context
16614  */
16615 
16616 static void
16617 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
16618 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16619 {
16620 	struct sd_sense_info	si;
16621 
16622 	ASSERT(un != NULL);
16623 	ASSERT(mutex_owned(SD_MUTEX(un)));
16624 	ASSERT(bp != NULL);
16625 	ASSERT(xp != NULL);
16626 	ASSERT(pktp != NULL);
16627 
16628 	si.ssi_severity = SCSI_ERR_FATAL;
16629 	si.ssi_pfa_flag = FALSE;
16630 
16631 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16632 	sd_return_failed_command(un, bp, EIO);
16633 }
16634 
16635 
16636 
16637 /*
16638  * Function: sd_sense_key_blank_check
16639  *
16640  * Description: Recovery actions for a SCSI "Blank Check" sense key.
16641  *		Has no monetary connotation.
16642  *
16643  * Context: May be called from interrupt context
16644  */
16645 
16646 static void
16647 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
16648 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16649 {
16650 	struct sd_sense_info	si;
16651 
16652 	ASSERT(un != NULL);
16653 	ASSERT(mutex_owned(SD_MUTEX(un)));
16654 	ASSERT(bp != NULL);
16655 	ASSERT(xp != NULL);
16656 	ASSERT(pktp != NULL);
16657 
16658 	/*
16659 	 * Blank check is not fatal for removable devices, therefore
16660 	 * it does not require a console message.
16661 	 */
16662 	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
16663 	    SCSI_ERR_FATAL;
16664 	si.ssi_pfa_flag = FALSE;
16665 
16666 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
16667 	sd_return_failed_command(un, bp, EIO);
16668 }
16669 
16670 
16671 
16672 
16673 /*
16674  * Function: sd_sense_key_aborted_command
16675  *
16676  * Description: Recovery actions for a SCSI "Aborted Command" sense key.
16677 * 16678 * Context: May be called from interrupt context 16679 */ 16680 16681 static void 16682 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16683 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16684 { 16685 struct sd_sense_info si; 16686 16687 ASSERT(un != NULL); 16688 ASSERT(mutex_owned(SD_MUTEX(un))); 16689 ASSERT(bp != NULL); 16690 ASSERT(xp != NULL); 16691 ASSERT(pktp != NULL); 16692 16693 si.ssi_severity = SCSI_ERR_FATAL; 16694 si.ssi_pfa_flag = FALSE; 16695 16696 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16697 16698 /* 16699 * This really ought to be a fatal error, but we will retry anyway 16700 * as some drives report this as a spurious error. 16701 */ 16702 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16703 &si, EIO, drv_usectohz(100000), NULL); 16704 } 16705 16706 16707 16708 /* 16709 * Function: sd_sense_key_default 16710 * 16711 * Description: Default recovery action for several SCSI sense keys (basically 16712 * attempts a retry). 16713 * 16714 * Context: May be called from interrupt context 16715 */ 16716 16717 static void 16718 sd_sense_key_default(struct sd_lun *un, 16719 uint8_t *sense_datap, 16720 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16721 { 16722 struct sd_sense_info si; 16723 uint8_t sense_key = scsi_sense_key(sense_datap); 16724 16725 ASSERT(un != NULL); 16726 ASSERT(mutex_owned(SD_MUTEX(un))); 16727 ASSERT(bp != NULL); 16728 ASSERT(xp != NULL); 16729 ASSERT(pktp != NULL); 16730 16731 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16732 16733 /* 16734 * Undecoded sense key. Attempt retries and hope that will fix 16735 * the problem. Otherwise, we're dead. 16736 */ 16737 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16738 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16739 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 16740 } 16741 16742 si.ssi_severity = SCSI_ERR_FATAL; 16743 si.ssi_pfa_flag = FALSE; 16744 16745 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16746 &si, EIO, (clock_t)0, NULL); 16747 } 16748 16749 16750 16751 /* 16752 * Function: sd_print_retry_msg 16753 * 16754 * Description: Print a message indicating the retry action being taken. 16755 * 16756 * Arguments: un - ptr to associated softstate 16757 * bp - ptr to buf(9S) for the command 16758 * arg - not used. 16759 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16760 * or SD_NO_RETRY_ISSUED 16761 * 16762 * Context: May be called from interrupt context 16763 */ 16764 /* ARGSUSED */ 16765 static void 16766 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 16767 { 16768 struct sd_xbuf *xp; 16769 struct scsi_pkt *pktp; 16770 char *reasonp; 16771 char *msgp; 16772 16773 ASSERT(un != NULL); 16774 ASSERT(mutex_owned(SD_MUTEX(un))); 16775 ASSERT(bp != NULL); 16776 pktp = SD_GET_PKTP(bp); 16777 ASSERT(pktp != NULL); 16778 xp = SD_GET_XBUF(bp); 16779 ASSERT(xp != NULL); 16780 16781 ASSERT(!mutex_owned(&un->un_pm_mutex)); 16782 mutex_enter(&un->un_pm_mutex); 16783 if ((un->un_state == SD_STATE_SUSPENDED) || 16784 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 16785 (pktp->pkt_flags & FLAG_SILENT)) { 16786 mutex_exit(&un->un_pm_mutex); 16787 goto update_pkt_reason; 16788 } 16789 mutex_exit(&un->un_pm_mutex); 16790 16791 /* 16792 * Suppress messages if they are all the same pkt_reason; with 16793 * TQ, many (up to 256) are returned with the same pkt_reason. 16794 * If we are in panic, then suppress the retry messages. 
16795 */ 16796 switch (flag) { 16797 case SD_NO_RETRY_ISSUED: 16798 msgp = "giving up"; 16799 break; 16800 case SD_IMMEDIATE_RETRY_ISSUED: 16801 case SD_DELAYED_RETRY_ISSUED: 16802 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 16803 ((pktp->pkt_reason == un->un_last_pkt_reason) && 16804 (sd_error_level != SCSI_ERR_ALL))) { 16805 return; 16806 } 16807 msgp = "retrying command"; 16808 break; 16809 default: 16810 goto update_pkt_reason; 16811 } 16812 16813 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 16814 scsi_rname(pktp->pkt_reason)); 16815 16816 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16817 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 16818 16819 update_pkt_reason: 16820 /* 16821 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 16822 * This is to prevent multiple console messages for the same failure 16823 * condition. Note that un->un_last_pkt_reason is NOT restored if & 16824 * when the command is retried successfully because there still may be 16825 * more commands coming back with the same value of pktp->pkt_reason. 16826 */ 16827 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 16828 un->un_last_pkt_reason = pktp->pkt_reason; 16829 } 16830 } 16831 16832 16833 /* 16834 * Function: sd_print_cmd_incomplete_msg 16835 * 16836 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 16837 * 16838 * Arguments: un - ptr to associated softstate 16839 * bp - ptr to buf(9S) for the command 16840 * arg - passed to sd_print_retry_msg() 16841 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16842 * or SD_NO_RETRY_ISSUED 16843 * 16844 * Context: May be called from interrupt context 16845 */ 16846 16847 static void 16848 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 16849 int code) 16850 { 16851 dev_info_t *dip; 16852 16853 ASSERT(un != NULL); 16854 ASSERT(mutex_owned(SD_MUTEX(un))); 16855 ASSERT(bp != NULL); 16856 16857 switch (code) { 16858 case SD_NO_RETRY_ISSUED: 16859 /* Command was failed. Someone turned off this target? */ 16860 if (un->un_state != SD_STATE_OFFLINE) { 16861 /* 16862 * Suppress message if we are detaching and 16863 * device has been disconnected 16864 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 16865 * private interface and not part of the DDI 16866 */ 16867 dip = un->un_sd->sd_dev; 16868 if (!(DEVI_IS_DETACHING(dip) && 16869 DEVI_IS_DEVICE_REMOVED(dip))) { 16870 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16871 "disk not responding to selection\n"); 16872 } 16873 New_state(un, SD_STATE_OFFLINE); 16874 } 16875 break; 16876 16877 case SD_DELAYED_RETRY_ISSUED: 16878 case SD_IMMEDIATE_RETRY_ISSUED: 16879 default: 16880 /* Command was successfully queued for retry */ 16881 sd_print_retry_msg(un, bp, arg, code); 16882 break; 16883 } 16884 } 16885 16886 16887 /* 16888 * Function: sd_pkt_reason_cmd_incomplete 16889 * 16890 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
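 *
 *		If the packet never got past winning the bus (pkt_state is
 *		exactly STATE_GOT_BUS), the target is not reset; otherwise it
 *		is.  When STATE_GOT_TARGET was never reached,
 *		SD_RETRIES_FAILFAST is added since further retries and
 *		commands are likely to take a long time.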
16891 * 16892 * Context: May be called from interrupt context 16893 */ 16894 16895 static void 16896 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 16897 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16898 { 16899 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 16900 16901 ASSERT(un != NULL); 16902 ASSERT(mutex_owned(SD_MUTEX(un))); 16903 ASSERT(bp != NULL); 16904 ASSERT(xp != NULL); 16905 ASSERT(pktp != NULL); 16906 16907 /* Do not do a reset if selection did not complete */ 16908 /* Note: Should this not just check the bit? */ 16909 if (pktp->pkt_state != STATE_GOT_BUS) { 16910 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16911 sd_reset_target(un, pktp); 16912 } 16913 16914 /* 16915 * If the target was not successfully selected, then set 16916 * SD_RETRIES_FAILFAST to indicate that we lost communication 16917 * with the target, and further retries and/or commands are 16918 * likely to take a long time. 16919 */ 16920 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 16921 flag |= SD_RETRIES_FAILFAST; 16922 } 16923 16924 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16925 16926 sd_retry_command(un, bp, flag, 16927 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16928 } 16929 16930 16931 16932 /* 16933 * Function: sd_pkt_reason_cmd_tran_err 16934 * 16935 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 16936 * 16937 * Context: May be called from interrupt context 16938 */ 16939 16940 static void 16941 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 16942 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16943 { 16944 ASSERT(un != NULL); 16945 ASSERT(mutex_owned(SD_MUTEX(un))); 16946 ASSERT(bp != NULL); 16947 ASSERT(xp != NULL); 16948 ASSERT(pktp != NULL); 16949 16950 /* 16951 * Do not reset if we got a parity error, or if 16952 * selection did not complete. 16953 */ 16954 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16955 /* Note: Should this not just check the bit for pkt_state? */ 16956 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 16957 (pktp->pkt_state != STATE_GOT_BUS)) { 16958 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16959 sd_reset_target(un, pktp); 16960 } 16961 16962 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16963 16964 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 16965 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 16966 } 16967 16968 16969 16970 /* 16971 * Function: sd_pkt_reason_cmd_reset 16972 * 16973 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 16974 * 16975 * Context: May be called from interrupt context 16976 */ 16977 16978 static void 16979 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 16980 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16981 { 16982 ASSERT(un != NULL); 16983 ASSERT(mutex_owned(SD_MUTEX(un))); 16984 ASSERT(bp != NULL); 16985 ASSERT(xp != NULL); 16986 ASSERT(pktp != NULL); 16987 16988 /* The target may still be running the command, so try to reset. */ 16989 SD_UPDATE_ERRSTATS(un, sd_transerrs); 16990 sd_reset_target(un, pktp); 16991 16992 SD_UPDATE_RESERVATION_STATUS(un, pktp); 16993 16994 /* 16995 * If pkt_reason is CMD_RESET chances are that this pkt got 16996 * reset because another target on this bus caused it. The target 16997 * that caused it should get CMD_TIMEOUT with pkt_statistics 16998 * of STAT_TIMEOUT/STAT_DEV_RESET. 
16999 */ 17000 17001 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17002 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17003 } 17004 17005 17006 17007 17008 /* 17009 * Function: sd_pkt_reason_cmd_aborted 17010 * 17011 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17012 * 17013 * Context: May be called from interrupt context 17014 */ 17015 17016 static void 17017 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17018 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17019 { 17020 ASSERT(un != NULL); 17021 ASSERT(mutex_owned(SD_MUTEX(un))); 17022 ASSERT(bp != NULL); 17023 ASSERT(xp != NULL); 17024 ASSERT(pktp != NULL); 17025 17026 /* The target may still be running the command, so try to reset. */ 17027 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17028 sd_reset_target(un, pktp); 17029 17030 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17031 17032 /* 17033 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17034 * aborted because another target on this bus caused it. The target 17035 * that caused it should get CMD_TIMEOUT with pkt_statistics 17036 * of STAT_TIMEOUT/STAT_DEV_RESET. 17037 */ 17038 17039 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17040 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17041 } 17042 17043 17044 17045 /* 17046 * Function: sd_pkt_reason_cmd_timeout 17047 * 17048 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17049 * 17050 * Context: May be called from interrupt context 17051 */ 17052 17053 static void 17054 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17055 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17056 { 17057 ASSERT(un != NULL); 17058 ASSERT(mutex_owned(SD_MUTEX(un))); 17059 ASSERT(bp != NULL); 17060 ASSERT(xp != NULL); 17061 ASSERT(pktp != NULL); 17062 17063 17064 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17065 sd_reset_target(un, pktp); 17066 17067 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17068 17069 /* 17070 * A command timeout indicates that we could not establish 17071 * communication with the target, so set SD_RETRIES_FAILFAST 17072 * as further retries/commands are likely to take a long time. 17073 */ 17074 sd_retry_command(un, bp, 17075 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17076 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17077 } 17078 17079 17080 17081 /* 17082 * Function: sd_pkt_reason_cmd_unx_bus_free 17083 * 17084 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 17085 * 17086 * Context: May be called from interrupt context 17087 */ 17088 17089 static void 17090 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17091 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17092 { 17093 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17094 17095 ASSERT(un != NULL); 17096 ASSERT(mutex_owned(SD_MUTEX(un))); 17097 ASSERT(bp != NULL); 17098 ASSERT(xp != NULL); 17099 ASSERT(pktp != NULL); 17100 17101 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17102 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17103 17104 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17105 sd_print_retry_msg : NULL; 17106 17107 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17108 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17109 } 17110 17111 17112 /* 17113 * Function: sd_pkt_reason_cmd_tag_reject 17114 * 17115 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 
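 *
 *		The device has rejected a tagged command, so tagged queueing
 *		is turned off (the "tagged-qing" capability is cleared) and
 *		the throttle is lowered (to at most 3 if the device supports
 *		queueing, else to 1) before the command is retried.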
17116 * 17117 * Context: May be called from interrupt context 17118 */ 17119 17120 static void 17121 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17122 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17123 { 17124 ASSERT(un != NULL); 17125 ASSERT(mutex_owned(SD_MUTEX(un))); 17126 ASSERT(bp != NULL); 17127 ASSERT(xp != NULL); 17128 ASSERT(pktp != NULL); 17129 17130 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17131 pktp->pkt_flags = 0; 17132 un->un_tagflags = 0; 17133 if (un->un_f_opt_queueing == TRUE) { 17134 un->un_throttle = min(un->un_throttle, 3); 17135 } else { 17136 un->un_throttle = 1; 17137 } 17138 mutex_exit(SD_MUTEX(un)); 17139 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17140 mutex_enter(SD_MUTEX(un)); 17141 17142 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17143 17144 /* Legacy behavior not to check retry counts here. */ 17145 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17146 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17147 } 17148 17149 17150 /* 17151 * Function: sd_pkt_reason_default 17152 * 17153 * Description: Default recovery actions for SCSA pkt_reason values that 17154 * do not have more explicit recovery actions. 17155 * 17156 * Context: May be called from interrupt context 17157 */ 17158 17159 static void 17160 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17161 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17162 { 17163 ASSERT(un != NULL); 17164 ASSERT(mutex_owned(SD_MUTEX(un))); 17165 ASSERT(bp != NULL); 17166 ASSERT(xp != NULL); 17167 ASSERT(pktp != NULL); 17168 17169 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17170 sd_reset_target(un, pktp); 17171 17172 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17173 17174 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17175 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17176 } 17177 17178 17179 17180 /* 17181 * Function: sd_pkt_status_check_condition 17182 * 17183 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 17184 * 17185 * Context: May be called from interrupt context 17186 */ 17187 17188 static void 17189 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17190 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17191 { 17192 ASSERT(un != NULL); 17193 ASSERT(mutex_owned(SD_MUTEX(un))); 17194 ASSERT(bp != NULL); 17195 ASSERT(xp != NULL); 17196 ASSERT(pktp != NULL); 17197 17198 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17199 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17200 17201 /* 17202 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17203 * command will be retried after the request sense). Otherwise, retry 17204 * the command. Note: we are issuing the request sense even though the 17205 * retry limit may have been reached for the failed command. 
17206 	 */
17207 	if (un->un_f_arq_enabled == FALSE) {
17208 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17209 		    "no ARQ, sending request sense command\n");
17210 		sd_send_request_sense_command(un, bp, pktp);
17211 	} else {
17212 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
17213 		    "ARQ, retrying request sense command\n");
17214 #if defined(__i386) || defined(__amd64)
17215 		/*
17216 		 * The SD_RETRY_DELAY value needs to be adjusted here
17217 		 * if SD_RETRY_DELAY changes in sddef.h
17218 		 */
17219 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17220 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
17221 		    NULL);
17222 #else
17223 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
17224 		    EIO, SD_RETRY_DELAY, NULL);
17225 #endif
17226 	}
17227 
17228 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
17229 }
17230 
17231 
17232 /*
17233  * Function: sd_pkt_status_busy
17234  *
17235  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
17236  *
17237  * Context: May be called from interrupt context
17238  */
17239 
17240 static void
17241 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17242 	struct scsi_pkt *pktp)
17243 {
17244 	ASSERT(un != NULL);
17245 	ASSERT(mutex_owned(SD_MUTEX(un)));
17246 	ASSERT(bp != NULL);
17247 	ASSERT(xp != NULL);
17248 	ASSERT(pktp != NULL);
17249 
17250 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17251 	    "sd_pkt_status_busy: entry\n");
17252 
17253 	/* If retries are exhausted, just fail the command. */
17254 	if (xp->xb_retry_count >= un->un_busy_retry_count) {
17255 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17256 		    "device busy too long\n");
17257 		sd_return_failed_command(un, bp, EIO);
17258 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17259 		    "sd_pkt_status_busy: exit\n");
17260 		return;
17261 	}
17262 	xp->xb_retry_count++;
17263 
17264 	/*
17265 	 * Try to reset the target. However, we do not want to perform
17266 	 * more than one reset if the device continues to fail. The reset
17267 	 * will be performed when the retry count reaches the reset
17268 	 * threshold.  This threshold should be set such that at least
17269 	 * one retry is issued before the reset is performed.
17270 	 */
17271 	if (xp->xb_retry_count ==
17272 	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
17273 		int rval = 0;
17274 		mutex_exit(SD_MUTEX(un));
17275 		if (un->un_f_allow_bus_device_reset == TRUE) {
17276 			/*
17277 			 * First try to reset the LUN; if we cannot then
17278 			 * try to reset the target.
17279 			 */
17280 			if (un->un_f_lun_reset_enabled == TRUE) {
17281 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17282 				    "sd_pkt_status_busy: RESET_LUN\n");
17283 				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
17284 			}
17285 			if (rval == 0) {
17286 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17287 				    "sd_pkt_status_busy: RESET_TARGET\n");
17288 				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
17289 			}
17290 		}
17291 		if (rval == 0) {
17292 			/*
17293 			 * If the RESET_LUN and/or RESET_TARGET failed,
17294 			 * try RESET_ALL
17295 			 */
17296 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17297 			    "sd_pkt_status_busy: RESET_ALL\n");
17298 			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
17299 		}
17300 		mutex_enter(SD_MUTEX(un));
17301 		if (rval == 0) {
17302 			/*
17303 			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
17304 			 * At this point we give up & fail the command.
17305 			 */
17306 			sd_return_failed_command(un, bp, EIO);
17307 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17308 			    "sd_pkt_status_busy: exit (failed cmd)\n");
17309 			return;
17310 		}
17311 	}
17312 
17313 	/*
17314 	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
17315 	 * we have already checked the retry counts above.
17316 	 */
17317 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
17318 	    EIO, SD_BSY_TIMEOUT, NULL);
17319 
17320 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17321 	    "sd_pkt_status_busy: exit\n");
17322 }
17323 
17324 
17325 /*
17326  * Function: sd_pkt_status_reservation_conflict
17327  *
17328  * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
17329  *		command status.
17330  *
17331  * Context: May be called from interrupt context
17332  */
17333 
17334 static void
17335 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
17336 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17337 {
17338 	ASSERT(un != NULL);
17339 	ASSERT(mutex_owned(SD_MUTEX(un)));
17340 	ASSERT(bp != NULL);
17341 	ASSERT(xp != NULL);
17342 	ASSERT(pktp != NULL);
17343 
17344 	/*
17345 	 * If the command was PERSISTENT_RESERVATION_[IN|OUT], the reservation
17346 	 * conflict could have various causes: incorrect keys, the initiator
17347 	 * not registered, the device not reserved, etc. So, we return EACCES
17348 	 * to the caller.
	 */
17349 	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
17350 		int cmd = SD_GET_PKT_OPCODE(pktp);
17351 		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
17352 		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
17353 			sd_return_failed_command(un, bp, EACCES);
17354 			return;
17355 		}
17356 	}
17357 
17358 	un->un_resvd_status |= SD_RESERVATION_CONFLICT;
17359 
17360 	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
17361 		if (sd_failfast_enable != 0) {
17362 			/* By definition, we must panic here.... */
17363 			sd_panic_for_res_conflict(un);
17364 			/*NOTREACHED*/
17365 		}
17366 		SD_ERROR(SD_LOG_IO, un,
17367 		    "sd_handle_resv_conflict: Disk Reserved\n");
17368 		sd_return_failed_command(un, bp, EACCES);
17369 		return;
17370 	}
17371 
17372 	/*
17373 	 * 1147670: retry only if sd_retry_on_reservation_conflict
17374 	 * property is set (default is 1). Retries will not succeed
17375 	 * on a disk reserved by another initiator. HA systems
17376 	 * may reset this via sd.conf to avoid these retries.
17377 	 *
17378 	 * Note: The legacy return code for this failure is EIO, however EACCES
17379 	 * seems more appropriate for a reservation conflict.
17380 	 */
17381 	if (sd_retry_on_reservation_conflict == 0) {
17382 		SD_ERROR(SD_LOG_IO, un,
17383 		    "sd_handle_resv_conflict: Device Reserved\n");
17384 		sd_return_failed_command(un, bp, EIO);
17385 		return;
17386 	}
17387 
17388 	/*
17389 	 * Retry the command if we can.
17390 	 *
17391 	 * Note: The legacy return code for this failure is EIO, however EACCES
17392 	 * seems more appropriate for a reservation conflict.
17393 	 */
17394 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
17395 	    (clock_t)2, NULL);
17396 }
17397 
17398 
17399 
17400 /*
17401  * Function: sd_pkt_status_qfull
17402  *
17403  * Description: Handle a QUEUE FULL condition from the target.  This can
17404  *		occur if the HBA does not handle the queue full condition.
17405  *		(Basically this means third-party HBAs, as Sun HBAs will
17406  *		handle the queue full condition.)  Note that if there are
17407  *		some commands already in the transport, then the queue full
17408  *		has occurred because the queue for this nexus is actually
17409  *		full.
If there are no commands in the transport, then the 17410 * queue full is resulting from some other initiator or lun 17411 * consuming all the resources at the target. 17412 * 17413 * Context: May be called from interrupt context 17414 */ 17415 17416 static void 17417 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17418 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17419 { 17420 ASSERT(un != NULL); 17421 ASSERT(mutex_owned(SD_MUTEX(un))); 17422 ASSERT(bp != NULL); 17423 ASSERT(xp != NULL); 17424 ASSERT(pktp != NULL); 17425 17426 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17427 "sd_pkt_status_qfull: entry\n"); 17428 17429 /* 17430 * Just lower the QFULL throttle and retry the command. Note that 17431 * we do not limit the number of retries here. 17432 */ 17433 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17434 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17435 SD_RESTART_TIMEOUT, NULL); 17436 17437 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17438 "sd_pkt_status_qfull: exit\n"); 17439 } 17440 17441 17442 /* 17443 * Function: sd_reset_target 17444 * 17445 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17446 * RESET_TARGET, or RESET_ALL. 17447 * 17448 * Context: May be called under interrupt context. 17449 */ 17450 17451 static void 17452 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17453 { 17454 int rval = 0; 17455 17456 ASSERT(un != NULL); 17457 ASSERT(mutex_owned(SD_MUTEX(un))); 17458 ASSERT(pktp != NULL); 17459 17460 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17461 17462 /* 17463 * No need to reset if the transport layer has already done so. 17464 */ 17465 if ((pktp->pkt_statistics & 17466 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17467 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17468 "sd_reset_target: no reset\n"); 17469 return; 17470 } 17471 17472 mutex_exit(SD_MUTEX(un)); 17473 17474 if (un->un_f_allow_bus_device_reset == TRUE) { 17475 if (un->un_f_lun_reset_enabled == TRUE) { 17476 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17477 "sd_reset_target: RESET_LUN\n"); 17478 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17479 } 17480 if (rval == 0) { 17481 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17482 "sd_reset_target: RESET_TARGET\n"); 17483 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17484 } 17485 } 17486 17487 if (rval == 0) { 17488 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17489 "sd_reset_target: RESET_ALL\n"); 17490 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17491 } 17492 17493 mutex_enter(SD_MUTEX(un)); 17494 17495 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17496 } 17497 17498 17499 /* 17500 * Function: sd_media_change_task 17501 * 17502 * Description: Recovery action for CDROM to become available. 
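 *
 *		The task polls sd_handle_mchange() every half second.  The
 *		retry limit starts at SD_UNIT_ATTENTION_RETRY/10 and is
 *		extended to SD_UNIT_ATTENTION_RETRY once EAGAIN indicates
 *		that the device is still in the process of becoming ready.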
17503  *
17504  * Context: Executes in a taskq() thread context
17505  */
17506 
17507 static void
17508 sd_media_change_task(void *arg)
17509 {
17510 	struct	scsi_pkt	*pktp = arg;
17511 	struct	sd_lun		*un;
17512 	struct	buf		*bp;
17513 	struct	sd_xbuf		*xp;
17514 	int	err		= 0;
17515 	int	retry_count	= 0;
17516 	int	retry_limit	= SD_UNIT_ATTENTION_RETRY/10;
17517 	struct	sd_sense_info	si;
17518 
17519 	ASSERT(pktp != NULL);
17520 	bp = (struct buf *)pktp->pkt_private;
17521 	ASSERT(bp != NULL);
17522 	xp = SD_GET_XBUF(bp);
17523 	ASSERT(xp != NULL);
17524 	un = SD_GET_UN(bp);
17525 	ASSERT(un != NULL);
17526 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17527 	ASSERT(un->un_f_monitor_media_state);
17528 
17529 	si.ssi_severity = SCSI_ERR_INFO;
17530 	si.ssi_pfa_flag = FALSE;
17531 
17532 	/*
17533 	 * When a reset is issued on a CDROM, it takes a long time to
17534 	 * recover. The first few attempts to read the capacity and other
17535 	 * things related to handling the unit attention fail (with an ASC
17536 	 * of 0x4 and an ASCQ of 0x1). In that case we want to do enough
17537 	 * retries, while limiting the retries in other cases of genuine
17538 	 * failure, like no media in the drive.
17539 	 */
17540 	while (retry_count++ < retry_limit) {
17541 		if ((err = sd_handle_mchange(un)) == 0) {
17542 			break;
17543 		}
17544 		if (err == EAGAIN) {
17545 			retry_limit = SD_UNIT_ATTENTION_RETRY;
17546 		}
17547 		/* Sleep for 0.5 sec. & try again */
17548 		delay(drv_usectohz(500000));
17549 	}
17550 
17551 	/*
17552 	 * Dispatch (retry or fail) the original command here,
17553 	 * along with appropriate console messages....
17554 	 *
17555 	 * Must grab the mutex before calling sd_retry_command,
17556 	 * sd_print_sense_msg and sd_return_failed_command.
17557 	 */
17558 	mutex_enter(SD_MUTEX(un));
17559 	if (err != SD_CMD_SUCCESS) {
17560 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17561 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17562 		si.ssi_severity = SCSI_ERR_FATAL;
17563 		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17564 		sd_return_failed_command(un, bp, EIO);
17565 	} else {
17566 		sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17567 		    &si, EIO, (clock_t)0, NULL);
17568 	}
17569 	mutex_exit(SD_MUTEX(un));
17570 }
17571 
17572 
17573 
17574 /*
17575  * Function: sd_handle_mchange
17576  *
17577  * Description: Perform geometry validation & other recovery when CDROM
17578  *		has been removed from drive.
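 *
 *		The recovery sequence is: re-read the capacity, update the
 *		block info and the capacity kstat, re-check writability for
 *		CDs, invalidate and re-validate the cmlb label, and finally
 *		re-lock the door.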
17579 * 17580 * Return Code: 0 for success 17581 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17582 * sd_send_scsi_READ_CAPACITY() 17583 * 17584 * Context: Executes in a taskq() thread context 17585 */ 17586 17587 static int 17588 sd_handle_mchange(struct sd_lun *un) 17589 { 17590 uint64_t capacity; 17591 uint32_t lbasize; 17592 int rval; 17593 17594 ASSERT(!mutex_owned(SD_MUTEX(un))); 17595 ASSERT(un->un_f_monitor_media_state); 17596 17597 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17598 SD_PATH_DIRECT_PRIORITY)) != 0) { 17599 return (rval); 17600 } 17601 17602 mutex_enter(SD_MUTEX(un)); 17603 sd_update_block_info(un, lbasize, capacity); 17604 17605 if (un->un_errstats != NULL) { 17606 struct sd_errstats *stp = 17607 (struct sd_errstats *)un->un_errstats->ks_data; 17608 stp->sd_capacity.value.ui64 = (uint64_t) 17609 ((uint64_t)un->un_blockcount * 17610 (uint64_t)un->un_tgt_blocksize); 17611 } 17612 17613 17614 /* 17615 * Check if the media in the device is writable or not 17616 */ 17617 if (ISCD(un)) 17618 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17619 17620 /* 17621 * Note: Maybe let the strategy/partitioning chain worry about getting 17622 * valid geometry. 17623 */ 17624 mutex_exit(SD_MUTEX(un)); 17625 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 17626 17627 17628 if (cmlb_validate(un->un_cmlbhandle, 0, 17629 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 17630 return (EIO); 17631 } else { 17632 if (un->un_f_pkstats_enabled) { 17633 sd_set_pstats(un); 17634 SD_TRACE(SD_LOG_IO_PARTITION, un, 17635 "sd_handle_mchange: un:0x%p pstats created and " 17636 "set\n", un); 17637 } 17638 } 17639 17640 17641 /* 17642 * Try to lock the door 17643 */ 17644 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 17645 SD_PATH_DIRECT_PRIORITY)); 17646 } 17647 17648 17649 /* 17650 * Function: sd_send_scsi_DOORLOCK 17651 * 17652 * Description: Issue the scsi DOOR LOCK command 17653 * 17654 * Arguments: un - pointer to driver soft state (unit) structure for 17655 * this target. 17656 * flag - SD_REMOVAL_ALLOW 17657 * SD_REMOVAL_PREVENT 17658 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 17659 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 17660 * to use the USCSI "direct" chain and bypass the normal 17661 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 17662 * command is issued as part of an error recovery action. 17663 * 17664 * Return Code: 0 - Success 17665 * errno return code from sd_send_scsi_cmd() 17666 * 17667 * Context: Can sleep. 17668 */ 17669 17670 static int 17671 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 17672 { 17673 union scsi_cdb cdb; 17674 struct uscsi_cmd ucmd_buf; 17675 struct scsi_extended_sense sense_buf; 17676 int status; 17677 17678 ASSERT(un != NULL); 17679 ASSERT(!mutex_owned(SD_MUTEX(un))); 17680 17681 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 17682 17683 /* already determined doorlock is not supported, fake success */ 17684 if (un->un_f_doorlock_supported == FALSE) { 17685 return (0); 17686 } 17687 17688 /* 17689 * If we are ejecting and see an SD_REMOVAL_PREVENT 17690 * ignore the command so we can complete the eject 17691 * operation. 
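 *
 * (The CDB built below is a group 0 PREVENT ALLOW MEDIUM REMOVAL:
 * byte 4 carries the prevent bit, so the SD_REMOVAL_* flag value is
 * written directly into cdb_opaque[4].)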
17692 	 */
17693 	if (flag == SD_REMOVAL_PREVENT) {
17694 		mutex_enter(SD_MUTEX(un));
17695 		if (un->un_f_ejecting == TRUE) {
17696 			mutex_exit(SD_MUTEX(un));
17697 			return (EAGAIN);
17698 		}
17699 		mutex_exit(SD_MUTEX(un));
17700 	}
17701 
17702 	bzero(&cdb, sizeof (cdb));
17703 	bzero(&ucmd_buf, sizeof (ucmd_buf));
17704 
17705 	cdb.scc_cmd = SCMD_DOORLOCK;
17706 	cdb.cdb_opaque[4] = (uchar_t)flag;
17707 
17708 	ucmd_buf.uscsi_cdb = (char *)&cdb;
17709 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
17710 	ucmd_buf.uscsi_bufaddr = NULL;
17711 	ucmd_buf.uscsi_buflen = 0;
17712 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17713 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
17714 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
17715 	ucmd_buf.uscsi_timeout = 15;
17716 
17717 	SD_TRACE(SD_LOG_IO, un,
17718 	    "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
17719 
17720 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
17721 	    UIO_SYSSPACE, path_flag);
17722 
17723 	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
17724 	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
17725 	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
17726 		/* fake success and skip subsequent doorlock commands */
17727 		un->un_f_doorlock_supported = FALSE;
17728 		return (0);
17729 	}
17730 
17731 	return (status);
17732 }
17733 
17734 /*
17735  * Function: sd_send_scsi_READ_CAPACITY
17736  *
17737  * Description: This routine uses the scsi READ CAPACITY command to determine
17738  *		the device capacity in number of blocks and the device native
17739  *		block size. If this function returns a failure, then the
17740  *		values in *capp and *lbap are undefined.  If the capacity
17741  *		returned is 0xffffffff then the lun is too large for a
17742  *		normal READ CAPACITY command and the results of a
17743  *		READ CAPACITY 16 will be used instead.
17744  *
17745  * Arguments: un   - ptr to soft state struct for the target
17746  *		capp - ptr to unsigned 64-bit variable to receive the
17747  *			capacity value from the command.
17748  *		lbap - ptr to unsigned 32-bit variable to receive the
17749  *			block size value from the command
17750  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17751  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17752  *			to use the USCSI "direct" chain and bypass the normal
17753  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
17754  *			command is issued as part of an error recovery action.
17755  *
17756  * Return Code: 0   - Success
17757  *		EIO - IO error
17758  *		EACCES - Reservation conflict detected
17759  *		EAGAIN - Device is becoming ready
17760  *		errno return code from sd_send_scsi_cmd()
17761  *
17762  * Context: Can sleep.  Blocks until command completes.
17763  */
17764 
17765 #define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)
17766 
17767 static int
17768 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
17769 	int path_flag)
17770 {
17771 	struct	scsi_extended_sense	sense_buf;
17772 	struct	uscsi_cmd	ucmd_buf;
17773 	union	scsi_cdb	cdb;
17774 	uint32_t	*capacity_buf;
17775 	uint64_t	capacity;
17776 	uint32_t	lbasize;
17777 	int		status;
17778 
17779 	ASSERT(un != NULL);
17780 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17781 	ASSERT(capp != NULL);
17782 	ASSERT(lbap != NULL);
17783 
17784 	SD_TRACE(SD_LOG_IO, un,
17785 	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
17786 
17787 	/*
17788 	 * First send a READ_CAPACITY command to the target.
17789 	 * (This command is mandatory under SCSI-2.)
17790 	 *
17791 	 * Set up the CDB for the READ_CAPACITY command.
The Partial 17792 * Medium Indicator bit is cleared. The address field must be 17793 * zero if the PMI bit is zero. 17794 */ 17795 bzero(&cdb, sizeof (cdb)); 17796 bzero(&ucmd_buf, sizeof (ucmd_buf)); 17797 17798 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 17799 17800 cdb.scc_cmd = SCMD_READ_CAPACITY; 17801 17802 ucmd_buf.uscsi_cdb = (char *)&cdb; 17803 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 17804 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 17805 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 17806 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 17807 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 17808 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 17809 ucmd_buf.uscsi_timeout = 60; 17810 17811 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 17812 UIO_SYSSPACE, path_flag); 17813 17814 switch (status) { 17815 case 0: 17816 /* Return failure if we did not get valid capacity data. */ 17817 if (ucmd_buf.uscsi_resid != 0) { 17818 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17819 return (EIO); 17820 } 17821 17822 /* 17823 * Read capacity and block size from the READ CAPACITY 10 data. 17824 * This data may be adjusted later due to device specific 17825 * issues. 17826 * 17827 * According to the SCSI spec, the READ CAPACITY 10 17828 * command returns the following: 17829 * 17830 * bytes 0-3: Maximum logical block address available. 17831 * (MSB in byte:0 & LSB in byte:3) 17832 * 17833 * bytes 4-7: Block length in bytes 17834 * (MSB in byte:4 & LSB in byte:7) 17835 * 17836 */ 17837 capacity = BE_32(capacity_buf[0]); 17838 lbasize = BE_32(capacity_buf[1]); 17839 17840 /* 17841 * Done with capacity_buf 17842 */ 17843 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17844 17845 /* 17846 * if the reported capacity is set to all 0xf's, then 17847 * this disk is too large and requires SBC-2 commands. 17848 * Reissue the request using READ CAPACITY 16. 17849 */ 17850 if (capacity == 0xffffffff) { 17851 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 17852 &lbasize, path_flag); 17853 if (status != 0) { 17854 return (status); 17855 } 17856 } 17857 break; /* Success! */ 17858 case EIO: 17859 switch (ucmd_buf.uscsi_status) { 17860 case STATUS_RESERVATION_CONFLICT: 17861 status = EACCES; 17862 break; 17863 case STATUS_CHECK: 17864 /* 17865 * Check condition; look for ASC/ASCQ of 0x04/0x01 17866 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 17867 */ 17868 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 17869 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 17870 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 17871 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17872 return (EAGAIN); 17873 } 17874 break; 17875 default: 17876 break; 17877 } 17878 /* FALLTHRU */ 17879 default: 17880 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 17881 return (status); 17882 } 17883 17884 /* 17885 * Some ATAPI CD-ROM drives report inaccurate LBA size values 17886 * (2352 and 0 are common) so for these devices always force the value 17887 * to 2048 as required by the ATAPI specs. 17888 */ 17889 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 17890 lbasize = 2048; 17891 } 17892 17893 /* 17894 * Get the maximum LBA value from the READ CAPACITY data. 17895 * Here we assume that the Partial Medium Indicator (PMI) bit 17896 * was cleared when issuing the command. This means that the LBA 17897 * returned from the device is the LBA of the last logical block 17898 * on the logical unit. The actual logical block count will be 17899 * this value plus one. 
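	 *
	 * Worked example (assuming the usual 512-byte un_sys_blocksize): a
	 * device returning a maximum LBA of 0x3FFFFF with a 2048-byte block
	 * size yields (0x3FFFFF + 1) * (2048 / 512) = 0x1000000 blocks.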
17900 	 *
17901 	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
17902 	 * so scale the capacity value to reflect this.
17903 	 */
17904 	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
17905 
17906 	/*
17907 	 * Copy the values from the READ CAPACITY command into the space
17908 	 * provided by the caller.
17909 	 */
17910 	*capp = capacity;
17911 	*lbap = lbasize;
17912 
17913 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
17914 	    "capacity:0x%llx  lbasize:0x%x\n", capacity, lbasize);
17915 
17916 	/*
17917 	 * Both the lbasize and capacity from the device must be nonzero,
17918 	 * otherwise we assume that the values are not valid and return
17919 	 * failure to the caller. (4203735)
17920 	 */
17921 	if ((capacity == 0) || (lbasize == 0)) {
17922 		return (EIO);
17923 	}
17924 
17925 	return (0);
17926 }
17927 
17928 /*
17929  * Function: sd_send_scsi_READ_CAPACITY_16
17930  *
17931  * Description: This routine uses the scsi READ CAPACITY 16 command to
17932  *		determine the device capacity in number of blocks and the
17933  *		device native block size.  If this function returns a failure,
17934  *		then the values in *capp and *lbap are undefined.
17935  *		This routine should always be called by
17936  *		sd_send_scsi_READ_CAPACITY which will apply any device
17937  *		specific adjustments to capacity and lbasize.
17938  *
17939  * Arguments: un   - ptr to soft state struct for the target
17940  *		capp - ptr to unsigned 64-bit variable to receive the
17941  *			capacity value from the command.
17942  *		lbap - ptr to unsigned 32-bit variable to receive the
17943  *			block size value from the command
17944  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
17945  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
17946  *			to use the USCSI "direct" chain and bypass the normal
17947  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
17948  *			this command is issued as part of an error recovery
17949  *			action.
17950  *
17951  * Return Code: 0   - Success
17952  *		EIO - IO error
17953  *		EACCES - Reservation conflict detected
17954  *		EAGAIN - Device is becoming ready
17955  *		errno return code from sd_send_scsi_cmd()
17956  *
17957  * Context: Can sleep.  Blocks until command completes.
17958  */
17959 
17960 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
17961 
17962 static int
17963 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
17964 	uint32_t *lbap, int path_flag)
17965 {
17966 	struct	scsi_extended_sense	sense_buf;
17967 	struct	uscsi_cmd	ucmd_buf;
17968 	union	scsi_cdb	cdb;
17969 	uint64_t	*capacity16_buf;
17970 	uint64_t	capacity;
17971 	uint32_t	lbasize;
17972 	int		status;
17973 
17974 	ASSERT(un != NULL);
17975 	ASSERT(!mutex_owned(SD_MUTEX(un)));
17976 	ASSERT(capp != NULL);
17977 	ASSERT(lbap != NULL);
17978 
17979 	SD_TRACE(SD_LOG_IO, un,
17980 	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
17981 
17982 	/*
17983 	 * First send a READ_CAPACITY_16 command to the target.
17984 	 *
17985 	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
17986 	 * Medium Indicator bit is cleared.  The address field must be
17987 	 * zero if the PMI bit is zero.
17988 	 */
17989 	bzero(&cdb, sizeof (cdb));
17990 	bzero(&ucmd_buf, sizeof (ucmd_buf));
17991 
17992 	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
17993 
17994 	ucmd_buf.uscsi_cdb = (char *)&cdb;
17995 	ucmd_buf.uscsi_cdblen = CDB_GROUP4;
17996 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
17997 	ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
17998 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
17999 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18000 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18001 	ucmd_buf.uscsi_timeout = 60;
18002 
18003 	/*
18004 	 * Read Capacity (16) is a Service Action In command.  One
18005 	 * command byte (0x9E) is overloaded for multiple operations,
18006 	 * with the second CDB byte specifying the desired operation.
18007 	 */
18008 	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18009 	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18010 
18011 	/*
18012 	 * Fill in allocation length field
18013 	 */
18014 	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18015 
18016 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18017 	    UIO_SYSSPACE, path_flag);
18018 
18019 	switch (status) {
18020 	case 0:
18021 		/* Return failure if we did not get valid capacity data. */
18022 		if (ucmd_buf.uscsi_resid > 20) {
18023 			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18024 			return (EIO);
18025 		}
18026 
18027 		/*
18028 		 * Read capacity and block size from the READ CAPACITY 16 data.
18029 		 * This data may be adjusted later due to device specific
18030 		 * issues.
18031 		 *
18032 		 * According to the SCSI spec, the READ CAPACITY 16
18033 		 * command returns the following:
18034 		 *
18035 		 *  bytes 0-7: Maximum logical block address available.
18036 		 *		(MSB in byte:0 & LSB in byte:7)
18037 		 *
18038 		 *  bytes 8-11: Block length in bytes
18039 		 *		(MSB in byte:8 & LSB in byte:11)
18040 		 *
18041 		 */
18042 		capacity = BE_64(capacity16_buf[0]);
18043 		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18044 
18045 		/*
18046 		 * Done with capacity16_buf
18047 		 */
18048 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18049 
18050 		/*
18051 		 * If the reported capacity is set to all 0xf's, then
18052 		 * this disk is too large.  This could only happen with
18053 		 * a device that supports LBAs larger than 64 bits which
18054 		 * are not defined by any current T10 standards.
18055 		 */
18056 		if (capacity == 0xffffffffffffffff) {
18057 			return (EIO);
18058 		}
18059 		break;	/* Success! */
18060 	case EIO:
18061 		switch (ucmd_buf.uscsi_status) {
18062 		case STATUS_RESERVATION_CONFLICT:
18063 			status = EACCES;
18064 			break;
18065 		case STATUS_CHECK:
18066 			/*
18067 			 * Check condition; look for ASC/ASCQ of 0x04/0x01
18068 			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18069 			 */
18070 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18071 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18072 			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18073 				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18074 				return (EAGAIN);
18075 			}
18076 			break;
18077 		default:
18078 			break;
18079 		}
18080 		/* FALLTHRU */
18081 	default:
18082 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18083 		return (status);
18084 	}
18085 
18086 	*capp = capacity;
18087 	*lbap = lbasize;
18088 
18089 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18090 	    "capacity:0x%llx  lbasize:0x%x\n", capacity, lbasize);
18091 
18092 	return (0);
18093 }
18094 
18095 
18096 /*
18097  * Function: sd_send_scsi_START_STOP_UNIT
18098  *
18099  * Description: Issue a scsi START STOP UNIT command to the target.
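 *
 *		The flag value is written directly into byte 4 of the group 0
 *		CDB below; per the SCSI spec, bit 0 of that byte is START and
 *		bit 1 is LOEJ (load/eject), and the SD_TARGET_* values are
 *		evidently chosen to match that encoding.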
18100 * 18101 * Arguments: un - pointer to driver soft state (unit) structure for 18102 * this target. 18103 * flag - SD_TARGET_START 18104 * SD_TARGET_STOP 18105 * SD_TARGET_EJECT 18106 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18107 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18108 * to use the USCSI "direct" chain and bypass the normal 18109 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18110 * command is issued as part of an error recovery action. 18111 * 18112 * Return Code: 0 - Success 18113 * EIO - IO error 18114 * EACCES - Reservation conflict detected 18115 * ENXIO - Not Ready, medium not present * ENOTSUP - Illegal request (start/stop not supported) * EAGAIN - Eject operation already in progress 18116 * errno return code from sd_send_scsi_cmd() 18117 * 18118 * Context: Can sleep. 18119 */ 18120 18121 static int 18122 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18123 { 18124 struct scsi_extended_sense sense_buf; 18125 union scsi_cdb cdb; 18126 struct uscsi_cmd ucmd_buf; 18127 int status; 18128 18129 ASSERT(un != NULL); 18130 ASSERT(!mutex_owned(SD_MUTEX(un))); 18131 18132 SD_TRACE(SD_LOG_IO, un, 18133 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18134 18135 if (un->un_f_check_start_stop && 18136 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18137 (un->un_f_start_stop_supported != TRUE)) { 18138 return (0); 18139 } 18140 18141 /* 18142 * If an eject operation is already in progress and this 18143 * request is anything other than SD_TARGET_EJECT, 18144 * return immediately with EAGAIN. 18145 */ 18146 if (flag != SD_TARGET_EJECT) { 18147 mutex_enter(SD_MUTEX(un)); 18148 if (un->un_f_ejecting == TRUE) { 18149 mutex_exit(SD_MUTEX(un)); 18150 return (EAGAIN); 18151 } 18152 mutex_exit(SD_MUTEX(un)); 18153 } 18154 18155 bzero(&cdb, sizeof (cdb)); 18156 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18157 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18158 18159 cdb.scc_cmd = SCMD_START_STOP; 18160 cdb.cdb_opaque[4] = (uchar_t)flag; 18161 18162 ucmd_buf.uscsi_cdb = (char *)&cdb; 18163 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18164 ucmd_buf.uscsi_bufaddr = NULL; 18165 ucmd_buf.uscsi_buflen = 0; 18166 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18167 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18168 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18169 ucmd_buf.uscsi_timeout = 200; 18170 18171 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18172 UIO_SYSSPACE, path_flag); 18173 18174 switch (status) { 18175 case 0: 18176 break; /* Success! */ 18177 case EIO: 18178 switch (ucmd_buf.uscsi_status) { 18179 case STATUS_RESERVATION_CONFLICT: 18180 status = EACCES; 18181 break; 18182 case STATUS_CHECK: 18183 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18184 switch (scsi_sense_key( 18185 (uint8_t *)&sense_buf)) { 18186 case KEY_ILLEGAL_REQUEST: 18187 status = ENOTSUP; 18188 break; 18189 case KEY_NOT_READY: 18190 if (scsi_sense_asc( 18191 (uint8_t *)&sense_buf) 18192 == 0x3A) { 18193 status = ENXIO; 18194 } 18195 break; 18196 default: 18197 break; 18198 } 18199 } 18200 break; 18201 default: 18202 break; 18203 } 18204 break; 18205 default: 18206 break; 18207 } 18208 18209 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18210 18211 return (status); 18212 } 18213 18214 18215 /* 18216 * Function: sd_start_stop_unit_callback 18217 * 18218 * Description: timeout(9F) callback to begin recovery process for a 18219 * device that has spun down. 18220 * 18221 * Arguments: arg - pointer to associated softstate struct.
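 *
 *		The callback is armed elsewhere in the driver via
 *		timeout(9F) and records its id in un->un_startstop_timeid
 *		(cleared below in sd_start_stop_unit_task). An
 *		illustrative sketch of how such a callback is scheduled
 *		(the one-second delay shown here is hypothetical):
 *
 *			un->un_startstop_timeid = timeout(
 *			    sd_start_stop_unit_callback, un,
 *			    drv_usectohz(1000000));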
18222 * 18223 * Context: Executes in a timeout(9F) thread context 18224 */ 18225 18226 static void 18227 sd_start_stop_unit_callback(void *arg) 18228 { 18229 struct sd_lun *un = arg; 18230 ASSERT(un != NULL); 18231 ASSERT(!mutex_owned(SD_MUTEX(un))); 18232 18233 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 18234 18235 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 18236 } 18237 18238 18239 /* 18240 * Function: sd_start_stop_unit_task 18241 * 18242 * Description: Recovery procedure when a drive is spun down. 18243 * 18244 * Arguments: arg - pointer to associated softstate struct. 18245 * 18246 * Context: Executes in a taskq() thread context 18247 */ 18248 18249 static void 18250 sd_start_stop_unit_task(void *arg) 18251 { 18252 struct sd_lun *un = arg; 18253 18254 ASSERT(un != NULL); 18255 ASSERT(!mutex_owned(SD_MUTEX(un))); 18256 18257 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 18258 18259 /* 18260 * Some unformatted drives report not ready error, no need to 18261 * restart if format has been initiated. 18262 */ 18263 mutex_enter(SD_MUTEX(un)); 18264 if (un->un_f_format_in_progress == TRUE) { 18265 mutex_exit(SD_MUTEX(un)); 18266 return; 18267 } 18268 mutex_exit(SD_MUTEX(un)); 18269 18270 /* 18271 * When a START STOP command is issued from here, it is part of a 18272 * failure recovery operation and must be issued before any other 18273 * commands, including any pending retries. Thus it must be sent 18274 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 18275 * succeeds or not, we will start I/O after the attempt. 18276 */ 18277 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 18278 SD_PATH_DIRECT_PRIORITY); 18279 18280 /* 18281 * The above call blocks until the START_STOP_UNIT command completes. 18282 * Now that it has completed, we must re-try the original IO that 18283 * received the NOT READY condition in the first place. There are 18284 * three possible conditions here: 18285 * 18286 * (1) The original IO is on un_retry_bp. 18287 * (2) The original IO is on the regular wait queue, and un_retry_bp 18288 * is NULL. 18289 * (3) The original IO is on the regular wait queue, and un_retry_bp 18290 * points to some other, unrelated bp. 18291 * 18292 * For each case, we must call sd_start_cmds() with un_retry_bp 18293 * as the argument. If un_retry_bp is NULL, this will initiate 18294 * processing of the regular wait queue. If un_retry_bp is not NULL, 18295 * then this will process the bp on un_retry_bp. That may or may not 18296 * be the original IO, but that does not matter: the important thing 18297 * is to keep the IO processing going at this point. 18298 * 18299 * Note: This is a very specific error recovery sequence associated 18300 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 18301 * serialize the I/O with completion of the spin-up. 18302 */ 18303 mutex_enter(SD_MUTEX(un)); 18304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18305 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 18306 un, un->un_retry_bp); 18307 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 18308 sd_start_cmds(un, un->un_retry_bp); 18309 mutex_exit(SD_MUTEX(un)); 18310 18311 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 18312 } 18313 18314 18315 /* 18316 * Function: sd_send_scsi_INQUIRY 18317 * 18318 * Description: Issue the scsi INQUIRY command. 
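 *		For example (an illustrative sketch; inqbuf and resid are
 *		hypothetical locals), the Unit Serial Number VPD page can
 *		be fetched by setting the EVPD bit and page code 0x80:
 *
 *			(void) sd_send_scsi_INQUIRY(un, inqbuf,
 *			    sizeof (inqbuf), 0x01, 0x80, &resid);
 *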
18319 * 18320 * Arguments: un 18321 * bufaddr 18322 * buflen 18323 * evpd 18324 * page_code 18325 * residp 18326 * 18327 * Return Code: 0 - Success 18328 * errno return code from sd_send_scsi_cmd() 18329 * 18330 * Context: Can sleep. Does not return until command is completed. 18331 */ 18332 18333 static int 18334 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 18335 uchar_t evpd, uchar_t page_code, size_t *residp) 18336 { 18337 union scsi_cdb cdb; 18338 struct uscsi_cmd ucmd_buf; 18339 int status; 18340 18341 ASSERT(un != NULL); 18342 ASSERT(!mutex_owned(SD_MUTEX(un))); 18343 ASSERT(bufaddr != NULL); 18344 18345 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 18346 18347 bzero(&cdb, sizeof (cdb)); 18348 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18349 bzero(bufaddr, buflen); 18350 18351 cdb.scc_cmd = SCMD_INQUIRY; 18352 cdb.cdb_opaque[1] = evpd; 18353 cdb.cdb_opaque[2] = page_code; 18354 FORMG0COUNT(&cdb, buflen); 18355 18356 ucmd_buf.uscsi_cdb = (char *)&cdb; 18357 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18358 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 18359 ucmd_buf.uscsi_buflen = buflen; 18360 ucmd_buf.uscsi_rqbuf = NULL; 18361 ucmd_buf.uscsi_rqlen = 0; 18362 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 18363 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 18364 18365 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18366 UIO_SYSSPACE, SD_PATH_DIRECT); 18367 18368 if ((status == 0) && (residp != NULL)) { 18369 *residp = ucmd_buf.uscsi_resid; 18370 } 18371 18372 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 18373 18374 return (status); 18375 } 18376 18377 18378 /* 18379 * Function: sd_send_scsi_TEST_UNIT_READY 18380 * 18381 * Description: Issue the scsi TEST UNIT READY command. 18382 * This routine can be told to set the flag USCSI_DIAGNOSE to 18383 * prevent retrying failed commands. Use this when the intent 18384 * is either to check for device readiness, to clear a Unit 18385 * Attention, or to clear any outstanding sense data. 18386 * However under specific conditions the expected behavior 18387 * is for retries to bring a device ready, so use the flag 18388 * with caution. 18389 * 18390 * Arguments: un 18391 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 18392 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 18393 * 0: don't check for media present; do retries on cmd. 18394 * 18395 * Return Code: 0 - Success 18396 * EIO - IO error 18397 * EACCES - Reservation conflict detected 18398 * ENXIO - Not Ready, medium not present 18399 * errno return code from sd_send_scsi_cmd() 18400 * 18401 * Context: Can sleep. Does not return until command is completed. 18402 */ 18403 18404 static int 18405 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 18406 { 18407 struct scsi_extended_sense sense_buf; 18408 union scsi_cdb cdb; 18409 struct uscsi_cmd ucmd_buf; 18410 int status; 18411 18412 ASSERT(un != NULL); 18413 ASSERT(!mutex_owned(SD_MUTEX(un))); 18414 18415 SD_TRACE(SD_LOG_IO, un, 18416 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 18417 18418 /* 18419 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 18420 * timeouts when they receive a TUR and the queue is not empty. Check 18421 * the configuration flag set during attach (indicating the drive has 18422 * this firmware bug) and un_ncmds_in_transport before issuing the 18423 * TUR. If there are 18424 * pending commands, return success; this is a bit arbitrary but is ok 18425 * for non-removables (i.e.
the eliteI disks) and non-clustering 18426 * configurations. 18427 */ 18428 if (un->un_f_cfg_tur_check == TRUE) { 18429 mutex_enter(SD_MUTEX(un)); 18430 if (un->un_ncmds_in_transport != 0) { 18431 mutex_exit(SD_MUTEX(un)); 18432 return (0); 18433 } 18434 mutex_exit(SD_MUTEX(un)); 18435 } 18436 18437 bzero(&cdb, sizeof (cdb)); 18438 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18439 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18440 18441 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18442 18443 ucmd_buf.uscsi_cdb = (char *)&cdb; 18444 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18445 ucmd_buf.uscsi_bufaddr = NULL; 18446 ucmd_buf.uscsi_buflen = 0; 18447 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18448 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18449 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18450 18451 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18452 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18453 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18454 } 18455 ucmd_buf.uscsi_timeout = 60; 18456 18457 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18458 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18459 SD_PATH_STANDARD)); 18460 18461 switch (status) { 18462 case 0: 18463 break; /* Success! */ 18464 case EIO: 18465 switch (ucmd_buf.uscsi_status) { 18466 case STATUS_RESERVATION_CONFLICT: 18467 status = EACCES; 18468 break; 18469 case STATUS_CHECK: 18470 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18471 break; 18472 } 18473 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18474 (scsi_sense_key((uint8_t *)&sense_buf) == 18475 KEY_NOT_READY) && 18476 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18477 status = ENXIO; 18478 } 18479 break; 18480 default: 18481 break; 18482 } 18483 break; 18484 default: 18485 break; 18486 } 18487 18488 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18489 18490 return (status); 18491 } 18492 18493 18494 /* 18495 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18496 * 18497 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18498 * 18499 * Arguments: un 18500 * 18501 * Return Code: 0 - Success 18502 * EACCES 18503 * ENOTSUP 18504 * errno return code from sd_send_scsi_cmd() 18505 * 18506 * Context: Can sleep. Does not return until command is completed. 
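 *
 *		Per the signature below, usr_cmd selects SD_READ_KEYS or
 *		SD_READ_RESV, and data_len/data_bufp describe the caller's
 *		buffer; when data_bufp is NULL (and data_len is 0) a
 *		default MHIOC_RESV_KEY_SIZE buffer is allocated
 *		internally. Illustrative sketch of the simplest call:
 *
 *			status = sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			    SD_READ_KEYS, 0, NULL);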
18507 */ 18508 18509 static int 18510 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 18511 uint16_t data_len, uchar_t *data_bufp) 18512 { 18513 struct scsi_extended_sense sense_buf; 18514 union scsi_cdb cdb; 18515 struct uscsi_cmd ucmd_buf; 18516 int status; 18517 int no_caller_buf = FALSE; 18518 18519 ASSERT(un != NULL); 18520 ASSERT(!mutex_owned(SD_MUTEX(un))); 18521 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 18522 18523 SD_TRACE(SD_LOG_IO, un, 18524 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 18525 18526 bzero(&cdb, sizeof (cdb)); 18527 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18528 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18529 if (data_bufp == NULL) { 18530 /* Allocate a default buf if the caller did not give one */ 18531 ASSERT(data_len == 0); 18532 data_len = MHIOC_RESV_KEY_SIZE; 18533 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 18534 no_caller_buf = TRUE; 18535 } 18536 18537 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 18538 cdb.cdb_opaque[1] = usr_cmd; 18539 FORMG1COUNT(&cdb, data_len); 18540 18541 ucmd_buf.uscsi_cdb = (char *)&cdb; 18542 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18543 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 18544 ucmd_buf.uscsi_buflen = data_len; 18545 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18546 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18547 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18548 ucmd_buf.uscsi_timeout = 60; 18549 18550 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18551 UIO_SYSSPACE, SD_PATH_STANDARD); 18552 18553 switch (status) { 18554 case 0: 18555 break; /* Success! */ 18556 case EIO: 18557 switch (ucmd_buf.uscsi_status) { 18558 case STATUS_RESERVATION_CONFLICT: 18559 status = EACCES; 18560 break; 18561 case STATUS_CHECK: 18562 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18563 (scsi_sense_key((uint8_t *)&sense_buf) == 18564 KEY_ILLEGAL_REQUEST)) { 18565 status = ENOTSUP; 18566 } 18567 break; 18568 default: 18569 break; 18570 } 18571 break; 18572 default: 18573 break; 18574 } 18575 18576 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 18577 18578 if (no_caller_buf == TRUE) { 18579 kmem_free(data_bufp, data_len); 18580 } 18581 18582 return (status); 18583 } 18584 18585 18586 /* 18587 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 18588 * 18589 * Description: This routine is the driver entry point for handling 18590 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 18591 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, and 18592 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT commands to the device. 18593 * 18594 * Arguments: un - Pointer to soft state struct for the target. 18595 * usr_cmd SCSI-3 reservation facility command (one of 18596 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 18597 * SD_SCSI3_PREEMPTANDABORT, or SD_SCSI3_REGISTERANDIGNOREKEY) 18598 * usr_bufp - user provided pointer to a register, reserve descriptor, or 18599 * preempt and abort structure (mhioc_register_t, mhioc_resv_desc_t, 18600 * mhioc_preemptandabort_t, or mhioc_registerandignorekey_t) 18601 * 18602 * Return Code: 0 - Success 18603 * EACCES 18604 * ENOTSUP 18605 * errno return code from sd_send_scsi_cmd() 18606 * 18607 * Context: Can sleep. Does not return until command is completed.
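 *
 *		The 24-byte PROUT parameter list (sd_prout_t) built below
 *		is populated roughly as follows (a sketch based on the
 *		field assignments in the switch statement in the body):
 *
 *			res_key[8]	- reservation key
 *			service_key[8]	- service action (new/victim) key
 *			scope_address	- scope-specific address, BE_32()
 *			aptpl		- activate persist through power loss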
18608 */ 18609 18610 static int 18611 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18612 uchar_t *usr_bufp) 18613 { 18614 struct scsi_extended_sense sense_buf; 18615 union scsi_cdb cdb; 18616 struct uscsi_cmd ucmd_buf; 18617 int status; 18618 uchar_t data_len = sizeof (sd_prout_t); 18619 sd_prout_t *prp; 18620 18621 ASSERT(un != NULL); 18622 ASSERT(!mutex_owned(SD_MUTEX(un))); 18623 ASSERT(data_len == 24); /* required by scsi spec */ 18624 18625 SD_TRACE(SD_LOG_IO, un, 18626 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 18627 18628 if (usr_bufp == NULL) { 18629 return (EINVAL); 18630 } 18631 18632 bzero(&cdb, sizeof (cdb)); 18633 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18634 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18635 prp = kmem_zalloc(data_len, KM_SLEEP); 18636 18637 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 18638 cdb.cdb_opaque[1] = usr_cmd; 18639 FORMG1COUNT(&cdb, data_len); 18640 18641 ucmd_buf.uscsi_cdb = (char *)&cdb; 18642 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18643 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 18644 ucmd_buf.uscsi_buflen = data_len; 18645 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18646 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18647 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 18648 ucmd_buf.uscsi_timeout = 60; 18649 18650 switch (usr_cmd) { 18651 case SD_SCSI3_REGISTER: { 18652 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 18653 18654 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18655 bcopy(ptr->newkey.key, prp->service_key, 18656 MHIOC_RESV_KEY_SIZE); 18657 prp->aptpl = ptr->aptpl; 18658 break; 18659 } 18660 case SD_SCSI3_RESERVE: 18661 case SD_SCSI3_RELEASE: { 18662 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 18663 18664 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18665 prp->scope_address = BE_32(ptr->scope_specific_addr); 18666 cdb.cdb_opaque[2] = ptr->type; 18667 break; 18668 } 18669 case SD_SCSI3_PREEMPTANDABORT: { 18670 mhioc_preemptandabort_t *ptr = 18671 (mhioc_preemptandabort_t *)usr_bufp; 18672 18673 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 18674 bcopy(ptr->victim_key.key, prp->service_key, 18675 MHIOC_RESV_KEY_SIZE); 18676 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 18677 cdb.cdb_opaque[2] = ptr->resvdesc.type; 18678 ucmd_buf.uscsi_flags |= USCSI_HEAD; 18679 break; 18680 } 18681 case SD_SCSI3_REGISTERANDIGNOREKEY: 18682 { 18683 mhioc_registerandignorekey_t *ptr; 18684 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 18685 bcopy(ptr->newkey.key, 18686 prp->service_key, MHIOC_RESV_KEY_SIZE); 18687 prp->aptpl = ptr->aptpl; 18688 break; 18689 } 18690 default: 18691 ASSERT(FALSE); 18692 break; 18693 } 18694 18695 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18696 UIO_SYSSPACE, SD_PATH_STANDARD); 18697 18698 switch (status) { 18699 case 0: 18700 break; /* Success! 
*/ 18701 case EIO: 18702 switch (ucmd_buf.uscsi_status) { 18703 case STATUS_RESERVATION_CONFLICT: 18704 status = EACCES; 18705 break; 18706 case STATUS_CHECK: 18707 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18708 (scsi_sense_key((uint8_t *)&sense_buf) == 18709 KEY_ILLEGAL_REQUEST)) { 18710 status = ENOTSUP; 18711 } 18712 break; 18713 default: 18714 break; 18715 } 18716 break; 18717 default: 18718 break; 18719 } 18720 18721 kmem_free(prp, data_len); 18722 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 18723 return (status); 18724 } 18725 18726 18727 /* 18728 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 18729 * 18730 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 18731 * 18732 * Arguments: un - pointer to the target's soft state struct 18733 * dkc - pointer to the callback structure 18734 * 18735 * Return Code: 0 - success 18736 * errno-type error code 18737 * 18738 * Context: kernel thread context only. 18739 * 18740 * _______________________________________________________________ 18741 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 18742 * |FLUSH_VOLATILE| | operation | 18743 * |______________|______________|_________________________________| 18744 * | 0 | NULL | Synchronous flush on both | 18745 * | | | volatile and non-volatile cache | 18746 * |______________|______________|_________________________________| 18747 * | 1 | NULL | Synchronous flush on volatile | 18748 * | | | cache; disk drivers may suppress| 18749 * | | | flush if disk table indicates | 18750 * | | | non-volatile cache | 18751 * |______________|______________|_________________________________| 18752 * | 0 | !NULL | Asynchronous flush on both | 18753 * | | | volatile and non-volatile cache;| 18754 * |______________|______________|_________________________________| 18755 * | 1 | !NULL | Asynchronous flush on volatile | 18756 * | | | cache; disk drivers may suppress| 18757 * | | | flush if disk table indicates | 18758 * | | | non-volatile cache | 18759 * |______________|______________|_________________________________| 18760 * 18761 */ 18762 18763 static int 18764 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 18765 { 18766 struct sd_uscsi_info *uip; 18767 struct uscsi_cmd *uscmd; 18768 union scsi_cdb *cdb; 18769 struct buf *bp; 18770 int rval = 0; 18771 int is_async; 18772 18773 SD_TRACE(SD_LOG_IO, un, 18774 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 18775 18776 ASSERT(un != NULL); 18777 ASSERT(!mutex_owned(SD_MUTEX(un))); 18778 18779 if (dkc == NULL || dkc->dkc_callback == NULL) { 18780 is_async = FALSE; 18781 } else { 18782 is_async = TRUE; 18783 } 18784 18785 mutex_enter(SD_MUTEX(un)); 18786 /* check whether cache flush should be suppressed */ 18787 if (un->un_f_suppress_cache_flush == TRUE) { 18788 mutex_exit(SD_MUTEX(un)); 18789 /* 18790 * suppress the cache flush if the device is told to do 18791 * so by sd.conf or disk table 18792 */ 18793 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 18794 skip the cache flush since suppress_cache_flush is %d!\n", 18795 un->un_f_suppress_cache_flush); 18796 18797 if (is_async == TRUE) { 18798 /* invoke callback for asynchronous flush */ 18799 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 18800 } 18801 return (rval); 18802 } 18803 mutex_exit(SD_MUTEX(un)); 18804 18805 /* 18806 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 18807 * set properly 18808 */ 18809 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 18810 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 18811 18812 
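	/*
	 * Note on the SYNC_NV handling below: SD_SYNC_NV_BIT is OR-ed
	 * into CDB byte 1 (cdb_un.tag); the assumption is that it
	 * corresponds to the SBC-2 SYNC_NV bit of SYNCHRONIZE CACHE(10),
	 * which asks the device to flush only its volatile cache.
	 */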
mutex_enter(SD_MUTEX(un)); 18813 if (dkc != NULL && un->un_f_sync_nv_supported && 18814 (dkc->dkc_flag & FLUSH_VOLATILE)) { 18815 /* 18816 * If the device supports the SYNC_NV bit, turn it on so that 18817 * only the volatile cache is flushed. 18818 */ 18819 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 18820 } 18821 mutex_exit(SD_MUTEX(un)); 18822 18823 /* 18824 * First get some memory for the uscsi_cmd struct and cdb 18825 * and initialize for SYNCHRONIZE_CACHE cmd. 18826 */ 18827 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 18828 uscmd->uscsi_cdblen = CDB_GROUP1; 18829 uscmd->uscsi_cdb = (caddr_t)cdb; 18830 uscmd->uscsi_bufaddr = NULL; 18831 uscmd->uscsi_buflen = 0; 18832 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 18833 uscmd->uscsi_rqlen = SENSE_LENGTH; 18834 uscmd->uscsi_rqresid = SENSE_LENGTH; 18835 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18836 uscmd->uscsi_timeout = sd_io_time; 18837 18838 /* 18839 * Allocate an sd_uscsi_info struct and fill it with the info 18840 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 18841 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 18842 * since we allocate the buf here in this function, we do not 18843 * need to preserve the prior contents of b_private. 18844 * The sd_uscsi_info struct is also used by sd_uscsi_strategy(). 18845 */ 18846 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 18847 uip->ui_flags = SD_PATH_DIRECT; 18848 uip->ui_cmdp = uscmd; 18849 18850 bp = getrbuf(KM_SLEEP); 18851 bp->b_private = uip; 18852 18853 /* 18854 * Setup buffer to carry uscsi request. 18855 */ 18856 bp->b_flags = B_BUSY; 18857 bp->b_bcount = 0; 18858 bp->b_blkno = 0; 18859 18860 if (is_async == TRUE) { 18861 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 18862 uip->ui_dkc = *dkc; 18863 } 18864 18865 bp->b_edev = SD_GET_DEV(un); 18866 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 18867 18868 (void) sd_uscsi_strategy(bp); 18869 18870 /* 18871 * If this is a synchronous request, wait for completion. 18872 * If async, just return and let the b_iodone callback 18873 * clean up. 18874 * NOTE: On return, un_ncmds_in_driver will be decremented, 18875 * but it was also incremented in sd_uscsi_strategy(), so 18876 * we should be ok. 18877 */ 18878 if (is_async == FALSE) { 18879 (void) biowait(bp); 18880 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 18881 } 18882 18883 return (rval); 18884 } 18885 18886 18887 static int 18888 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 18889 { 18890 struct sd_uscsi_info *uip; 18891 struct uscsi_cmd *uscmd; 18892 uint8_t *sense_buf; 18893 struct sd_lun *un; 18894 int status; 18895 union scsi_cdb *cdb; 18896 18897 uip = (struct sd_uscsi_info *)(bp->b_private); 18898 ASSERT(uip != NULL); 18899 18900 uscmd = uip->ui_cmdp; 18901 ASSERT(uscmd != NULL); 18902 18903 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 18904 ASSERT(sense_buf != NULL); 18905 18906 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 18907 ASSERT(un != NULL); 18908 18909 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 18910 18911 status = geterror(bp); 18912 switch (status) { 18913 case 0: 18914 break; /* Success!
*/ 18915 case EIO: 18916 switch (uscmd->uscsi_status) { 18917 case STATUS_RESERVATION_CONFLICT: 18918 /* Ignore reservation conflict */ 18919 status = 0; 18920 goto done; 18921 18922 case STATUS_CHECK: 18923 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 18924 (scsi_sense_key(sense_buf) == 18925 KEY_ILLEGAL_REQUEST)) { 18926 /* Ignore Illegal Request error */ 18927 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 18928 mutex_enter(SD_MUTEX(un)); 18929 un->un_f_sync_nv_supported = FALSE; 18930 mutex_exit(SD_MUTEX(un)); 18931 status = 0; 18932 SD_TRACE(SD_LOG_IO, un, 18933 "un_f_sync_nv_supported \ 18934 is set to false.\n"); 18935 goto done; 18936 } 18937 18938 mutex_enter(SD_MUTEX(un)); 18939 un->un_f_sync_cache_supported = FALSE; 18940 mutex_exit(SD_MUTEX(un)); 18941 SD_TRACE(SD_LOG_IO, un, 18942 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 18943 un_f_sync_cache_supported set to false \ 18944 with asc = %x, ascq = %x\n", 18945 scsi_sense_asc(sense_buf), 18946 scsi_sense_ascq(sense_buf)); 18947 status = ENOTSUP; 18948 goto done; 18949 } 18950 break; 18951 default: 18952 break; 18953 } 18954 /* FALLTHRU */ 18955 default: 18956 /* 18957 * Don't log an error message if this device 18958 * has removable media. 18959 */ 18960 if (!un->un_f_has_removable_media) { 18961 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18962 "SYNCHRONIZE CACHE command failed (%d)\n", status); 18963 } 18964 break; 18965 } 18966 18967 done: 18968 if (uip->ui_dkc.dkc_callback != NULL) { 18969 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 18970 } 18971 18972 ASSERT((bp->b_flags & B_REMAPPED) == 0); 18973 freerbuf(bp); 18974 kmem_free(uip, sizeof (struct sd_uscsi_info)); 18975 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 18976 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 18977 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 18978 18979 return (status); 18980 } 18981 18982 18983 /* 18984 * Function: sd_send_scsi_GET_CONFIGURATION 18985 * 18986 * Description: Issues the get configuration command to the device. 18987 * Called from sd_check_for_writable_cd & sd_get_media_info; 18988 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 18989 * Arguments: un 18990 * ucmdbuf 18991 * rqbuf 18992 * rqbuflen 18993 * bufaddr 18994 * buflen 18995 * path_flag 18996 * 18997 * Return Code: 0 - Success 18998 * errno return code from sd_send_scsi_cmd() 18999 * 19000 * Context: Can sleep. Does not return until command is completed. 19001 * 19002 */ 19003 19004 static int 19005 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 19006 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 19007 int path_flag) 19008 { 19009 char cdb[CDB_GROUP1]; 19010 int status; 19011 19012 ASSERT(un != NULL); 19013 ASSERT(!mutex_owned(SD_MUTEX(un))); 19014 ASSERT(bufaddr != NULL); 19015 ASSERT(ucmdbuf != NULL); 19016 ASSERT(rqbuf != NULL); 19017 19018 SD_TRACE(SD_LOG_IO, un, 19019 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 19020 19021 bzero(cdb, sizeof (cdb)); 19022 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19023 bzero(rqbuf, rqbuflen); 19024 bzero(bufaddr, buflen); 19025 19026 /* 19027 * Set up cdb field for the get configuration command.
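	 * A sketch of the bytes filled in below (per the MMC GET
	 * CONFIGURATION layout; values taken from the assignments that
	 * follow):
	 *	cdb[0] - 0x46 (SCMD_GET_CONFIGURATION)
	 *	cdb[1] - 0x02 (Requested Type field)
	 *	cdb[8] - allocation length (SD_PROFILE_HEADER_LEN; only
	 *		 the low byte of the two-byte length is needed)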
19028 */ 19029 cdb[0] = SCMD_GET_CONFIGURATION; 19030 cdb[1] = 0x02; /* Requested Type */ 19031 cdb[8] = SD_PROFILE_HEADER_LEN; 19032 ucmdbuf->uscsi_cdb = cdb; 19033 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19034 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19035 ucmdbuf->uscsi_buflen = buflen; 19036 ucmdbuf->uscsi_timeout = sd_io_time; 19037 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19038 ucmdbuf->uscsi_rqlen = rqbuflen; 19039 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19040 19041 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19042 UIO_SYSSPACE, path_flag); 19043 19044 switch (status) { 19045 case 0: 19046 break; /* Success! */ 19047 case EIO: 19048 switch (ucmdbuf->uscsi_status) { 19049 case STATUS_RESERVATION_CONFLICT: 19050 status = EACCES; 19051 break; 19052 default: 19053 break; 19054 } 19055 break; 19056 default: 19057 break; 19058 } 19059 19060 if (status == 0) { 19061 SD_DUMP_MEMORY(un, SD_LOG_IO, 19062 "sd_send_scsi_GET_CONFIGURATION: data", 19063 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19064 } 19065 19066 SD_TRACE(SD_LOG_IO, un, 19067 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19068 19069 return (status); 19070 } 19071 19072 /* 19073 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19074 * 19075 * Description: Issues the get configuration command to the device to 19076 * retrieve a specific feature. Called from 19077 * sd_check_for_writable_cd & sd_set_mmc_caps. 19078 * Arguments: un 19079 * ucmdbuf 19080 * rqbuf 19081 * rqbuflen 19082 * bufaddr 19083 * buflen 19084 * feature 19085 * 19086 * Return Code: 0 - Success 19087 * errno return code from sd_send_scsi_cmd() 19088 * 19089 * Context: Can sleep. Does not return until command is completed. 19090 * 19091 */ 19092 static int 19093 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19094 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19095 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19096 { 19097 char cdb[CDB_GROUP1]; 19098 int status; 19099 19100 ASSERT(un != NULL); 19101 ASSERT(!mutex_owned(SD_MUTEX(un))); 19102 ASSERT(bufaddr != NULL); 19103 ASSERT(ucmdbuf != NULL); 19104 ASSERT(rqbuf != NULL); 19105 19106 SD_TRACE(SD_LOG_IO, un, 19107 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19108 19109 bzero(cdb, sizeof (cdb)); 19110 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19111 bzero(rqbuf, rqbuflen); 19112 bzero(bufaddr, buflen); 19113 19114 /* 19115 * Set up cdb field for the get configuration command. 19116 */ 19117 cdb[0] = SCMD_GET_CONFIGURATION; 19118 cdb[1] = 0x02; /* Requested Type */ 19119 cdb[3] = feature; 19120 cdb[8] = buflen; 19121 ucmdbuf->uscsi_cdb = cdb; 19122 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19123 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19124 ucmdbuf->uscsi_buflen = buflen; 19125 ucmdbuf->uscsi_timeout = sd_io_time; 19126 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19127 ucmdbuf->uscsi_rqlen = rqbuflen; 19128 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19129 19130 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19131 UIO_SYSSPACE, path_flag); 19132 19133 switch (status) { 19134 case 0: 19135 break; /* Success! 
*/ 19136 case EIO: 19137 switch (ucmdbuf->uscsi_status) { 19138 case STATUS_RESERVATION_CONFLICT: 19139 status = EACCES; 19140 break; 19141 default: 19142 break; 19143 } 19144 break; 19145 default: 19146 break; 19147 } 19148 19149 if (status == 0) { 19150 SD_DUMP_MEMORY(un, SD_LOG_IO, 19151 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19152 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19153 } 19154 19155 SD_TRACE(SD_LOG_IO, un, 19156 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19157 19158 return (status); 19159 } 19160 19161 19162 /* 19163 * Function: sd_send_scsi_MODE_SENSE 19164 * 19165 * Description: Utility function for issuing a scsi MODE SENSE command. 19166 * Note: This routine uses a consistent implementation for Group0, 19167 * Group1, and Group2 commands across all platforms. ATAPI devices 19168 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19169 * 19170 * Arguments: un - pointer to the softstate struct for the target. 19171 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19172 * CDB_GROUP[1|2] (10 byte)). 19173 * bufaddr - buffer for page data retrieved from the target. 19174 * buflen - size of page to be retrieved. 19175 * page_code - page code of data to be retrieved from the target. 19176 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19177 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19178 * to use the USCSI "direct" chain and bypass the normal 19179 * command waitq. 19180 * 19181 * Return Code: 0 - Success 19182 * errno return code from sd_send_scsi_cmd() 19183 * 19184 * Context: Can sleep. Does not return until command is completed. 19185 */ 19186 19187 static int 19188 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19189 size_t buflen, uchar_t page_code, int path_flag) 19190 { 19191 struct scsi_extended_sense sense_buf; 19192 union scsi_cdb cdb; 19193 struct uscsi_cmd ucmd_buf; 19194 int status; 19195 int headlen; 19196 19197 ASSERT(un != NULL); 19198 ASSERT(!mutex_owned(SD_MUTEX(un))); 19199 ASSERT(bufaddr != NULL); 19200 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19201 (cdbsize == CDB_GROUP2)); 19202 19203 SD_TRACE(SD_LOG_IO, un, 19204 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19205 19206 bzero(&cdb, sizeof (cdb)); 19207 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19208 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19209 bzero(bufaddr, buflen); 19210 19211 if (cdbsize == CDB_GROUP0) { 19212 cdb.scc_cmd = SCMD_MODE_SENSE; 19213 cdb.cdb_opaque[2] = page_code; 19214 FORMG0COUNT(&cdb, buflen); 19215 headlen = MODE_HEADER_LENGTH; 19216 } else { 19217 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 19218 cdb.cdb_opaque[2] = page_code; 19219 FORMG1COUNT(&cdb, buflen); 19220 headlen = MODE_HEADER_LENGTH_GRP2; 19221 } 19222 19223 ASSERT(headlen <= buflen); 19224 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19225 19226 ucmd_buf.uscsi_cdb = (char *)&cdb; 19227 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19228 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19229 ucmd_buf.uscsi_buflen = buflen; 19230 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19231 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19232 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19233 ucmd_buf.uscsi_timeout = 60; 19234 19235 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19236 UIO_SYSSPACE, path_flag); 19237 19238 switch (status) { 19239 case 0: 19240 /* 19241 * sr_check_wp() uses the 0x3f page code and checks the header 19242 * of the mode page to determine if the target device is write-protected. 19243 * But some USB devices return 0 bytes for the 0x3f page code. For 19244 * this case, make sure that at least the mode page header is 19245 * returned. 19246 */ 19247 if (buflen - ucmd_buf.uscsi_resid < headlen) 19248 status = EIO; 19249 break; /* Success! */ 19250 case EIO: 19251 switch (ucmd_buf.uscsi_status) { 19252 case STATUS_RESERVATION_CONFLICT: 19253 status = EACCES; 19254 break; 19255 default: 19256 break; 19257 } 19258 break; 19259 default: 19260 break; 19261 } 19262 19263 if (status == 0) { 19264 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 19265 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19266 } 19267 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19268 19269 return (status); 19270 } 19271 19272 19273 /* 19274 * Function: sd_send_scsi_MODE_SELECT 19275 * 19276 * Description: Utility function for issuing a scsi MODE SELECT command. 19277 * Note: This routine uses a consistent implementation for Group0, 19278 * Group1, and Group2 commands across all platforms. ATAPI devices 19279 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19280 * 19281 * Arguments: un - pointer to the softstate struct for the target. 19282 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 19283 * CDB_GROUP[1|2] (10 byte)). 19284 * bufaddr - buffer containing the page data to be sent to the target. 19285 * buflen - size of the page to be transferred. 19286 * save_page - boolean to determine if the SP bit should be set. 19287 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19288 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19289 * to use the USCSI "direct" chain and bypass the normal 19290 * command waitq. 19291 * 19292 * Return Code: 0 - Success 19293 * errno return code from sd_send_scsi_cmd() 19294 * 19295 * Context: Can sleep. Does not return until command is completed.
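 *
 *		Illustrative usage sketch (mode_buf/mode_buflen are
 *		hypothetical): to send a mode page and ask the device to
 *		save it across power cycles, pass SD_SAVE_PAGE so the SP
 *		bit is set:
 *
 *			status = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1,
 *			    mode_buf, mode_buflen, SD_SAVE_PAGE,
 *			    SD_PATH_DIRECT);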
19296 */ 19297 19298 static int 19299 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19300 size_t buflen, uchar_t save_page, int path_flag) 19301 { 19302 struct scsi_extended_sense sense_buf; 19303 union scsi_cdb cdb; 19304 struct uscsi_cmd ucmd_buf; 19305 int status; 19306 19307 ASSERT(un != NULL); 19308 ASSERT(!mutex_owned(SD_MUTEX(un))); 19309 ASSERT(bufaddr != NULL); 19310 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19311 (cdbsize == CDB_GROUP2)); 19312 19313 SD_TRACE(SD_LOG_IO, un, 19314 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 19315 19316 bzero(&cdb, sizeof (cdb)); 19317 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19318 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19319 19320 /* Set the PF bit for many third party drives */ 19321 cdb.cdb_opaque[1] = 0x10; 19322 19323 /* Set the save page (SP) bit if requested */ 19324 if (save_page == SD_SAVE_PAGE) { 19325 cdb.cdb_opaque[1] |= 0x01; 19326 } 19327 19328 if (cdbsize == CDB_GROUP0) { 19329 cdb.scc_cmd = SCMD_MODE_SELECT; 19330 FORMG0COUNT(&cdb, buflen); 19331 } else { 19332 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 19333 FORMG1COUNT(&cdb, buflen); 19334 } 19335 19336 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19337 19338 ucmd_buf.uscsi_cdb = (char *)&cdb; 19339 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19340 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19341 ucmd_buf.uscsi_buflen = buflen; 19342 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19343 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19344 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19345 ucmd_buf.uscsi_timeout = 60; 19346 19347 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19348 UIO_SYSSPACE, path_flag); 19349 19350 switch (status) { 19351 case 0: 19352 break; /* Success! */ 19353 case EIO: 19354 switch (ucmd_buf.uscsi_status) { 19355 case STATUS_RESERVATION_CONFLICT: 19356 status = EACCES; 19357 break; 19358 default: 19359 break; 19360 } 19361 break; 19362 default: 19363 break; 19364 } 19365 19366 if (status == 0) { 19367 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 19368 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19369 } 19370 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 19371 19372 return (status); 19373 } 19374 19375 19376 /* 19377 * Function: sd_send_scsi_RDWR 19378 * 19379 * Description: Issue a scsi READ or WRITE command with the given parameters. 19380 * 19381 * Arguments: un: Pointer to the sd_lun struct for the target. 19382 * cmd: SCMD_READ or SCMD_WRITE 19383 * bufaddr: Address of the caller's buffer for the RDWR data. 19384 * buflen: Length of the caller's buffer for the RDWR data. 19385 * start_block: Block number for the start of the RDWR operation. 19386 * (Assumes target-native block size.) 19389 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19390 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19391 * to use the USCSI "direct" chain and bypass the normal 19392 * command waitq. 19393 * 19394 * Return Code: 0 - Success 19395 * errno return code from sd_send_scsi_cmd() 19396 * 19397 * Context: Can sleep. Does not return until command is completed.
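 *
 *		Illustrative usage sketch (blkbuf is a hypothetical
 *		buffer of at least one target block): read the first
 *		target-native block of the device:
 *
 *			status = sd_send_scsi_RDWR(un, SCMD_READ, blkbuf,
 *			    un->un_tgt_blocksize, (daddr_t)0,
 *			    SD_PATH_STANDARD);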
19398 */ 19399 19400 static int 19401 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19402 size_t buflen, daddr_t start_block, int path_flag) 19403 { 19404 struct scsi_extended_sense sense_buf; 19405 union scsi_cdb cdb; 19406 struct uscsi_cmd ucmd_buf; 19407 uint32_t block_count; 19408 int status; 19409 int cdbsize; 19410 uchar_t flag; 19411 19412 ASSERT(un != NULL); 19413 ASSERT(!mutex_owned(SD_MUTEX(un))); 19414 ASSERT(bufaddr != NULL); 19415 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19416 19417 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19418 19419 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19420 return (EINVAL); 19421 } 19422 19423 mutex_enter(SD_MUTEX(un)); 19424 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19425 mutex_exit(SD_MUTEX(un)); 19426 19427 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19428 19429 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19430 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19431 bufaddr, buflen, start_block, block_count); 19432 19433 bzero(&cdb, sizeof (cdb)); 19434 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19435 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19436 19437 /* Compute CDB size to use */ 19438 if (start_block > 0xffffffff) 19439 cdbsize = CDB_GROUP4; 19440 else if ((start_block & 0xFFE00000) || 19441 (un->un_f_cfg_is_atapi == TRUE)) 19442 cdbsize = CDB_GROUP1; 19443 else 19444 cdbsize = CDB_GROUP0; 19445 19446 switch (cdbsize) { 19447 case CDB_GROUP0: /* 6-byte CDBs */ 19448 cdb.scc_cmd = cmd; 19449 FORMG0ADDR(&cdb, start_block); 19450 FORMG0COUNT(&cdb, block_count); 19451 break; 19452 case CDB_GROUP1: /* 10-byte CDBs */ 19453 cdb.scc_cmd = cmd | SCMD_GROUP1; 19454 FORMG1ADDR(&cdb, start_block); 19455 FORMG1COUNT(&cdb, block_count); 19456 break; 19457 case CDB_GROUP4: /* 16-byte CDBs */ 19458 cdb.scc_cmd = cmd | SCMD_GROUP4; 19459 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19460 FORMG4COUNT(&cdb, block_count); 19461 break; 19462 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19463 default: 19464 /* All others reserved */ 19465 return (EINVAL); 19466 } 19467 19468 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19469 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19470 19471 ucmd_buf.uscsi_cdb = (char *)&cdb; 19472 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19473 ucmd_buf.uscsi_bufaddr = bufaddr; 19474 ucmd_buf.uscsi_buflen = buflen; 19475 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19476 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19477 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19478 ucmd_buf.uscsi_timeout = 60; 19479 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19480 UIO_SYSSPACE, path_flag); 19481 switch (status) { 19482 case 0: 19483 break; /* Success! */ 19484 case EIO: 19485 switch (ucmd_buf.uscsi_status) { 19486 case STATUS_RESERVATION_CONFLICT: 19487 status = EACCES; 19488 break; 19489 default: 19490 break; 19491 } 19492 break; 19493 default: 19494 break; 19495 } 19496 19497 if (status == 0) { 19498 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19499 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19500 } 19501 19502 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19503 19504 return (status); 19505 } 19506 19507 19508 /* 19509 * Function: sd_send_scsi_LOG_SENSE 19510 * 19511 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19512 * 19513 * Arguments: un: Pointer to the sd_lun struct for the target. 
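 *		(Per the signature below: bufaddr/buflen describe the
 *		data-in buffer, page_code and page_control select the log
 *		page, param_ptr is the parameter pointer placed in CDB
 *		bytes 5-6, and path_flag selects the command chain as in
 *		the other routines in this file.)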
19514 * 19515 * Return Code: 0 - Success 19516 * errno return code from sd_send_scsi_cmd() 19517 * 19518 * Context: Can sleep. Does not return until command is completed. 19519 */ 19520 19521 static int 19522 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19523 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19524 int path_flag) 19525 19526 { 19527 struct scsi_extended_sense sense_buf; 19528 union scsi_cdb cdb; 19529 struct uscsi_cmd ucmd_buf; 19530 int status; 19531 19532 ASSERT(un != NULL); 19533 ASSERT(!mutex_owned(SD_MUTEX(un))); 19534 19535 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19536 19537 bzero(&cdb, sizeof (cdb)); 19538 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19539 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19540 19541 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19542 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19543 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19544 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19545 FORMG1COUNT(&cdb, buflen); 19546 19547 ucmd_buf.uscsi_cdb = (char *)&cdb; 19548 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19549 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19550 ucmd_buf.uscsi_buflen = buflen; 19551 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19552 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19553 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19554 ucmd_buf.uscsi_timeout = 60; 19555 19556 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19557 UIO_SYSSPACE, path_flag); 19558 19559 switch (status) { 19560 case 0: 19561 break; 19562 case EIO: 19563 switch (ucmd_buf.uscsi_status) { 19564 case STATUS_RESERVATION_CONFLICT: 19565 status = EACCES; 19566 break; 19567 case STATUS_CHECK: 19568 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19569 (scsi_sense_key((uint8_t *)&sense_buf) == 19570 KEY_ILLEGAL_REQUEST) && 19571 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19572 /* 19573 * ASC 0x24: INVALID FIELD IN CDB 19574 */ 19575 switch (page_code) { 19576 case START_STOP_CYCLE_PAGE: 19577 /* 19578 * The start stop cycle counter is 19579 * implemented as page 0x31 in earlier 19580 * generation disks. In new generation 19581 * disks the start stop cycle counter is 19582 * implemented as page 0xE. To properly 19583 * handle this case if an attempt for 19584 * log page 0xE is made and fails we 19585 * will try again using page 0x31. 19586 * 19587 * Network storage BU committed to 19588 * maintain the page 0x31 for this 19589 * purpose and will not have any other 19590 * page implemented with page code 0x31 19591 * until all disks transition to the 19592 * standard page. 
19593 */ 19594 mutex_enter(SD_MUTEX(un)); 19595 un->un_start_stop_cycle_page = 19596 START_STOP_CYCLE_VU_PAGE; 19597 cdb.cdb_opaque[2] = 19598 (char)(page_control << 6) | 19599 un->un_start_stop_cycle_page; 19600 mutex_exit(SD_MUTEX(un)); 19601 status = sd_send_scsi_cmd( 19602 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19603 UIO_SYSSPACE, path_flag); 19604 19605 break; 19606 case TEMPERATURE_PAGE: 19607 status = ENOTTY; 19608 break; 19609 default: 19610 break; 19611 } 19612 } 19613 break; 19614 default: 19615 break; 19616 } 19617 break; 19618 default: 19619 break; 19620 } 19621 19622 if (status == 0) { 19623 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 19624 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19625 } 19626 19627 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 19628 19629 return (status); 19630 } 19631 19632 19633 /* 19634 * Function: sdioctl 19635 * 19636 * Description: Driver's ioctl(9e) entry point function. 19637 * 19638 * Arguments: dev - device number 19639 * cmd - ioctl operation to be performed 19640 * arg - user argument, contains data to be set or reference 19641 * parameter for get 19642 * flag - bit flag, indicating open settings, 32/64 bit type 19643 * cred_p - user credential pointer 19644 * rval_p - calling process return value (OPT) 19645 * 19646 * Return Code: EINVAL 19647 * ENOTTY 19648 * ENXIO 19649 * EIO 19650 * EFAULT 19651 * ENOTSUP 19652 * EPERM 19653 * 19654 * Context: Called from the device switch at normal priority. 19655 */ 19656 19657 static int 19658 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 19659 { 19660 struct sd_lun *un = NULL; 19661 int err = 0; 19662 int i = 0; 19663 cred_t *cr; 19664 int tmprval = EINVAL; 19665 int is_valid; 19666 19667 /* 19668 * All device accesses go through sdstrategy, where we check 19669 * suspend status. 19670 */ 19671 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 19672 return (ENXIO); 19673 } 19674 19675 ASSERT(!mutex_owned(SD_MUTEX(un))); 19676 19677 19678 is_valid = SD_IS_VALID_LABEL(un); 19679 19680 /* 19681 * Moved this wait from sd_uscsi_strategy to here for 19682 * reasons of deadlock prevention. Internal driver commands, 19683 * specifically those to change a device's power level, result 19684 * in a call to sd_uscsi_strategy. 19685 */ 19686 mutex_enter(SD_MUTEX(un)); 19687 while ((un->un_state == SD_STATE_SUSPENDED) || 19688 (un->un_state == SD_STATE_PM_CHANGING)) { 19689 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 19690 } 19691 /* 19692 * Incrementing the counter here protects commands from now 19693 * through to the top of sd_uscsi_strategy. Without the 19694 * counter increment, a power down, for example, could get in 19695 * after the above state check is made and before 19696 * execution gets to the top of sd_uscsi_strategy. 19697 * That would cause problems.
19698 */ 19699 un->un_ncmds_in_driver++; 19700 19701 if (!is_valid && 19702 (flag & (FNDELAY | FNONBLOCK))) { 19703 switch (cmd) { 19704 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 19705 case DKIOCGVTOC: 19706 case DKIOCGAPART: 19707 case DKIOCPARTINFO: 19708 case DKIOCSGEOM: 19709 case DKIOCSAPART: 19710 case DKIOCGETEFI: 19711 case DKIOCPARTITION: 19712 case DKIOCSVTOC: 19713 case DKIOCSETEFI: 19714 case DKIOCGMBOOT: 19715 case DKIOCSMBOOT: 19716 case DKIOCG_PHYGEOM: 19717 case DKIOCG_VIRTGEOM: 19718 /* let cmlb handle it */ 19719 goto skip_ready_valid; 19720 19721 case CDROMPAUSE: 19722 case CDROMRESUME: 19723 case CDROMPLAYMSF: 19724 case CDROMPLAYTRKIND: 19725 case CDROMREADTOCHDR: 19726 case CDROMREADTOCENTRY: 19727 case CDROMSTOP: 19728 case CDROMSTART: 19729 case CDROMVOLCTRL: 19730 case CDROMSUBCHNL: 19731 case CDROMREADMODE2: 19732 case CDROMREADMODE1: 19733 case CDROMREADOFFSET: 19734 case CDROMSBLKMODE: 19735 case CDROMGBLKMODE: 19736 case CDROMGDRVSPEED: 19737 case CDROMSDRVSPEED: 19738 case CDROMCDDA: 19739 case CDROMCDXA: 19740 case CDROMSUBCODE: 19741 if (!ISCD(un)) { 19742 un->un_ncmds_in_driver--; 19743 ASSERT(un->un_ncmds_in_driver >= 0); 19744 mutex_exit(SD_MUTEX(un)); 19745 return (ENOTTY); 19746 } 19747 break; 19748 case FDEJECT: 19749 case DKIOCEJECT: 19750 case CDROMEJECT: 19751 if (!un->un_f_eject_media_supported) { 19752 un->un_ncmds_in_driver--; 19753 ASSERT(un->un_ncmds_in_driver >= 0); 19754 mutex_exit(SD_MUTEX(un)); 19755 return (ENOTTY); 19756 } 19757 break; 19758 case DKIOCFLUSHWRITECACHE: 19759 mutex_exit(SD_MUTEX(un)); 19760 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19761 if (err != 0) { 19762 mutex_enter(SD_MUTEX(un)); 19763 un->un_ncmds_in_driver--; 19764 ASSERT(un->un_ncmds_in_driver >= 0); 19765 mutex_exit(SD_MUTEX(un)); 19766 return (EIO); 19767 } 19768 mutex_enter(SD_MUTEX(un)); 19769 /* FALLTHROUGH */ 19770 case DKIOCREMOVABLE: 19771 case DKIOCHOTPLUGGABLE: 19772 case DKIOCINFO: 19773 case DKIOCGMEDIAINFO: 19774 case MHIOCENFAILFAST: 19775 case MHIOCSTATUS: 19776 case MHIOCTKOWN: 19777 case MHIOCRELEASE: 19778 case MHIOCGRP_INKEYS: 19779 case MHIOCGRP_INRESV: 19780 case MHIOCGRP_REGISTER: 19781 case MHIOCGRP_RESERVE: 19782 case MHIOCGRP_PREEMPTANDABORT: 19783 case MHIOCGRP_REGISTERANDIGNOREKEY: 19784 case CDROMCLOSETRAY: 19785 case USCSICMD: 19786 goto skip_ready_valid; 19787 default: 19788 break; 19789 } 19790 19791 mutex_exit(SD_MUTEX(un)); 19792 err = sd_ready_and_valid(un); 19793 mutex_enter(SD_MUTEX(un)); 19794 19795 if (err != SD_READY_VALID) { 19796 switch (cmd) { 19797 case DKIOCSTATE: 19798 case CDROMGDRVSPEED: 19799 case CDROMSDRVSPEED: 19800 case FDEJECT: /* for eject command */ 19801 case DKIOCEJECT: 19802 case CDROMEJECT: 19803 case DKIOCREMOVABLE: 19804 case DKIOCHOTPLUGGABLE: 19805 break; 19806 default: 19807 if (un->un_f_has_removable_media) { 19808 err = ENXIO; 19809 } else { 19810 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 19811 if (err == SD_RESERVED_BY_OTHERS) { 19812 err = EACCES; 19813 } else { 19814 err = EIO; 19815 } 19816 } 19817 un->un_ncmds_in_driver--; 19818 ASSERT(un->un_ncmds_in_driver >= 0); 19819 mutex_exit(SD_MUTEX(un)); 19820 return (err); 19821 } 19822 } 19823 } 19824 19825 skip_ready_valid: 19826 mutex_exit(SD_MUTEX(un)); 19827 19828 switch (cmd) { 19829 case DKIOCINFO: 19830 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 19831 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 19832 break; 19833 19834 case DKIOCGMEDIAINFO: 19835 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 19836 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 19837 break; 19838 19839 case DKIOCGGEOM: 19840 case DKIOCGVTOC: 19841 case DKIOCGAPART: 19842 case DKIOCPARTINFO: 19843 case DKIOCSGEOM: 19844 case DKIOCSAPART: 19845 case DKIOCGETEFI: 19846 case DKIOCPARTITION: 19847 case DKIOCSVTOC: 19848 case DKIOCSETEFI: 19849 case DKIOCGMBOOT: 19850 case DKIOCSMBOOT: 19851 case DKIOCG_PHYGEOM: 19852 case DKIOCG_VIRTGEOM: 19853 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 19854 19855 /* TUR should spin up */ 19856 19857 if (un->un_f_has_removable_media) 19858 err = sd_send_scsi_TEST_UNIT_READY(un, 19859 SD_CHECK_FOR_MEDIA); 19860 else 19861 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 19862 19863 if (err != 0) 19864 break; 19865 19866 err = cmlb_ioctl(un->un_cmlbhandle, dev, 19867 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 19868 19869 if ((err == 0) && 19870 ((cmd == DKIOCSETEFI) || 19871 (un->un_f_pkstats_enabled) && 19872 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 19873 19874 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 19875 (void *)SD_PATH_DIRECT); 19876 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 19877 sd_set_pstats(un); 19878 SD_TRACE(SD_LOG_IO_PARTITION, un, 19879 "sd_ioctl: un:0x%p pstats created and " 19880 "set\n", un); 19881 } 19882 } 19883 19884 if ((cmd == DKIOCSVTOC) || 19885 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 19886 19887 mutex_enter(SD_MUTEX(un)); 19888 if (un->un_f_devid_supported && 19889 (un->un_f_opt_fab_devid == TRUE)) { 19890 if (un->un_devid == NULL) { 19891 sd_register_devid(un, SD_DEVINFO(un), 19892 SD_TARGET_IS_UNRESERVED); 19893 } else { 19894 /* 19895 * The device id for this disk 19896 * has been fabricated. The 19897 * device id must be preserved 19898 * by writing it back out to 19899 * disk. 19900 */ 19901 if (sd_write_deviceid(un) != 0) { 19902 ddi_devid_free(un->un_devid); 19903 un->un_devid = NULL; 19904 } 19905 } 19906 } 19907 mutex_exit(SD_MUTEX(un)); 19908 } 19909 19910 break; 19911 19912 case DKIOCLOCK: 19913 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 19914 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 19915 SD_PATH_STANDARD); 19916 break; 19917 19918 case DKIOCUNLOCK: 19919 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 19920 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 19921 SD_PATH_STANDARD); 19922 break; 19923 19924 case DKIOCSTATE: { 19925 enum dkio_state state; 19926 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 19927 19928 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 19929 err = EFAULT; 19930 } else { 19931 err = sd_check_media(dev, state); 19932 if (err == 0) { 19933 if (ddi_copyout(&un->un_mediastate, (void *)arg, 19934 sizeof (int), flag) != 0) 19935 err = EFAULT; 19936 } 19937 } 19938 break; 19939 } 19940 19941 case DKIOCREMOVABLE: 19942 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 19943 i = un->un_f_has_removable_media ? 1 : 0; 19944 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19945 err = EFAULT; 19946 } else { 19947 err = 0; 19948 } 19949 break; 19950 19951 case DKIOCHOTPLUGGABLE: 19952 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 19953 i = un->un_f_is_hotpluggable ? 
1 : 0; 19954 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 19955 err = EFAULT; 19956 } else { 19957 err = 0; 19958 } 19959 break; 19960 19961 case DKIOCGTEMPERATURE: 19962 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 19963 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 19964 break; 19965 19966 case MHIOCENFAILFAST: 19967 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 19968 if ((err = drv_priv(cred_p)) == 0) { 19969 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 19970 } 19971 break; 19972 19973 case MHIOCTKOWN: 19974 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 19975 if ((err = drv_priv(cred_p)) == 0) { 19976 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 19977 } 19978 break; 19979 19980 case MHIOCRELEASE: 19981 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 19982 if ((err = drv_priv(cred_p)) == 0) { 19983 err = sd_mhdioc_release(dev); 19984 } 19985 break; 19986 19987 case MHIOCSTATUS: 19988 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 19989 if ((err = drv_priv(cred_p)) == 0) { 19990 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 19991 case 0: 19992 err = 0; 19993 break; 19994 case EACCES: 19995 *rval_p = 1; 19996 err = 0; 19997 break; 19998 default: 19999 err = EIO; 20000 break; 20001 } 20002 } 20003 break; 20004 20005 case MHIOCQRESERVE: 20006 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20007 if ((err = drv_priv(cred_p)) == 0) { 20008 err = sd_reserve_release(dev, SD_RESERVE); 20009 } 20010 break; 20011 20012 case MHIOCREREGISTERDEVID: 20013 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20014 if (drv_priv(cred_p) == EPERM) { 20015 err = EPERM; 20016 } else if (!un->un_f_devid_supported) { 20017 err = ENOTTY; 20018 } else { 20019 err = sd_mhdioc_register_devid(dev); 20020 } 20021 break; 20022 20023 case MHIOCGRP_INKEYS: 20024 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20025 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20026 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20027 err = ENOTSUP; 20028 } else { 20029 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20030 flag); 20031 } 20032 } 20033 break; 20034 20035 case MHIOCGRP_INRESV: 20036 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20037 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20038 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20039 err = ENOTSUP; 20040 } else { 20041 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20042 } 20043 } 20044 break; 20045 20046 case MHIOCGRP_REGISTER: 20047 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20048 if ((err = drv_priv(cred_p)) != EPERM) { 20049 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20050 err = ENOTSUP; 20051 } else if (arg != NULL) { 20052 mhioc_register_t reg; 20053 if (ddi_copyin((void *)arg, ®, 20054 sizeof (mhioc_register_t), flag) != 0) { 20055 err = EFAULT; 20056 } else { 20057 err = 20058 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20059 un, SD_SCSI3_REGISTER, 20060 (uchar_t *)®); 20061 } 20062 } 20063 } 20064 break; 20065 20066 case MHIOCGRP_RESERVE: 20067 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20068 if ((err = drv_priv(cred_p)) != EPERM) { 20069 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20070 err = ENOTSUP; 20071 } else if (arg != NULL) { 20072 mhioc_resv_desc_t resv_desc; 20073 if (ddi_copyin((void *)arg, &resv_desc, 20074 sizeof (mhioc_resv_desc_t), flag) != 0) { 20075 err = EFAULT; 20076 } else { 20077 err = 20078 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20079 un, SD_SCSI3_RESERVE, 20080 (uchar_t *)&resv_desc); 20081 } 20082 } 20083 } 20084 break; 20085 20086 
case MHIOCGRP_PREEMPTANDABORT: 20087 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20088 if ((err = drv_priv(cred_p)) != EPERM) { 20089 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20090 err = ENOTSUP; 20091 } else if (arg != NULL) { 20092 mhioc_preemptandabort_t preempt_abort; 20093 if (ddi_copyin((void *)arg, &preempt_abort, 20094 sizeof (mhioc_preemptandabort_t), 20095 flag) != 0) { 20096 err = EFAULT; 20097 } else { 20098 err = 20099 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20100 un, SD_SCSI3_PREEMPTANDABORT, 20101 (uchar_t *)&preempt_abort); 20102 } 20103 } 20104 } 20105 break; 20106 20107 case MHIOCGRP_REGISTERANDIGNOREKEY: 20108 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20109 if ((err = drv_priv(cred_p)) != EPERM) { 20110 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20111 err = ENOTSUP; 20112 } else if (arg != NULL) { 20113 mhioc_registerandignorekey_t r_and_i; 20114 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20115 sizeof (mhioc_registerandignorekey_t), 20116 flag) != 0) { 20117 err = EFAULT; 20118 } else { 20119 err = 20120 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20121 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20122 (uchar_t *)&r_and_i); 20123 } 20124 } 20125 } 20126 break; 20127 20128 case USCSICMD: 20129 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20130 cr = ddi_get_cred(); 20131 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20132 err = EPERM; 20133 } else { 20134 enum uio_seg uioseg; 20135 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20136 UIO_USERSPACE; 20137 if (un->un_f_format_in_progress == TRUE) { 20138 err = EAGAIN; 20139 break; 20140 } 20141 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20142 flag, uioseg, SD_PATH_STANDARD); 20143 } 20144 break; 20145 20146 case CDROMPAUSE: 20147 case CDROMRESUME: 20148 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20149 if (!ISCD(un)) { 20150 err = ENOTTY; 20151 } else { 20152 err = sr_pause_resume(dev, cmd); 20153 } 20154 break; 20155 20156 case CDROMPLAYMSF: 20157 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20158 if (!ISCD(un)) { 20159 err = ENOTTY; 20160 } else { 20161 err = sr_play_msf(dev, (caddr_t)arg, flag); 20162 } 20163 break; 20164 20165 case CDROMPLAYTRKIND: 20166 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20167 #if defined(__i386) || defined(__amd64) 20168 /* 20169 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20170 */ 20171 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20172 #else 20173 if (!ISCD(un)) { 20174 #endif 20175 err = ENOTTY; 20176 } else { 20177 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20178 } 20179 break; 20180 20181 case CDROMREADTOCHDR: 20182 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20183 if (!ISCD(un)) { 20184 err = ENOTTY; 20185 } else { 20186 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20187 } 20188 break; 20189 20190 case CDROMREADTOCENTRY: 20191 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20192 if (!ISCD(un)) { 20193 err = ENOTTY; 20194 } else { 20195 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20196 } 20197 break; 20198 20199 case CDROMSTOP: 20200 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20201 if (!ISCD(un)) { 20202 err = ENOTTY; 20203 } else { 20204 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20205 SD_PATH_STANDARD); 20206 } 20207 break; 20208 20209 case CDROMSTART: 20210 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20211 if (!ISCD(un)) { 20212 err = ENOTTY; 20213 } else { 20214 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20215 SD_PATH_STANDARD); 20216 } 20217 break; 
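	/*
	 * For reference, a minimal userland sketch of driving the USCSICMD
	 * case above (illustrative only, not compiled here; it assumes the
	 * uscsi_cmd layout from <sys/scsi/impl/uscsi.h> and hand-builds a
	 * 6-byte INQUIRY CDB, opcode 0x12):
	 *
	 *	struct uscsi_cmd ucmd;
	 *	uchar_t cdb[6] = { 0x12, 0, 0, 0, 0xff, 0 };
	 *	uchar_t inq[0xff];
	 *
	 *	bzero(&ucmd, sizeof (ucmd));
	 *	ucmd.uscsi_cdb = (caddr_t)cdb;
	 *	ucmd.uscsi_cdblen = sizeof (cdb);
	 *	ucmd.uscsi_bufaddr = (caddr_t)inq;
	 *	ucmd.uscsi_buflen = sizeof (inq);
	 *	ucmd.uscsi_flags = USCSI_READ | USCSI_SILENT;
	 *	ucmd.uscsi_timeout = 30;
	 *	if (ioctl(fd, USCSICMD, &ucmd) < 0)
	 *		return (errno);
	 *
	 * The caller must pass the drv_priv() check above, and the request
	 * fails with EAGAIN while a format is in progress.
	 */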
20218 20219 case CDROMCLOSETRAY: 20220 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20221 if (!ISCD(un)) { 20222 err = ENOTTY; 20223 } else { 20224 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20225 SD_PATH_STANDARD); 20226 } 20227 break; 20228 20229 case FDEJECT: /* for eject command */ 20230 case DKIOCEJECT: 20231 case CDROMEJECT: 20232 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20233 if (!un->un_f_eject_media_supported) { 20234 err = ENOTTY; 20235 } else { 20236 err = sr_eject(dev); 20237 } 20238 break; 20239 20240 case CDROMVOLCTRL: 20241 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20242 if (!ISCD(un)) { 20243 err = ENOTTY; 20244 } else { 20245 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20246 } 20247 break; 20248 20249 case CDROMSUBCHNL: 20250 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20251 if (!ISCD(un)) { 20252 err = ENOTTY; 20253 } else { 20254 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20255 } 20256 break; 20257 20258 case CDROMREADMODE2: 20259 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20260 if (!ISCD(un)) { 20261 err = ENOTTY; 20262 } else if (un->un_f_cfg_is_atapi == TRUE) { 20263 /* 20264 * If the drive supports READ CD, use that instead of 20265 * switching the LBA size via a MODE SELECT 20266 * Block Descriptor. 20267 */ 20268 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20269 } else { 20270 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20271 } 20272 break; 20273 20274 case CDROMREADMODE1: 20275 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20276 if (!ISCD(un)) { 20277 err = ENOTTY; 20278 } else { 20279 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20280 } 20281 break; 20282 20283 case CDROMREADOFFSET: 20284 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20285 if (!ISCD(un)) { 20286 err = ENOTTY; 20287 } else { 20288 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20289 flag); 20290 } 20291 break; 20292 20293 case CDROMSBLKMODE: 20294 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20295 /* 20296 * There is no means of changing the block size on ATAPI 20297 * drives, thus return ENOTTY if the drive type is ATAPI. 20298 */ 20299 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20300 err = ENOTTY; 20301 } else if (un->un_f_mmc_cap == TRUE) { 20302 20303 /* 20304 * MMC devices do not support changing the 20305 * logical block size. 20306 * 20307 * Note: EINVAL is being returned instead of ENOTTY to 20308 * maintain consistency with the original mmc 20309 * driver update. 20310 */ 20311 err = EINVAL; 20312 } else { 20313 mutex_enter(SD_MUTEX(un)); 20314 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20315 (un->un_ncmds_in_transport > 0)) { 20316 mutex_exit(SD_MUTEX(un)); 20317 err = EINVAL; 20318 } else { 20319 mutex_exit(SD_MUTEX(un)); 20320 err = sr_change_blkmode(dev, cmd, arg, flag); 20321 } 20322 } 20323 break; 20324 20325 case CDROMGBLKMODE: 20326 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20327 if (!ISCD(un)) { 20328 err = ENOTTY; 20329 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20330 (un->un_f_blockcount_is_valid != FALSE)) { 20331 /* 20332 * The drive is an ATAPI drive, so return the target 20333 * block size, since we cannot change the 20334 * blocksize on ATAPI drives. Used primarily to detect 20335 * whether an ATAPI cdrom is present. 20336 */ 20337 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20338 sizeof (int), flag) != 0) { 20339 err = EFAULT; 20340 } else { 20341 err = 0; 20342 } 20343 20344 } else { 20345 /* 20346 * Drive supports changing block sizes via a Mode 20347 * Select.
20348 */ 20349 err = sr_change_blkmode(dev, cmd, arg, flag); 20350 } 20351 break; 20352 20353 case CDROMGDRVSPEED: 20354 case CDROMSDRVSPEED: 20355 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20356 if (!ISCD(un)) { 20357 err = ENOTTY; 20358 } else if (un->un_f_mmc_cap == TRUE) { 20359 /* 20360 * Note: In the future the driver implementation 20361 * for getting and 20362 * setting cd speed should entail: 20363 * 1) If non-mmc try the Toshiba mode page 20364 * (sr_change_speed) 20365 * 2) If mmc but no support for Real Time Streaming try 20366 * the SET CD SPEED (0xBB) command 20367 * (sr_atapi_change_speed) 20368 * 3) If mmc and support for Real Time Streaming 20369 * try the GET PERFORMANCE and SET STREAMING 20370 * commands (not yet implemented, 4380808) 20371 */ 20372 /* 20373 * As per recent MMC spec, CD-ROM speed is variable 20374 * and changes with LBA. Since there is no such 20375 * thing as drive speed now, fail this ioctl. 20376 * 20377 * Note: EINVAL is returned for consistency with the 20378 * original implementation, which included support for getting 20379 * the drive speed of mmc devices but not setting 20380 * the drive speed. Thus EINVAL would be returned 20381 * if a set request was made for an mmc device. 20382 * We no longer support get or set speed for 20383 * mmc but need to remain consistent with regard 20384 * to the error code returned. 20385 */ 20386 err = EINVAL; 20387 } else if (un->un_f_cfg_is_atapi == TRUE) { 20388 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20389 } else { 20390 err = sr_change_speed(dev, cmd, arg, flag); 20391 } 20392 break; 20393 20394 case CDROMCDDA: 20395 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20396 if (!ISCD(un)) { 20397 err = ENOTTY; 20398 } else { 20399 err = sr_read_cdda(dev, (void *)arg, flag); 20400 } 20401 break; 20402 20403 case CDROMCDXA: 20404 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20405 if (!ISCD(un)) { 20406 err = ENOTTY; 20407 } else { 20408 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20409 } 20410 break; 20411 20412 case CDROMSUBCODE: 20413 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20414 if (!ISCD(un)) { 20415 err = ENOTTY; 20416 } else { 20417 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20418 } 20419 break; 20420 20421 20422 #ifdef SDDEBUG 20423 /* RESET/ABORTS testing ioctls */ 20424 case DKIOCRESET: { 20425 int reset_level; 20426 20427 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20428 err = EFAULT; 20429 } else { 20430 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20431 "reset_level = 0x%x\n", reset_level); 20432 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20433 err = 0; 20434 } else { 20435 err = EIO; 20436 } 20437 } 20438 break; 20439 } 20440 20441 case DKIOCABORT: 20442 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20443 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20444 err = 0; 20445 } else { 20446 err = EIO; 20447 } 20448 break; 20449 #endif 20450 20451 #ifdef SD_FAULT_INJECTION 20452 /* SDIOC FaultInjection testing ioctls */ 20453 case SDIOCSTART: 20454 case SDIOCSTOP: 20455 case SDIOCINSERTPKT: 20456 case SDIOCINSERTXB: 20457 case SDIOCINSERTUN: 20458 case SDIOCINSERTARQ: 20459 case SDIOCPUSH: 20460 case SDIOCRETRIEVE: 20461 case SDIOCRUN: 20462 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 20463 "SDIOC detected cmd:0x%X:\n", cmd); 20464 /* call error generator */ 20465 sd_faultinjection_ioctl(cmd, arg, un); 20466 err = 0; 20467 break; 20468 20469 #endif /* SD_FAULT_INJECTION */ 20470 20471 case DKIOCFLUSHWRITECACHE: 20472 { 20473 struct dk_callback *dkc = (struct
dk_callback *)arg; 20474 20475 mutex_enter(SD_MUTEX(un)); 20476 if (!un->un_f_sync_cache_supported || 20477 !un->un_f_write_cache_enabled) { 20478 err = un->un_f_sync_cache_supported ? 20479 0 : ENOTSUP; 20480 mutex_exit(SD_MUTEX(un)); 20481 if ((flag & FKIOCTL) && dkc != NULL && 20482 dkc->dkc_callback != NULL) { 20483 (*dkc->dkc_callback)(dkc->dkc_cookie, 20484 err); 20485 /* 20486 * We made the callback and reported the 20487 * error there; since a callback was 20488 * made, the ioctl should return 0. 20489 */ 20490 err = 0; 20491 } 20492 break; 20493 } 20494 mutex_exit(SD_MUTEX(un)); 20495 20496 if ((flag & FKIOCTL) && dkc != NULL && 20497 dkc->dkc_callback != NULL) { 20498 /* async SYNC CACHE request */ 20499 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20500 } else { 20501 /* synchronous SYNC CACHE request */ 20502 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20503 } 20504 } 20505 break; 20506 20507 case DKIOCGETWCE: { 20508 20509 int wce; 20510 20511 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20512 break; 20513 } 20514 20515 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20516 err = EFAULT; 20517 } 20518 break; 20519 } 20520 20521 case DKIOCSETWCE: { 20522 20523 int wce, sync_supported; 20524 20525 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20526 err = EFAULT; 20527 break; 20528 } 20529 20530 /* 20531 * Synchronize multiple threads trying to enable 20532 * or disable the cache via the un_wcc_cv 20533 * condition variable. 20534 */ 20535 mutex_enter(SD_MUTEX(un)); 20536 20537 /* 20538 * Don't allow the cache to be enabled if the 20539 * config file has it disabled. 20540 */ 20541 if (un->un_f_opt_disable_cache && wce) { 20542 mutex_exit(SD_MUTEX(un)); 20543 err = EINVAL; 20544 break; 20545 } 20546 20547 /* 20548 * Wait for the write cache change in progress 20549 * bit to clear before proceeding. 20550 */ 20551 while (un->un_f_wcc_inprog) 20552 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20553 20554 un->un_f_wcc_inprog = 1; 20555 20556 if (un->un_f_write_cache_enabled && wce == 0) { 20557 /* 20558 * Disable the write cache. Don't clear 20559 * un_f_write_cache_enabled until after 20560 * the mode select and flush are complete. 20561 */ 20562 sync_supported = un->un_f_sync_cache_supported; 20563 20564 /* 20565 * If cache flush is suppressed, we assume that the 20566 * controller firmware will take care of managing the 20567 * write cache for us: no need to explicitly 20568 * disable it. 20569 */ 20570 if (!un->un_f_suppress_cache_flush) { 20571 mutex_exit(SD_MUTEX(un)); 20572 if ((err = sd_cache_control(un, 20573 SD_CACHE_NOCHANGE, 20574 SD_CACHE_DISABLE)) == 0 && 20575 sync_supported) { 20576 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20577 NULL); 20578 } 20579 } else { 20580 mutex_exit(SD_MUTEX(un)); 20581 } 20582 20583 mutex_enter(SD_MUTEX(un)); 20584 if (err == 0) { 20585 un->un_f_write_cache_enabled = 0; 20586 } 20587 20588 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20589 /* 20590 * Set un_f_write_cache_enabled first, so there is 20591 * no window where the cache is enabled, but the 20592 * bit says it isn't. 20593 */ 20594 un->un_f_write_cache_enabled = 1; 20595 20596 /* 20597 * If cache flush is suppressed, we assume that the 20598 * controller firmware will take care of managing the 20599 * write cache for us: no need to explicitly 20600 * enable it.
20601 */ 20602 if (!un->un_f_suppress_cache_flush) { 20603 mutex_exit(SD_MUTEX(un)); 20604 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20605 SD_CACHE_ENABLE); 20606 } else { 20607 mutex_exit(SD_MUTEX(un)); 20608 } 20609 20610 mutex_enter(SD_MUTEX(un)); 20611 20612 if (err) { 20613 un->un_f_write_cache_enabled = 0; 20614 } 20615 } 20616 20617 un->un_f_wcc_inprog = 0; 20618 cv_broadcast(&un->un_wcc_cv); 20619 mutex_exit(SD_MUTEX(un)); 20620 break; 20621 } 20622 20623 default: 20624 err = ENOTTY; 20625 break; 20626 } 20627 mutex_enter(SD_MUTEX(un)); 20628 un->un_ncmds_in_driver--; 20629 ASSERT(un->un_ncmds_in_driver >= 0); 20630 mutex_exit(SD_MUTEX(un)); 20631 20632 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 20633 return (err); 20634 } 20635 20636 20637 /* 20638 * Function: sd_dkio_ctrl_info 20639 * 20640 * Description: This routine is the driver entry point for handling controller 20641 * information ioctl requests (DKIOCINFO). 20642 * 20643 * Arguments: dev - the device number 20644 * arg - pointer to user provided dk_cinfo structure 20645 * specifying the controller type and attributes. 20646 * flag - this argument is a pass through to ddi_copyxxx() 20647 * directly from the mode argument of ioctl(). 20648 * 20649 * Return Code: 0 20650 * EFAULT 20651 * ENXIO 20652 */ 20653 20654 static int 20655 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 20656 { 20657 struct sd_lun *un = NULL; 20658 struct dk_cinfo *info; 20659 dev_info_t *pdip; 20660 int lun, tgt; 20661 20662 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20663 return (ENXIO); 20664 } 20665 20666 info = (struct dk_cinfo *) 20667 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 20668 20669 switch (un->un_ctype) { 20670 case CTYPE_CDROM: 20671 info->dki_ctype = DKC_CDROM; 20672 break; 20673 default: 20674 info->dki_ctype = DKC_SCSI_CCS; 20675 break; 20676 } 20677 pdip = ddi_get_parent(SD_DEVINFO(un)); 20678 info->dki_cnum = ddi_get_instance(pdip); 20679 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 20680 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 20681 } else { 20682 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 20683 DK_DEVLEN - 1); 20684 } 20685 20686 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20687 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 20688 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 20689 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 20690 20691 /* Unit Information */ 20692 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 20693 info->dki_slave = ((tgt << 3) | lun); 20694 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 20695 DK_DEVLEN - 1); 20696 info->dki_flags = DKI_FMTVOL; 20697 info->dki_partition = SDPART(dev); 20698 20699 /* Max Transfer size of this device in blocks */ 20700 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 20701 info->dki_addr = 0; 20702 info->dki_space = 0; 20703 info->dki_prio = 0; 20704 info->dki_vec = 0; 20705 20706 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 20707 kmem_free(info, sizeof (struct dk_cinfo)); 20708 return (EFAULT); 20709 } else { 20710 kmem_free(info, sizeof (struct dk_cinfo)); 20711 return (0); 20712 } 20713 } 20714 20715 20716 /* 20717 * Function: sd_get_media_info 20718 * 20719 * Description: This routine is the driver entry point for handling ioctl 20720 * requests for the media type or command set profile used by the 20721 * drive to operate on the media (DKIOCGMEDIAINFO). 
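 *		A sketch of the consumer side (illustrative only): the
 *		caller passes a dk_minfo structure by reference, e.g.
 *
 *			struct dk_minfo minfo;
 *
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0)
 *				blksz = minfo.dki_lbsize;
 *
 *		with the media type returned in dki_media_type and the
 *		capacity, counted in dki_lbsize units, in dki_capacity.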
20722 * 20723 * Arguments: dev - the device number 20724 * arg - pointer to user provided dk_minfo structure 20725 * specifying the media type, logical block size and 20726 * drive capacity. 20727 * flag - this argument is a pass through to ddi_copyxxx() 20728 * directly from the mode argument of ioctl(). 20729 * 20730 * Return Code: 0 20731 * EACCES 20732 * EFAULT 20733 * ENXIO 20734 * EIO 20735 */ 20736 20737 static int 20738 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 20739 { 20740 struct sd_lun *un = NULL; 20741 struct uscsi_cmd com; 20742 struct scsi_inquiry *sinq; 20743 struct dk_minfo media_info; 20744 u_longlong_t media_capacity; 20745 uint64_t capacity; 20746 uint_t lbasize; 20747 uchar_t *out_data; 20748 uchar_t *rqbuf; 20749 int rval = 0; 20750 int rtn; 20751 20752 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 20753 (un->un_state == SD_STATE_OFFLINE)) { 20754 return (ENXIO); 20755 } 20756 20757 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 20758 20759 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 20760 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20761 20762 /* Issue a TUR to determine if the drive is ready with media present */ 20763 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 20764 if (rval == ENXIO) { 20765 goto done; 20766 } 20767 20768 /* Now get configuration data */ 20769 if (ISCD(un)) { 20770 media_info.dki_media_type = DK_CDROM; 20771 20772 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 20773 if (un->un_f_mmc_cap == TRUE) { 20774 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 20775 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 20776 SD_PATH_STANDARD); 20777 20778 if (rtn) { 20779 /* 20780 * The command failed for a reason other than 20781 * an illegal request or an unsupported command. 20782 */ 20783 if ((com.uscsi_status == STATUS_CHECK) && 20784 (com.uscsi_rqstatus == STATUS_GOOD)) { 20785 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 20786 (rqbuf[12] != 0x20)) { 20787 rval = EIO; 20788 goto done; 20789 } 20790 } 20791 } else { 20792 /* 20793 * The GET CONFIGURATION command succeeded, 20794 * so set the media type according to the 20795 * returned data 20796 */ 20797 media_info.dki_media_type = out_data[6]; 20798 media_info.dki_media_type <<= 8; 20799 media_info.dki_media_type |= out_data[7]; 20800 } 20801 } 20802 } else { 20803 /* 20804 * The profile list is not available, so we attempt to identify 20805 * the media type based on the inquiry data 20806 */ 20807 sinq = un->un_sd->sd_inq; 20808 if ((sinq->inq_dtype == DTYPE_DIRECT) || 20809 (sinq->inq_dtype == DTYPE_OPTICAL)) { 20810 /* This is a direct access device or optical disk */ 20811 media_info.dki_media_type = DK_FIXED_DISK; 20812 20813 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 20814 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 20815 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 20816 media_info.dki_media_type = DK_ZIP; 20817 } else if ( 20818 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 20819 media_info.dki_media_type = DK_JAZ; 20820 } 20821 } 20822 } else { 20823 /* 20824 * Not a CD, direct access, or optical disk, so 20825 * return unknown media 20826 */ 20827 media_info.dki_media_type = DK_UNKNOWN; 20828 } 20829 } 20830 20831 /* Now read the capacity so we can provide the lbasize and capacity */ 20832 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 20833 SD_PATH_DIRECT)) { 20834 case 0: 20835 break; 20836 case EACCES: 20837 rval = EACCES; 20838 goto done; 20839 default: 20840 rval = EIO; 20841 goto done; 20842 } 20843 20844
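	/*
	 * A worked example of the conversion just below, assuming the
	 * usual 512-byte un_sys_blocksize: a CD with 2048-byte media
	 * blocks whose capacity is reported as 4000 512-byte blocks is
	 * returned as (4000 * 512) / 2048 = 1000 blocks of dki_lbsize.
	 */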
media_info.dki_lbsize = lbasize; 20845 media_capacity = capacity; 20846 20847 /* 20848 * sd_send_scsi_READ_CAPACITY() reports capacity in 20849 * un->un_sys_blocksize chunks. So we need to convert it into 20850 * dki_lbsize chunks. 20851 */ 20852 media_capacity *= un->un_sys_blocksize; 20853 media_capacity /= lbasize; 20854 media_info.dki_capacity = media_capacity; 20855 20856 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 20857 rval = EFAULT; 20858 /* Keep the goto so any code added below still hits the cleanup. */ 20859 goto done; 20860 } 20861 done: 20862 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 20863 kmem_free(rqbuf, SENSE_LENGTH); 20864 return (rval); 20865 } 20866 20867 20868 /* 20869 * Function: sd_check_media 20870 * 20871 * Description: This utility routine implements the functionality for the 20872 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 20873 * driver state changes from that specified by the user 20874 * (inserted or ejected). For example, if the user specifies 20875 * DKIO_EJECTED and the current media state is inserted this 20876 * routine will return immediately with DKIO_INSERTED. However, if 20877 * the current media state is not inserted the user thread will be 20878 * blocked until the drive state changes. If DKIO_NONE is specified 20879 * the user thread will block until a drive state change occurs. 20880 * 20881 * Arguments: dev - the device number 20882 * state - the media state specified by the caller; this routine 20883 * blocks until the drive state differs from this value. 20884 * 20885 * Return Code: ENXIO 20886 * EIO 20887 * EAGAIN 20888 * EINTR 20889 */ 20890 20891 static int 20892 sd_check_media(dev_t dev, enum dkio_state state) 20893 { 20894 struct sd_lun *un = NULL; 20895 enum dkio_state prev_state; 20896 opaque_t token = NULL; 20897 int rval = 0; 20898 20899 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20900 return (ENXIO); 20901 } 20902 20903 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 20904 20905 mutex_enter(SD_MUTEX(un)); 20906 20907 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 20908 "state=%x, mediastate=%x\n", state, un->un_mediastate); 20909 20910 prev_state = un->un_mediastate; 20911 20912 /* is there anything to do? */ 20913 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 20914 /* 20915 * submit the request to the scsi_watch service; 20916 * sd_media_watch_cb() does the real work 20917 */ 20918 mutex_exit(SD_MUTEX(un)); 20919 20920 /* 20921 * This handles the case where a scsi watch request is 20922 * added to a device that is powered down. To accomplish this 20923 * we power up the device before adding the scsi watch request, 20924 * since the scsi watch sends a TUR directly to the device, 20925 * which the device cannot handle if it is powered down. 20926 */ 20927 if (sd_pm_entry(un) != DDI_SUCCESS) { 20928 mutex_enter(SD_MUTEX(un)); 20929 goto done; 20930 } 20931 20932 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 20933 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 20934 (caddr_t)dev); 20935 20936 sd_pm_exit(un); 20937 20938 mutex_enter(SD_MUTEX(un)); 20939 if (token == NULL) { 20940 rval = EAGAIN; 20941 goto done; 20942 } 20943 20944 /* 20945 * This is a special case IOCTL that doesn't return 20946 * until the media state changes. Routine sdpower 20947 * knows about and handles this, so don't count it 20948 * as an active cmd in the driver, which would 20949 * keep the device marked busy to the pm framework.
If the count isn't decremented the device can't 20951 * be powered down. 20952 */ 20953 un->un_ncmds_in_driver--; 20954 ASSERT(un->un_ncmds_in_driver >= 0); 20955 20956 /* 20957 * if a prior request had been made, this will be the same 20958 * token, as scsi_watch was designed that way. 20959 */ 20960 un->un_swr_token = token; 20961 un->un_specified_mediastate = state; 20962 20963 /* 20964 * now wait for the media change; 20965 * we will not be signalled until mediastate differs from the 20966 * specified state, but it is still better to re-test the condition, 20967 * since there is a 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 20968 */ 20969 SD_TRACE(SD_LOG_COMMON, un, 20970 "sd_check_media: waiting for media state change\n"); 20971 while (un->un_mediastate == state) { 20972 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 20973 SD_TRACE(SD_LOG_COMMON, un, 20974 "sd_check_media: waiting for media state " 20975 "was interrupted\n"); 20976 un->un_ncmds_in_driver++; 20977 rval = EINTR; 20978 goto done; 20979 } 20980 SD_TRACE(SD_LOG_COMMON, un, 20981 "sd_check_media: received signal, state=%x\n", 20982 un->un_mediastate); 20983 } 20984 /* 20985 * Inc the counter to indicate the device once again 20986 * has an active outstanding cmd. 20987 */ 20988 un->un_ncmds_in_driver++; 20989 } 20990 20991 /* invalidate geometry */ 20992 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 20993 sr_ejected(un); 20994 } 20995 20996 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 20997 uint64_t capacity; 20998 uint_t lbasize; 20999 21000 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21001 mutex_exit(SD_MUTEX(un)); 21002 /* 21003 * Since the following routines use SD_PATH_DIRECT, we must 21004 * call PM directly before the upcoming disk accesses. This 21005 * may cause the disk to be powered up and spun up. 21006 */ 21007 21008 if (sd_pm_entry(un) == DDI_SUCCESS) { 21009 rval = sd_send_scsi_READ_CAPACITY(un, 21010 &capacity, 21011 &lbasize, SD_PATH_DIRECT); 21012 if (rval != 0) { 21013 sd_pm_exit(un); 21014 mutex_enter(SD_MUTEX(un)); 21015 goto done; 21016 } 21017 } else { 21018 rval = EIO; 21019 mutex_enter(SD_MUTEX(un)); 21020 goto done; 21021 } 21022 mutex_enter(SD_MUTEX(un)); 21023 21024 sd_update_block_info(un, lbasize, capacity); 21025 21026 /* 21027 * Check whether the media in the device is writable. 21028 */ 21029 if (ISCD(un)) 21030 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21031 21032 mutex_exit(SD_MUTEX(un)); 21033 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21034 if ((cmlb_validate(un->un_cmlbhandle, 0, 21035 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21036 sd_set_pstats(un); 21037 SD_TRACE(SD_LOG_IO_PARTITION, un, 21038 "sd_check_media: un:0x%p pstats created and " 21039 "set\n", un); 21040 } 21041 21042 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21043 SD_PATH_DIRECT); 21044 sd_pm_exit(un); 21045 21046 mutex_enter(SD_MUTEX(un)); 21047 } 21048 done: 21049 un->un_f_watcht_stopped = FALSE; 21050 if (un->un_swr_token) { 21051 /* 21052 * Use of this local token and the mutex ensures that we avoid 21053 * some race conditions associated with terminating the 21054 * scsi watch.
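 *		 The token is captured and un_swr_token cleared while
 *		 SD_MUTEX is still held, so no other thread can terminate
 *		 the same request twice; the terminate itself is issued
 *		 with the mutex dropped, since the callback may need
 *		 SD_MUTEX in order to run to completion.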
21055 */ 21056 token = un->un_swr_token; 21057 un->un_swr_token = (opaque_t)NULL; 21058 mutex_exit(SD_MUTEX(un)); 21059 (void) scsi_watch_request_terminate(token, 21060 SCSI_WATCH_TERMINATE_WAIT); 21061 mutex_enter(SD_MUTEX(un)); 21062 } 21063 21064 /* 21065 * Update the capacity kstat value, if no media was previously 21066 * present (capacity kstat is 0) and media has been inserted 21067 * (un_f_blockcount_is_valid == TRUE) 21068 */ 21069 if (un->un_errstats) { 21070 struct sd_errstats *stp = NULL; 21071 21072 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21073 if ((stp->sd_capacity.value.ui64 == 0) && 21074 (un->un_f_blockcount_is_valid == TRUE)) { 21075 stp->sd_capacity.value.ui64 = 21076 (uint64_t)((uint64_t)un->un_blockcount * 21077 un->un_sys_blocksize); 21078 } 21079 } 21080 mutex_exit(SD_MUTEX(un)); 21081 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21082 return (rval); 21083 } 21084 21085 21086 /* 21087 * Function: sd_delayed_cv_broadcast 21088 * 21089 * Description: Delayed cv_broadcast to allow the target to recover from media 21090 * insertion. 21091 * 21092 * Arguments: arg - driver soft state (unit) structure 21093 */ 21094 21095 static void 21096 sd_delayed_cv_broadcast(void *arg) 21097 { 21098 struct sd_lun *un = arg; 21099 21100 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21101 21102 mutex_enter(SD_MUTEX(un)); 21103 un->un_dcvb_timeid = NULL; 21104 cv_broadcast(&un->un_state_cv); 21105 mutex_exit(SD_MUTEX(un)); 21106 } 21107 21108 21109 /* 21110 * Function: sd_media_watch_cb 21111 * 21112 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21113 * routine processes the TUR sense data and updates the driver 21114 * state if a transition has occurred. The user thread 21115 * (sd_check_media) is then signalled.
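 *		In brief, the mapping implemented below is:
 *
 *			UNIT ATTENTION, ASC 0x28	-> DKIO_INSERTED
 *			NOT READY, ASC 0x3A		-> DKIO_EJECTED
 *			NOT READY, ASC/ASCQ 04/02,
 *			    04/07 or 04/08		-> DKIO_INSERTED
 *			GOOD status, command complete	-> DKIO_INSERTED
 *			pkt_reason CMD_DEV_GONE		-> DKIO_DEV_GONE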
21116 * 21117 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21118 * among multiple watches that share this callback function 21119 * resultp - scsi watch facility result packet containing scsi 21120 * packet, status byte and sense data 21121 * 21122 * Return Code: 0 for success, -1 for failure 21123 */ 21124 21125 static int 21126 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21127 { 21128 struct sd_lun *un; 21129 struct scsi_status *statusp = resultp->statusp; 21130 uint8_t *sensep = (uint8_t *)resultp->sensep; 21131 enum dkio_state state = DKIO_NONE; 21132 dev_t dev = (dev_t)arg; 21133 uchar_t actual_sense_length; 21134 uint8_t skey, asc, ascq; 21135 21136 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21137 return (-1); 21138 } 21139 actual_sense_length = resultp->actual_sense_length; 21140 21141 mutex_enter(SD_MUTEX(un)); 21142 SD_TRACE(SD_LOG_COMMON, un, 21143 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21144 *((char *)statusp), (void *)sensep, actual_sense_length); 21145 21146 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21147 un->un_mediastate = DKIO_DEV_GONE; 21148 cv_broadcast(&un->un_state_cv); 21149 mutex_exit(SD_MUTEX(un)); 21150 21151 return (0); 21152 } 21153 21154 /* 21155 * If there was a check condition then sensep points to valid sense data 21156 * If status was not a check condition but a reservation or busy status 21157 * then the new state is DKIO_NONE 21158 */ 21159 if (sensep != NULL) { 21160 skey = scsi_sense_key(sensep); 21161 asc = scsi_sense_asc(sensep); 21162 ascq = scsi_sense_ascq(sensep); 21163 21164 SD_INFO(SD_LOG_COMMON, un, 21165 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21166 skey, asc, ascq); 21167 /* This routine only uses up to 13 bytes of sense data. */ 21168 if (actual_sense_length >= 13) { 21169 if (skey == KEY_UNIT_ATTENTION) { 21170 if (asc == 0x28) { 21171 state = DKIO_INSERTED; 21172 } 21173 } else if (skey == KEY_NOT_READY) { 21174 /* 21175 * if 02/04/02 means that the host 21176 * should send start command. Explicitly 21177 * leave the media state as is 21178 * (inserted) as the media is inserted 21179 * and host has stopped device for PM 21180 * reasons. Upon next true read/write 21181 * to this media will bring the 21182 * device to the right state good for 21183 * media access. 21184 */ 21185 if (asc == 0x3a) { 21186 state = DKIO_EJECTED; 21187 } else { 21188 /* 21189 * If the drive is busy with an 21190 * operation or long write, keep the 21191 * media in an inserted state. 21192 */ 21193 21194 if ((asc == 0x04) && 21195 ((ascq == 0x02) || 21196 (ascq == 0x07) || 21197 (ascq == 0x08))) { 21198 state = DKIO_INSERTED; 21199 } 21200 } 21201 } else if (skey == KEY_NO_SENSE) { 21202 if ((asc == 0x00) && (ascq == 0x00)) { 21203 /* 21204 * Sense Data 00/00/00 does not provide 21205 * any information about the state of 21206 * the media. Ignore it. 
21207 */ 21208 mutex_exit(SD_MUTEX(un)); 21209 return (0); 21210 } 21211 } 21212 } 21213 } else if ((*((char *)statusp) == STATUS_GOOD) && 21214 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21215 state = DKIO_INSERTED; 21216 } 21217 21218 SD_TRACE(SD_LOG_COMMON, un, 21219 "sd_media_watch_cb: state=%x, specified=%x\n", 21220 state, un->un_specified_mediastate); 21221 21222 /* 21223 * now signal the waiting thread if this is *not* the specified state; 21224 * delay the signal if the state is DKIO_INSERTED to allow the target 21225 * to recover 21226 */ 21227 if (state != un->un_specified_mediastate) { 21228 un->un_mediastate = state; 21229 if (state == DKIO_INSERTED) { 21230 /* 21231 * delay the signal to give the drive a chance 21232 * to do what it apparently needs to do 21233 */ 21234 SD_TRACE(SD_LOG_COMMON, un, 21235 "sd_media_watch_cb: delayed cv_broadcast\n"); 21236 if (un->un_dcvb_timeid == NULL) { 21237 un->un_dcvb_timeid = 21238 timeout(sd_delayed_cv_broadcast, un, 21239 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21240 } 21241 } else { 21242 SD_TRACE(SD_LOG_COMMON, un, 21243 "sd_media_watch_cb: immediate cv_broadcast\n"); 21244 cv_broadcast(&un->un_state_cv); 21245 } 21246 } 21247 mutex_exit(SD_MUTEX(un)); 21248 return (0); 21249 } 21250 21251 21252 /* 21253 * Function: sd_dkio_get_temp 21254 * 21255 * Description: This routine is the driver entry point for handling ioctl 21256 * requests to get the disk temperature. 21257 * 21258 * Arguments: dev - the device number 21259 * arg - pointer to user provided dk_temperature structure. 21260 * flag - this argument is a pass through to ddi_copyxxx() 21261 * directly from the mode argument of ioctl(). 21262 * 21263 * Return Code: 0 21264 * EFAULT 21265 * ENXIO 21266 * EAGAIN 21267 */ 21268 21269 static int 21270 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21271 { 21272 struct sd_lun *un = NULL; 21273 struct dk_temperature *dktemp = NULL; 21274 uchar_t *temperature_page; 21275 int rval = 0; 21276 int path_flag = SD_PATH_STANDARD; 21277 21278 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21279 return (ENXIO); 21280 } 21281 21282 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21283 21284 /* copyin the disk temp argument to get the user flags */ 21285 if (ddi_copyin((void *)arg, dktemp, 21286 sizeof (struct dk_temperature), flag) != 0) { 21287 rval = EFAULT; 21288 goto done; 21289 } 21290 21291 /* Initialize the temperature to invalid. */ 21292 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21293 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21294 21295 /* 21296 * Note: Investigate removing the "bypass pm" semantic. 21297 * Can we just bypass PM always? 21298 */ 21299 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21300 path_flag = SD_PATH_DIRECT; 21301 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21302 mutex_enter(&un->un_pm_mutex); 21303 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21304 /* 21305 * If DKT_BYPASS_PM is set, and the drive happens to be 21306 * in low power mode, we cannot wake it up; we need to 21307 * return EAGAIN. 21308 */ 21309 mutex_exit(&un->un_pm_mutex); 21310 rval = EAGAIN; 21311 goto done; 21312 } else { 21313 /* 21314 * Indicate to PM the device is busy. This is required 21315 * to avoid a race - i.e. the ioctl is issuing a 21316 * command and the pm framework brings down the device 21317 * to low power mode (possible power cut-off on some 21318 * platforms).
21319 */ 21320 mutex_exit(&un->un_pm_mutex); 21321 if (sd_pm_entry(un) != DDI_SUCCESS) { 21322 rval = EAGAIN; 21323 goto done; 21324 } 21325 } 21326 } 21327 21328 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21329 21330 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21331 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21332 goto done2; 21333 } 21334 21335 /* 21336 * For the current temperature verify that the parameter length is 0x02 21337 * and the parameter code is 0x00 21338 */ 21339 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21340 (temperature_page[5] == 0x00)) { 21341 if (temperature_page[9] == 0xFF) { 21342 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21343 } else { 21344 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21345 } 21346 } 21347 21348 /* 21349 * For the reference temperature verify that the parameter 21350 * length is 0x02 and the parameter code is 0x01 21351 */ 21352 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21353 (temperature_page[11] == 0x01)) { 21354 if (temperature_page[15] == 0xFF) { 21355 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21356 } else { 21357 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21358 } 21359 } 21360 21361 /* Do the copyout regardless of the temperature command's status. */ 21362 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21363 flag) != 0) { 21364 rval = EFAULT; 21365 } 21366 21367 done2: 21368 if (path_flag == SD_PATH_DIRECT) { 21369 sd_pm_exit(un); 21370 } 21371 21372 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21373 done: 21374 if (dktemp != NULL) { 21375 kmem_free(dktemp, sizeof (struct dk_temperature)); 21376 } 21377 21378 return (rval); 21379 } 21380 21381 21382 /* 21383 * Function: sd_log_page_supported 21384 * 21385 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21386 * supported log pages. 21387 * 21388 * Arguments: un - driver soft state (unit) structure 21389 * log_page - the log page code to look for 21390 * 21391 * Return Code: -1 - on error (log sense is optional and may not be supported). 21392 * 0 - log page not found. 21393 * 1 - log page found. 21394 */ 21395 21396 static int 21397 sd_log_page_supported(struct sd_lun *un, int log_page) 21398 { 21399 uchar_t *log_page_data; 21400 int i; 21401 int match = 0; 21402 int log_size; 21403 21404 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21405 21406 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21407 SD_PATH_DIRECT) != 0) { 21408 SD_ERROR(SD_LOG_COMMON, un, 21409 "sd_log_page_supported: failed log page retrieval\n"); 21410 kmem_free(log_page_data, 0xFF); 21411 return (-1); 21412 } 21413 log_size = log_page_data[3]; 21414 21415 /* 21416 * The list of supported log pages starts at the fourth byte. Check 21417 * until we run out of log pages or a match is found. 21418 */ 21419 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21420 if (log_page_data[i] == log_page) { 21421 match++; 21422 } 21423 } 21424 kmem_free(log_page_data, 0xFF); 21425 return (match); 21426 } 21427 21428 21429 /* 21430 * Function: sd_mhdioc_failfast 21431 * 21432 * Description: This routine is the driver entry point for handling ioctl 21433 * requests to enable/disable the multihost failfast option. 21434 * (MHIOCENFAILFAST) 21435 * 21436 * Arguments: dev - the device number 21437 * arg - user specified probing interval. 21438 * flag - this argument is a pass through to ddi_copyxxx() 21439 * directly from the mode argument of ioctl().
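 *		The interval is passed by reference as an int, in
 *		milliseconds; an illustrative caller (not compiled here):
 *
 *			int mh_time = 1000;	(probe once a second)
 *			ioctl(fd, MHIOCENFAILFAST, &mh_time);
 *
 *		A zero interval disables failfast again, and INT_MAX sets
 *		the failfast flag without starting the periodic watch (see
 *		the INT_MAX special case in the code below).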
21440 * 21441 * Return Code: 0 21442 * EFAULT 21443 * ENXIO 21444 */ 21445 21446 static int 21447 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21448 { 21449 struct sd_lun *un = NULL; 21450 int mh_time; 21451 int rval = 0; 21452 21453 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21454 return (ENXIO); 21455 } 21456 21457 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21458 return (EFAULT); 21459 21460 if (mh_time) { 21461 mutex_enter(SD_MUTEX(un)); 21462 un->un_resvd_status |= SD_FAILFAST; 21463 mutex_exit(SD_MUTEX(un)); 21464 /* 21465 * If mh_time is INT_MAX, then this ioctl is being used for 21466 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21467 */ 21468 if (mh_time != INT_MAX) { 21469 rval = sd_check_mhd(dev, mh_time); 21470 } 21471 } else { 21472 (void) sd_check_mhd(dev, 0); 21473 mutex_enter(SD_MUTEX(un)); 21474 un->un_resvd_status &= ~SD_FAILFAST; 21475 mutex_exit(SD_MUTEX(un)); 21476 } 21477 return (rval); 21478 } 21479 21480 21481 /* 21482 * Function: sd_mhdioc_takeown 21483 * 21484 * Description: This routine is the driver entry point for handling ioctl 21485 * requests to forcefully acquire exclusive access rights to the 21486 * multihost disk (MHIOCTKOWN). 21487 * 21488 * Arguments: dev - the device number 21489 * arg - user provided structure specifying the delay 21490 * parameters in milliseconds 21491 * flag - this argument is a pass through to ddi_copyxxx() 21492 * directly from the mode argument of ioctl(). 21493 * 21494 * Return Code: 0 21495 * EFAULT 21496 * ENXIO 21497 */ 21498 21499 static int 21500 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21501 { 21502 struct sd_lun *un = NULL; 21503 struct mhioctkown *tkown = NULL; 21504 int rval = 0; 21505 21506 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21507 return (ENXIO); 21508 } 21509 21510 if (arg != NULL) { 21511 tkown = (struct mhioctkown *) 21512 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21513 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21514 if (rval != 0) { 21515 rval = EFAULT; 21516 goto error; 21517 } 21518 } 21519 21520 rval = sd_take_ownership(dev, tkown); 21521 mutex_enter(SD_MUTEX(un)); 21522 if (rval == 0) { 21523 un->un_resvd_status |= SD_RESERVE; 21524 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21525 sd_reinstate_resv_delay = 21526 tkown->reinstate_resv_delay * 1000; 21527 } else { 21528 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21529 } 21530 /* 21531 * Give the scsi_watch routine interval set by 21532 * the MHIOCENFAILFAST ioctl precedence here. 21533 */ 21534 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21535 mutex_exit(SD_MUTEX(un)); 21536 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21537 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21538 "sd_mhdioc_takeown : %d\n", 21539 sd_reinstate_resv_delay); 21540 } else { 21541 mutex_exit(SD_MUTEX(un)); 21542 } 21543 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21544 sd_mhd_reset_notify_cb, (caddr_t)un); 21545 } else { 21546 un->un_resvd_status &= ~SD_RESERVE; 21547 mutex_exit(SD_MUTEX(un)); 21548 } 21549 21550 error: 21551 if (tkown != NULL) { 21552 kmem_free(tkown, sizeof (struct mhioctkown)); 21553 } 21554 return (rval); 21555 } 21556 21557 21558 /* 21559 * Function: sd_mhdioc_release 21560 * 21561 * Description: This routine is the driver entry point for handling ioctl 21562 * requests to release exclusive access rights to the multihost 21563 * disk (MHIOCRELEASE). 
21564 * 21565 * Arguments: dev - the device number 21566 * 21567 * Return Code: 0 21568 * ENXIO 21569 */ 21570 21571 static int 21572 sd_mhdioc_release(dev_t dev) 21573 { 21574 struct sd_lun *un = NULL; 21575 timeout_id_t resvd_timeid_save; 21576 int resvd_status_save; 21577 int rval = 0; 21578 21579 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21580 return (ENXIO); 21581 } 21582 21583 mutex_enter(SD_MUTEX(un)); 21584 resvd_status_save = un->un_resvd_status; 21585 un->un_resvd_status &= 21586 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21587 if (un->un_resvd_timeid) { 21588 resvd_timeid_save = un->un_resvd_timeid; 21589 un->un_resvd_timeid = NULL; 21590 mutex_exit(SD_MUTEX(un)); 21591 (void) untimeout(resvd_timeid_save); 21592 } else { 21593 mutex_exit(SD_MUTEX(un)); 21594 } 21595 21596 /* 21597 * destroy any pending timeout thread that may be attempting to 21598 * reinstate reservation on this device. 21599 */ 21600 sd_rmv_resv_reclaim_req(dev); 21601 21602 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21603 mutex_enter(SD_MUTEX(un)); 21604 if ((un->un_mhd_token) && 21605 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21606 mutex_exit(SD_MUTEX(un)); 21607 (void) sd_check_mhd(dev, 0); 21608 } else { 21609 mutex_exit(SD_MUTEX(un)); 21610 } 21611 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21612 sd_mhd_reset_notify_cb, (caddr_t)un); 21613 } else { 21614 /* 21615 * sd_mhd_watch_cb will restart the resvd recover timeout thread 21616 */ 21617 mutex_enter(SD_MUTEX(un)); 21618 un->un_resvd_status = resvd_status_save; 21619 mutex_exit(SD_MUTEX(un)); 21620 } 21621 return (rval); 21622 } 21623 21624 21625 /* 21626 * Function: sd_mhdioc_register_devid 21627 * 21628 * Description: This routine is the driver entry point for handling ioctl 21629 * requests to register the device id (MHIOCREREGISTERDEVID). 21630 * 21631 * Note: The implementation for this ioctl has been updated to 21632 * be consistent with the original PSARC case (1999/357) 21633 * (4375899, 4241671, 4220005) 21634 * 21635 * Arguments: dev - the device number 21636 * 21637 * Return Code: 0 21638 * ENXIO 21639 */ 21640 21641 static int 21642 sd_mhdioc_register_devid(dev_t dev) 21643 { 21644 struct sd_lun *un = NULL; 21645 int rval = 0; 21646 21647 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21648 return (ENXIO); 21649 } 21650 21651 ASSERT(!mutex_owned(SD_MUTEX(un))); 21652 21653 mutex_enter(SD_MUTEX(un)); 21654 21655 /* If a devid already exists, de-register it */ 21656 if (un->un_devid != NULL) { 21657 ddi_devid_unregister(SD_DEVINFO(un)); 21658 /* 21659 * After unregister devid, needs to free devid memory 21660 */ 21661 ddi_devid_free(un->un_devid); 21662 un->un_devid = NULL; 21663 } 21664 21665 /* Check for reservation conflict */ 21666 mutex_exit(SD_MUTEX(un)); 21667 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 21668 mutex_enter(SD_MUTEX(un)); 21669 21670 switch (rval) { 21671 case 0: 21672 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 21673 break; 21674 case EACCES: 21675 break; 21676 default: 21677 rval = EIO; 21678 } 21679 21680 mutex_exit(SD_MUTEX(un)); 21681 return (rval); 21682 } 21683 21684 21685 /* 21686 * Function: sd_mhdioc_inkeys 21687 * 21688 * Description: This routine is the driver entry point for handling ioctl 21689 * requests to issue the SCSI-3 Persistent In Read Keys command 21690 * to the device (MHIOCGRP_INKEYS). 
21691 * 21692 * Arguments: dev - the device number 21693 * arg - user provided in_keys structure 21694 * flag - this argument is a pass through to ddi_copyxxx() 21695 * directly from the mode argument of ioctl(). 21696 * 21697 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 21698 * ENXIO 21699 * EFAULT 21700 */ 21701 21702 static int 21703 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 21704 { 21705 struct sd_lun *un; 21706 mhioc_inkeys_t inkeys; 21707 int rval = 0; 21708 21709 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21710 return (ENXIO); 21711 } 21712 21713 #ifdef _MULTI_DATAMODEL 21714 switch (ddi_model_convert_from(flag & FMODELS)) { 21715 case DDI_MODEL_ILP32: { 21716 struct mhioc_inkeys32 inkeys32; 21717 21718 if (ddi_copyin(arg, &inkeys32, 21719 sizeof (struct mhioc_inkeys32), flag) != 0) { 21720 return (EFAULT); 21721 } 21722 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 21723 if ((rval = sd_persistent_reservation_in_read_keys(un, 21724 &inkeys, flag)) != 0) { 21725 return (rval); 21726 } 21727 inkeys32.generation = inkeys.generation; 21728 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 21729 flag) != 0) { 21730 return (EFAULT); 21731 } 21732 break; 21733 } 21734 case DDI_MODEL_NONE: 21735 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 21736 flag) != 0) { 21737 return (EFAULT); 21738 } 21739 if ((rval = sd_persistent_reservation_in_read_keys(un, 21740 &inkeys, flag)) != 0) { 21741 return (rval); 21742 } 21743 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 21744 flag) != 0) { 21745 return (EFAULT); 21746 } 21747 break; 21748 } 21749 21750 #else /* ! _MULTI_DATAMODEL */ 21751 21752 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 21753 return (EFAULT); 21754 } 21755 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 21756 if (rval != 0) { 21757 return (rval); 21758 } 21759 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 21760 return (EFAULT); 21761 } 21762 21763 #endif /* _MULTI_DATAMODEL */ 21764 21765 return (rval); 21766 } 21767 21768 21769 /* 21770 * Function: sd_mhdioc_inresv 21771 * 21772 * Description: This routine is the driver entry point for handling ioctl 21773 * requests to issue the SCSI-3 Persistent In Read Reservations 21774 * command to the device (MHIOCGRP_INKEYS). 21775 * 21776 * Arguments: dev - the device number 21777 * arg - user provided in_resv structure 21778 * flag - this argument is a pass through to ddi_copyxxx() 21779 * directly from the mode argument of ioctl(). 
21780 * 21781 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 21782 * ENXIO 21783 * EFAULT 21784 */ 21785 21786 static int 21787 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 21788 { 21789 struct sd_lun *un; 21790 mhioc_inresvs_t inresvs; 21791 int rval = 0; 21792 21793 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21794 return (ENXIO); 21795 } 21796 21797 #ifdef _MULTI_DATAMODEL 21798 21799 switch (ddi_model_convert_from(flag & FMODELS)) { 21800 case DDI_MODEL_ILP32: { 21801 struct mhioc_inresvs32 inresvs32; 21802 21803 if (ddi_copyin(arg, &inresvs32, 21804 sizeof (struct mhioc_inresvs32), flag) != 0) { 21805 return (EFAULT); 21806 } 21807 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 21808 if ((rval = sd_persistent_reservation_in_read_resv(un, 21809 &inresvs, flag)) != 0) { 21810 return (rval); 21811 } 21812 inresvs32.generation = inresvs.generation; 21813 if (ddi_copyout(&inresvs32, arg, 21814 sizeof (struct mhioc_inresvs32), flag) != 0) { 21815 return (EFAULT); 21816 } 21817 break; 21818 } 21819 case DDI_MODEL_NONE: 21820 if (ddi_copyin(arg, &inresvs, 21821 sizeof (mhioc_inresvs_t), flag) != 0) { 21822 return (EFAULT); 21823 } 21824 if ((rval = sd_persistent_reservation_in_read_resv(un, 21825 &inresvs, flag)) != 0) { 21826 return (rval); 21827 } 21828 if (ddi_copyout(&inresvs, arg, 21829 sizeof (mhioc_inresvs_t), flag) != 0) { 21830 return (EFAULT); 21831 } 21832 break; 21833 } 21834 21835 #else /* ! _MULTI_DATAMODEL */ 21836 21837 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 21838 return (EFAULT); 21839 } 21840 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 21841 if (rval != 0) { 21842 return (rval); 21843 } 21844 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 21845 return (EFAULT); 21846 } 21847 21848 #endif /* ! _MULTI_DATAMODEL */ 21849 21850 return (rval); 21851 } 21852 21853 21854 /* 21855 * The following routines support the clustering functionality described below 21856 * and implement lost reservation reclaim functionality. 21857 * 21858 * Clustering 21859 * ---------- 21860 * The clustering code uses two different, independent forms of SCSI 21861 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 21862 * Persistent Group Reservations. For any particular disk, it will use either 21863 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 21864 * 21865 * SCSI-2 21866 * The cluster software takes ownership of a multi-hosted disk by issuing the 21867 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 21868 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 21869 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 21870 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 21871 * driver. The meaning of failfast is that if the driver (on this host) ever 21872 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 21873 * it should immediately panic the host. The motivation for this ioctl is that 21874 * if this host does encounter reservation conflict, the underlying cause is 21875 * that some other host of the cluster has decided that this host is no longer 21876 * in the cluster and has seized control of the disks for itself. Since this 21877 * host is no longer in the cluster, it ought to panic itself. 
The 21878 * MHIOCENFAILFAST ioctl does two things: 21879 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 21880 * error to panic the host 21881 * (b) it sets up a periodic timer to test whether this host still has 21882 * "access" (in that no other host has reserved the device): if the 21883 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 21884 * purpose of that periodic timer is to handle scenarios where the host is 21885 * otherwise temporarily quiescent, temporarily doing no real i/o. 21886 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 21887 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 21888 * the device itself. 21889 * 21890 * SCSI-3 PGR 21891 * A direct semantic implementation of the SCSI-3 Persistent Reservation 21892 * facility is supported through the shared multihost disk ioctls 21893 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 21894 * MHIOCGRP_PREEMPTANDABORT) 21895 * 21896 * Reservation Reclaim: 21897 * -------------------- 21898 * To support the lost reservation reclaim operations this driver creates a 21899 * single thread to handle reinstating reservations on all devices that have 21900 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 21901 * have LOST RESERVATIONS when the scsi watch facility calls back 21902 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 21903 * requests to regain the lost reservations. 21904 */ 21905 21906 /* 21907 * Function: sd_check_mhd() 21908 * 21909 * Description: This function sets up and submits a scsi watch request or 21910 * terminates an existing watch request. This routine is used in 21911 * support of reservation reclaim. 21912 * 21913 * Arguments: dev - the device 'dev_t' is used for context to discriminate 21914 * among multiple watches that share the callback function 21915 * interval - the number of milliseconds specifying the watch 21916 * interval for issuing TEST UNIT READY commands. If 21917 * set to 0 the watch should be terminated. If the 21918 * interval is set to 0 and if the device is required 21919 * to hold reservation while disabling failfast, the 21920 * watch is restarted with an interval of 21921 * reinstate_resv_delay. 21922 * 21923 * Return Code: 0 - Successful submit/terminate of scsi watch request 21924 * ENXIO - Indicates an invalid device was specified 21925 * EAGAIN - Unable to submit the scsi watch request 21926 */ 21927 21928 static int 21929 sd_check_mhd(dev_t dev, int interval) 21930 { 21931 struct sd_lun *un; 21932 opaque_t token; 21933 21934 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21935 return (ENXIO); 21936 } 21937 21938 /* is this a watch termination request? */ 21939 if (interval == 0) { 21940 mutex_enter(SD_MUTEX(un)); 21941 /* if there is an existing watch task then terminate it */ 21942 if (un->un_mhd_token) { 21943 token = un->un_mhd_token; 21944 un->un_mhd_token = NULL; 21945 mutex_exit(SD_MUTEX(un)); 21946 (void) scsi_watch_request_terminate(token, 21947 SCSI_WATCH_TERMINATE_WAIT); 21948 mutex_enter(SD_MUTEX(un)); 21949 } else { 21950 mutex_exit(SD_MUTEX(un)); 21951 /* 21952 * Note: If we return here we don't check for the 21953 * failfast case. This is the original legacy 21954 * implementation but perhaps we should be checking 21955 * the failfast case.
21956 */ 21957 return (0); 21958 } 21959 /* 21960 * If the device is required to hold reservation while 21961 * disabling failfast, we need to restart the scsi_watch 21962 * routine with an interval of reinstate_resv_delay. 21963 */ 21964 if (un->un_resvd_status & SD_RESERVE) { 21965 interval = sd_reinstate_resv_delay/1000; 21966 } else { 21967 /* no failfast so bail */ 21968 mutex_exit(SD_MUTEX(un)); 21969 return (0); 21970 } 21971 mutex_exit(SD_MUTEX(un)); 21972 } 21973 21974 /* 21975 * adjust minimum time interval to 1 second, 21976 * and convert from msecs to usecs 21977 */ 21978 if (interval > 0 && interval < 1000) { 21979 interval = 1000; 21980 } 21981 interval *= 1000; 21982 21983 /* 21984 * submit the request to the scsi_watch service 21985 */ 21986 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 21987 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 21988 if (token == NULL) { 21989 return (EAGAIN); 21990 } 21991 21992 /* 21993 * save token for termination later on 21994 */ 21995 mutex_enter(SD_MUTEX(un)); 21996 un->un_mhd_token = token; 21997 mutex_exit(SD_MUTEX(un)); 21998 return (0); 21999 } 22000 22001 22002 /* 22003 * Function: sd_mhd_watch_cb() 22004 * 22005 * Description: This function is the call back function used by the scsi watch 22006 * facility. The scsi watch facility sends the "Test Unit Ready" 22007 * and processes the status. If applicable (i.e. a "Unit Attention" 22008 * status and automatic "Request Sense" not used) the scsi watch 22009 * facility will send a "Request Sense" and retrieve the sense data 22010 * to be passed to this callback function. In either case, whether 22011 * the automatic "Request Sense" was used or the facility submitted 22012 * one, this callback is passed the status and sense data. 22013 * 22014 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22015 * among multiple watches that share this callback function 22016 * resultp - scsi watch facility result packet containing scsi 22017 * packet, status byte and sense data 22018 * 22019 * Return Code: 0 - continue the watch task 22020 * non-zero - terminate the watch task 22021 */ 22022 22023 static int 22024 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22025 { 22026 struct sd_lun *un; 22027 struct scsi_status *statusp; 22028 uint8_t *sensep; 22029 struct scsi_pkt *pkt; 22030 uchar_t actual_sense_length; 22031 dev_t dev = (dev_t)arg; 22032 22033 ASSERT(resultp != NULL); 22034 statusp = resultp->statusp; 22035 sensep = (uint8_t *)resultp->sensep; 22036 pkt = resultp->pkt; 22037 actual_sense_length = resultp->actual_sense_length; 22038 22039 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22040 return (ENXIO); 22041 } 22042 22043 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22044 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22045 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22046 22047 /* Begin processing of the status and/or sense data */ 22048 if (pkt->pkt_reason != CMD_CMPLT) { 22049 /* Handle the incomplete packet */ 22050 sd_mhd_watch_incomplete(un, pkt); 22051 return (0); 22052 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22053 if (*((unsigned char *)statusp) 22054 == STATUS_RESERVATION_CONFLICT) { 22055 /* 22056 * Handle a reservation conflict by panicking if 22057 * configured for failfast or by logging the conflict 22058 * and updating the reservation status 22059 */ 22060 mutex_enter(SD_MUTEX(un)); 22061 if ((un->un_resvd_status & SD_FAILFAST) && 22062 (sd_failfast_enable)) { 22063
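				/*
				 * Both the per-device SD_FAILFAST state and
				 * the global sd_failfast_enable tunable must
				 * be set before a reservation conflict
				 * panics the host.
				 */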
sd_panic_for_res_conflict(un); 22064 /*NOTREACHED*/ 22065 } 22066 SD_INFO(SD_LOG_IOCTL_MHD, un, 22067 "sd_mhd_watch_cb: Reservation Conflict\n"); 22068 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22069 mutex_exit(SD_MUTEX(un)); 22070 } 22071 } 22072 22073 if (sensep != NULL) { 22074 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22075 mutex_enter(SD_MUTEX(un)); 22076 if ((scsi_sense_asc(sensep) == 22077 SD_SCSI_RESET_SENSE_CODE) && 22078 (un->un_resvd_status & SD_RESERVE)) { 22079 /* 22080 * The additional sense code indicates a power 22081 * on or bus device reset has occurred; update 22082 * the reservation status. 22083 */ 22084 un->un_resvd_status |= 22085 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22086 SD_INFO(SD_LOG_IOCTL_MHD, un, 22087 "sd_mhd_watch_cb: Lost Reservation\n"); 22088 } 22089 } else { 22090 return (0); 22091 } 22092 } else { 22093 mutex_enter(SD_MUTEX(un)); 22094 } 22095 22096 if ((un->un_resvd_status & SD_RESERVE) && 22097 (un->un_resvd_status & SD_LOST_RESERVE)) { 22098 if (un->un_resvd_status & SD_WANT_RESERVE) { 22099 /* 22100 * A reset occurred in between the last probe and this 22101 * one so if a timeout is pending cancel it. 22102 */ 22103 if (un->un_resvd_timeid) { 22104 timeout_id_t temp_id = un->un_resvd_timeid; 22105 un->un_resvd_timeid = NULL; 22106 mutex_exit(SD_MUTEX(un)); 22107 (void) untimeout(temp_id); 22108 mutex_enter(SD_MUTEX(un)); 22109 } 22110 un->un_resvd_status &= ~SD_WANT_RESERVE; 22111 } 22112 if (un->un_resvd_timeid == 0) { 22113 /* Schedule a timeout to handle the lost reservation */ 22114 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22115 (void *)dev, 22116 drv_usectohz(sd_reinstate_resv_delay)); 22117 } 22118 } 22119 mutex_exit(SD_MUTEX(un)); 22120 return (0); 22121 } 22122 22123 22124 /* 22125 * Function: sd_mhd_watch_incomplete() 22126 * 22127 * Description: This function is used to find out why a scsi pkt sent by the 22128 * scsi watch facility was not completed. Under some scenarios this 22129 * routine will return. Otherwise it will send a bus reset to see 22130 * if the drive is still online. 22131 * 22132 * Arguments: un - driver soft state (unit) structure 22133 * pkt - incomplete scsi pkt 22134 */ 22135 22136 static void 22137 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22138 { 22139 int be_chatty; 22140 int perr; 22141 22142 ASSERT(pkt != NULL); 22143 ASSERT(un != NULL); 22144 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22145 perr = (pkt->pkt_statistics & STAT_PERR); 22146 22147 mutex_enter(SD_MUTEX(un)); 22148 if (un->un_state == SD_STATE_DUMPING) { 22149 mutex_exit(SD_MUTEX(un)); 22150 return; 22151 } 22152 22153 switch (pkt->pkt_reason) { 22154 case CMD_UNX_BUS_FREE: 22155 /* 22156 * If we had a parity error that caused the target to drop BSY*, 22157 * don't be chatty about it. 22158 */ 22159 if (perr && be_chatty) { 22160 be_chatty = 0; 22161 } 22162 break; 22163 case CMD_TAG_REJECT: 22164 /* 22165 * The SCSI-2 spec states that a tag reject will be sent by the 22166 * target if tagged queuing is not supported. A tag reject may 22167 * also be sent during certain initialization periods or to 22168 * control internal resources. For the latter case the target 22169 * may also return Queue Full. 22170 * 22171 * If this driver receives a tag reject from a target that is 22172 * going through an init period or controlling internal 22173 * resources tagged queuing will be disabled. 
This is less 22174 * than optimal behavior, but the driver is unable to determine 22175 * the target state and assumes tagged queuing is not supported. 22176 */ 22177 pkt->pkt_flags = 0; 22178 un->un_tagflags = 0; 22179 22180 if (un->un_f_opt_queueing == TRUE) { 22181 un->un_throttle = min(un->un_throttle, 3); 22182 } else { 22183 un->un_throttle = 1; 22184 } 22185 mutex_exit(SD_MUTEX(un)); 22186 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22187 mutex_enter(SD_MUTEX(un)); 22188 break; 22189 case CMD_INCOMPLETE: 22190 /* 22191 * The transport stopped with an abnormal state; fall through and 22192 * reset the target and/or bus unless selection did not complete 22193 * (indicated by STATE_GOT_BUS), in which case we don't want to 22194 * go through a target/bus reset. 22195 */ 22196 if (pkt->pkt_state == STATE_GOT_BUS) { 22197 break; 22198 } 22199 /*FALLTHROUGH*/ 22200 22201 case CMD_TIMEOUT: 22202 default: 22203 /* 22204 * The lun may still be running the command, so a lun reset 22205 * should be attempted. If the lun reset fails or cannot be 22206 * issued, then try a target reset. Lastly, try a bus reset. 22207 */ 22208 if ((pkt->pkt_statistics & 22209 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22210 int reset_retval = 0; 22211 mutex_exit(SD_MUTEX(un)); 22212 if (un->un_f_allow_bus_device_reset == TRUE) { 22213 if (un->un_f_lun_reset_enabled == TRUE) { 22214 reset_retval = 22215 scsi_reset(SD_ADDRESS(un), 22216 RESET_LUN); 22217 } 22218 if (reset_retval == 0) { 22219 reset_retval = 22220 scsi_reset(SD_ADDRESS(un), 22221 RESET_TARGET); 22222 } 22223 } 22224 if (reset_retval == 0) { 22225 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22226 } 22227 mutex_enter(SD_MUTEX(un)); 22228 } 22229 break; 22230 } 22231 22232 /* A device/bus reset has occurred; update the reservation status. */ 22233 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22234 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22235 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22236 un->un_resvd_status |= 22237 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22238 SD_INFO(SD_LOG_IOCTL_MHD, un, 22239 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22240 } 22241 } 22242 22243 /* 22244 * The disk has been turned off; update the device state. 22245 * 22246 * Note: Should we be offlining the disk here? 22247 */ 22248 if (pkt->pkt_state == STATE_GOT_BUS) { 22249 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22250 "Disk not responding to selection\n"); 22251 if (un->un_state != SD_STATE_OFFLINE) { 22252 New_state(un, SD_STATE_OFFLINE); 22253 } 22254 } else if (be_chatty) { 22255 /* 22256 * suppress messages if they are all the same pkt reason; 22257 * with TQ, many (up to 256) are returned with the same 22258 * pkt_reason 22259 */ 22260 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22261 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22262 "sd_mhd_watch_incomplete: " 22263 "SCSI transport failed: reason '%s'\n", 22264 scsi_rname(pkt->pkt_reason)); 22265 } 22266 } 22267 un->un_last_pkt_reason = pkt->pkt_reason; 22268 mutex_exit(SD_MUTEX(un)); 22269 } 22270 22271 22272 /* 22273 * Function: sd_sname() 22274 * 22275 * Description: This is a simple little routine to return a string containing 22276 * a printable description of the command status byte for use in 22277 * logging. 22278 * 22279 * Arguments: status - the command status byte 22280 * 22281 * Return Code: char * - string containing status description.
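 *
 * Usage (illustrative): the returned strings are static, so callers
 * such as sd_mhd_watch_cb() may pass the result directly to a log
 * format string:
 *
 *	SD_TRACE(SD_LOG_IOCTL_MHD, un, "status '%s'\n",
 *	    sd_sname(*((unsigned char *)statusp)));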
22282 */ 22283 22284 static char * 22285 sd_sname(uchar_t status) 22286 { 22287 switch (status & STATUS_MASK) { 22288 case STATUS_GOOD: 22289 return ("good status"); 22290 case STATUS_CHECK: 22291 return ("check condition"); 22292 case STATUS_MET: 22293 return ("condition met"); 22294 case STATUS_BUSY: 22295 return ("busy"); 22296 case STATUS_INTERMEDIATE: 22297 return ("intermediate"); 22298 case STATUS_INTERMEDIATE_MET: 22299 return ("intermediate - condition met"); 22300 case STATUS_RESERVATION_CONFLICT: 22301 return ("reservation_conflict"); 22302 case STATUS_TERMINATED: 22303 return ("command terminated"); 22304 case STATUS_QFULL: 22305 return ("queue full"); 22306 default: 22307 return ("<unknown status>"); 22308 } 22309 } 22310 22311 22312 /* 22313 * Function: sd_mhd_resvd_recover() 22314 * 22315 * Description: This function adds a reservation entry to the 22316 * sd_resv_reclaim_request list and signals the reservation 22317 * reclaim thread that there is work pending. If the reservation 22318 * reclaim thread has not been previously created this function 22319 * will kick it off. 22320 * 22321 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22322 * among multiple watches that share this callback function 22323 * 22324 * Context: This routine is called by timeout() and is run in interrupt 22325 * context. It must not sleep or call other functions which may 22326 * sleep. 22327 */ 22328 22329 static void 22330 sd_mhd_resvd_recover(void *arg) 22331 { 22332 dev_t dev = (dev_t)arg; 22333 struct sd_lun *un; 22334 struct sd_thr_request *sd_treq = NULL; 22335 struct sd_thr_request *sd_cur = NULL; 22336 struct sd_thr_request *sd_prev = NULL; 22337 int already_there = 0; 22338 22339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22340 return; 22341 } 22342 22343 mutex_enter(SD_MUTEX(un)); 22344 un->un_resvd_timeid = NULL; 22345 if (un->un_resvd_status & SD_WANT_RESERVE) { 22346 /* 22347 * There was a reset so don't issue the reserve, allow the 22348 * sd_mhd_watch_cb callback function to notice this and 22349 * reschedule the timeout for reservation. 22350 */ 22351 mutex_exit(SD_MUTEX(un)); 22352 return; 22353 } 22354 mutex_exit(SD_MUTEX(un)); 22355 22356 /* 22357 * Add this device to the sd_resv_reclaim_request list and the 22358 * sd_resv_reclaim_thread should take care of the rest. 22359 * 22360 * Note: We can't sleep in this context so if the memory allocation 22361 * fails allow the sd_mhd_watch_cb callback function to notice this and 22362 * reschedule the timeout for reservation. 
(4378460) 22363 */ 22364 sd_treq = (struct sd_thr_request *) 22365 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 22366 if (sd_treq == NULL) { 22367 return; 22368 } 22369 22370 sd_treq->sd_thr_req_next = NULL; 22371 sd_treq->dev = dev; 22372 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22373 if (sd_tr.srq_thr_req_head == NULL) { 22374 sd_tr.srq_thr_req_head = sd_treq; 22375 } else { 22376 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 22377 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 22378 if (sd_cur->dev == dev) { 22379 /* 22380 * already in queue, so don't log 22381 * another request for the device 22382 */ 22383 already_there = 1; 22384 break; 22385 } 22386 sd_prev = sd_cur; 22387 } 22388 if (!already_there) { 22389 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 22390 "logging request for %lx\n", dev); 22391 sd_prev->sd_thr_req_next = sd_treq; 22392 } else { 22393 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 22394 } 22395 } 22396 22397 /* 22398 * Create a kernel thread to do the reservation reclaim and free up this 22399 * thread. We cannot block this (interrupt-context) thread while we go away 22400 * to do the reservation reclaim. 22401 */ 22402 if (sd_tr.srq_resv_reclaim_thread == NULL) 22403 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 22404 sd_resv_reclaim_thread, NULL, 22405 0, &p0, TS_RUN, v.v_maxsyspri - 2); 22406 22407 /* Tell the reservation reclaim thread that it has work to do */ 22408 cv_signal(&sd_tr.srq_resv_reclaim_cv); 22409 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22410 } 22411 22412 /* 22413 * Function: sd_resv_reclaim_thread() 22414 * 22415 * Description: This function implements the reservation reclaim operations. 22416 * 22417 * Arguments: none. The thread consumes requests from the global 22418 * sd_tr.srq_thr_req_head queue populated by sd_mhd_resvd_recover(). 22419 */ 22420 22421 static void 22422 sd_resv_reclaim_thread() 22423 { 22424 struct sd_lun *un; 22425 struct sd_thr_request *sd_mhreq; 22426 22427 /* Wait for work */ 22428 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22429 if (sd_tr.srq_thr_req_head == NULL) { 22430 cv_wait(&sd_tr.srq_resv_reclaim_cv, 22431 &sd_tr.srq_resv_reclaim_mutex); 22432 } 22433 22434 /* Loop while we have work */ 22435 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 22436 un = ddi_get_soft_state(sd_state, 22437 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 22438 if (un == NULL) { 22439 /* 22440 * softstate structure is NULL so just 22441 * dequeue the request and continue 22442 */ 22443 sd_tr.srq_thr_req_head = 22444 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22445 kmem_free(sd_tr.srq_thr_cur_req, 22446 sizeof (struct sd_thr_request)); 22447 continue; 22448 } 22449 22450 /* dequeue the request */ 22451 sd_mhreq = sd_tr.srq_thr_cur_req; 22452 sd_tr.srq_thr_req_head = 22453 sd_tr.srq_thr_cur_req->sd_thr_req_next; 22454 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22455 22456 /* 22457 * Reclaim reservation only if SD_RESERVE is still set. There 22458 * may have been a call to MHIOCRELEASE before we got here. 22459 */ 22460 mutex_enter(SD_MUTEX(un)); 22461 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22462 /* 22463 * Note: The SD_LOST_RESERVE flag is cleared before 22464 * reclaiming the reservation.
If this is done after the 22465 * call to sd_reserve_release a reservation loss in the 22466 * window between pkt completion of reserve cmd and 22467 * mutex_enter below may not be recognized 22468 */ 22469 un->un_resvd_status &= ~SD_LOST_RESERVE; 22470 mutex_exit(SD_MUTEX(un)); 22471 22472 if (sd_reserve_release(sd_mhreq->dev, 22473 SD_RESERVE) == 0) { 22474 mutex_enter(SD_MUTEX(un)); 22475 un->un_resvd_status |= SD_RESERVE; 22476 mutex_exit(SD_MUTEX(un)); 22477 SD_INFO(SD_LOG_IOCTL_MHD, un, 22478 "sd_resv_reclaim_thread: " 22479 "Reservation Recovered\n"); 22480 } else { 22481 mutex_enter(SD_MUTEX(un)); 22482 un->un_resvd_status |= SD_LOST_RESERVE; 22483 mutex_exit(SD_MUTEX(un)); 22484 SD_INFO(SD_LOG_IOCTL_MHD, un, 22485 "sd_resv_reclaim_thread: Failed " 22486 "Reservation Recovery\n"); 22487 } 22488 } else { 22489 mutex_exit(SD_MUTEX(un)); 22490 } 22491 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22492 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22493 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22494 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22495 /* 22496 * wakeup the destroy thread if anyone is waiting on 22497 * us to complete. 22498 */ 22499 cv_signal(&sd_tr.srq_inprocess_cv); 22500 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22501 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22502 } 22503 22504 /* 22505 * cleanup the sd_tr structure now that this thread will not exist 22506 */ 22507 ASSERT(sd_tr.srq_thr_req_head == NULL); 22508 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22509 sd_tr.srq_resv_reclaim_thread = NULL; 22510 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22511 thread_exit(); 22512 } 22513 22514 22515 /* 22516 * Function: sd_rmv_resv_reclaim_req() 22517 * 22518 * Description: This function removes any pending reservation reclaim requests 22519 * for the specified device. 22520 * 22521 * Arguments: dev - the device 'dev_t' 22522 */ 22523 22524 static void 22525 sd_rmv_resv_reclaim_req(dev_t dev) 22526 { 22527 struct sd_thr_request *sd_mhreq; 22528 struct sd_thr_request *sd_prev; 22529 22530 /* Remove a reservation reclaim request from the list */ 22531 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22532 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22533 /* 22534 * We are attempting to reinstate reservation for 22535 * this device. We wait for sd_reserve_release() 22536 * to return before we return. 22537 */ 22538 cv_wait(&sd_tr.srq_inprocess_cv, 22539 &sd_tr.srq_resv_reclaim_mutex); 22540 } else { 22541 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22542 if (sd_mhreq && sd_mhreq->dev == dev) { 22543 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22544 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22545 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22546 return; 22547 } 22548 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22549 if (sd_mhreq && sd_mhreq->dev == dev) { 22550 break; 22551 } 22552 sd_prev = sd_mhreq; 22553 } 22554 if (sd_mhreq != NULL) { 22555 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22556 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22557 } 22558 } 22559 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22560 } 22561 22562 22563 /* 22564 * Function: sd_mhd_reset_notify_cb() 22565 * 22566 * Description: This is a call back function for scsi_reset_notify. This 22567 * function updates the softstate reserved status and logs the 22568 * reset. The driver scsi watch facility callback function 22569 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22570 * will reclaim the reservation. 
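 *
 * Registration sketch (an assumption for illustration; the call is
 * not shown in this excerpt): the driver registers this callback
 * with the transport through the DDI scsi_reset_notify(9F) routine,
 * roughly
 *
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);
 *
 * and cancels it with SCSI_RESET_CANCEL when it is no longer needed.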
22571 * 22572 * Arguments: arg - driver soft state (unit) structure 22573 */ 22574 22575 static void 22576 sd_mhd_reset_notify_cb(caddr_t arg) 22577 { 22578 struct sd_lun *un = (struct sd_lun *)arg; 22579 22580 mutex_enter(SD_MUTEX(un)); 22581 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22582 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 22583 SD_INFO(SD_LOG_IOCTL_MHD, un, 22584 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 22585 } 22586 mutex_exit(SD_MUTEX(un)); 22587 } 22588 22589 22590 /* 22591 * Function: sd_take_ownership() 22592 * 22593 * Description: This routine implements an algorithm to achieve a stable 22594 * reservation on disks which don't implement priority reserve, 22595 * and makes sure that other hosts lose their re-reservation attempts. 22596 * This algorithm consists of a loop that keeps issuing the RESERVE 22597 * for some period of time (min_ownership_delay, default 6 seconds). 22598 * During that loop, it looks to see if there has been a bus device 22599 * reset or bus reset (both of which cause an existing reservation 22600 * to be lost). If the reservation is lost, issue RESERVE until a 22601 * period of min_ownership_delay with no resets has gone by, or 22602 * until max_ownership_delay has expired. This loop ensures that 22603 * the host really did manage to reserve the device, in spite of 22604 * resets. The looping for min_ownership_delay (default six 22605 * seconds) is important to early generation clustering products, 22606 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 22607 * MHIOCENFAILFAST periodic timer of two seconds. By having 22608 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 22609 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 22610 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 22611 * have already noticed, via the MHIOCENFAILFAST polling, that it 22612 * no longer "owns" the disk and will have panicked itself. Thus, 22613 * the host issuing the MHIOCTKOWN is assured (with timing 22614 * dependencies) that by the time it actually starts to use the 22615 * disk for real work, the old owner is no longer accessing it. 22616 * 22617 * min_ownership_delay is the minimum amount of time for which the 22618 * disk must be reserved continuously devoid of resets before the 22619 * MHIOCTKOWN ioctl will return success. 22620 * 22621 * max_ownership_delay indicates the amount of time by which the 22622 * take ownership should succeed or time out with an error. 22623 * 22624 * Arguments: dev - the device 'dev_t' 22625 * *p - struct containing timing info. 22626 * 22627 * Return Code: 0 for success or error code 22628 */ 22629 22630 static int 22631 sd_take_ownership(dev_t dev, struct mhioctkown *p) 22632 { 22633 struct sd_lun *un; 22634 int rval; 22635 int err; 22636 int reservation_count = 0; 22637 int min_ownership_delay = 6000000; /* in usec */ 22638 int max_ownership_delay = 30000000; /* in usec */ 22639 clock_t start_time; /* starting time of this algorithm */ 22640 clock_t end_time; /* time limit for giving up */ 22641 clock_t ownership_time; /* time limit for stable ownership */ 22642 clock_t current_time; 22643 clock_t previous_current_time; 22644 22645 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22646 return (ENXIO); 22647 } 22648 22649 /* 22650 * Attempt a device reservation. A priority reservation is requested.
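 *
 * Illustrative timing under the defaults (an approximation, not a
 * contract): the loop below sleeps 500000 usec per iteration, so
 * satisfying both reservation_count >= 4 and the 6 second
 * min_ownership_delay window amounts to roughly a dozen consecutive
 * successful RESERVEs before MHIOCTKOWN returns 0.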
22651 */ 22652 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 22653 != SD_SUCCESS) { 22654 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22655 "sd_take_ownership: return(1)=%d\n", rval); 22656 return (rval); 22657 } 22658 22659 /* Update the softstate reserved status to indicate the reservation */ 22660 mutex_enter(SD_MUTEX(un)); 22661 un->un_resvd_status |= SD_RESERVE; 22662 un->un_resvd_status &= 22663 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 22664 mutex_exit(SD_MUTEX(un)); 22665 22666 if (p != NULL) { 22667 if (p->min_ownership_delay != 0) { 22668 min_ownership_delay = p->min_ownership_delay * 1000; 22669 } 22670 if (p->max_ownership_delay != 0) { 22671 max_ownership_delay = p->max_ownership_delay * 1000; 22672 } 22673 } 22674 SD_INFO(SD_LOG_IOCTL_MHD, un, 22675 "sd_take_ownership: min, max delays: %d, %d\n", 22676 min_ownership_delay, max_ownership_delay); 22677 22678 start_time = ddi_get_lbolt(); 22679 current_time = start_time; 22680 ownership_time = current_time + drv_usectohz(min_ownership_delay); 22681 end_time = start_time + drv_usectohz(max_ownership_delay); 22682 22683 while (current_time - end_time < 0) { 22684 delay(drv_usectohz(500000)); 22685 22686 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 22687 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 22688 mutex_enter(SD_MUTEX(un)); 22689 rval = (un->un_resvd_status & 22690 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 22691 mutex_exit(SD_MUTEX(un)); 22692 break; 22693 } 22694 } 22695 previous_current_time = current_time; 22696 current_time = ddi_get_lbolt(); 22697 mutex_enter(SD_MUTEX(un)); 22698 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 22699 ownership_time = ddi_get_lbolt() + 22700 drv_usectohz(min_ownership_delay); 22701 reservation_count = 0; 22702 } else { 22703 reservation_count++; 22704 } 22705 un->un_resvd_status |= SD_RESERVE; 22706 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 22707 mutex_exit(SD_MUTEX(un)); 22708 22709 SD_INFO(SD_LOG_IOCTL_MHD, un, 22710 "sd_take_ownership: ticks for loop iteration=%ld, " 22711 "reservation=%s\n", (current_time - previous_current_time), 22712 reservation_count ? 
"ok" : "reclaimed"); 22713 22714 if (current_time - ownership_time >= 0 && 22715 reservation_count >= 4) { 22716 rval = 0; /* Achieved a stable ownership */ 22717 break; 22718 } 22719 if (current_time - end_time >= 0) { 22720 rval = EACCES; /* No ownership in max possible time */ 22721 break; 22722 } 22723 } 22724 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22725 "sd_take_ownership: return(2)=%d\n", rval); 22726 return (rval); 22727 } 22728 22729 22730 /* 22731 * Function: sd_reserve_release() 22732 * 22733 * Description: This function builds and sends scsi RESERVE, RELEASE, and 22734 * PRIORITY RESERVE commands based on a user specified command type 22735 * 22736 * Arguments: dev - the device 'dev_t' 22737 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 22738 * SD_RESERVE, SD_RELEASE 22739 * 22740 * Return Code: 0 or Error Code 22741 */ 22742 22743 static int 22744 sd_reserve_release(dev_t dev, int cmd) 22745 { 22746 struct uscsi_cmd *com = NULL; 22747 struct sd_lun *un = NULL; 22748 char cdb[CDB_GROUP0]; 22749 int rval; 22750 22751 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 22752 (cmd == SD_PRIORITY_RESERVE)); 22753 22754 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22755 return (ENXIO); 22756 } 22757 22758 /* instantiate and initialize the command and cdb */ 22759 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 22760 bzero(cdb, CDB_GROUP0); 22761 com->uscsi_flags = USCSI_SILENT; 22762 com->uscsi_timeout = un->un_reserve_release_time; 22763 com->uscsi_cdblen = CDB_GROUP0; 22764 com->uscsi_cdb = cdb; 22765 if (cmd == SD_RELEASE) { 22766 cdb[0] = SCMD_RELEASE; 22767 } else { 22768 cdb[0] = SCMD_RESERVE; 22769 } 22770 22771 /* Send the command. */ 22772 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22773 SD_PATH_STANDARD); 22774 22775 /* 22776 * "break" a reservation that is held by another host, by issuing a 22777 * reset if priority reserve is desired, and we could not get the 22778 * device. 22779 */ 22780 if ((cmd == SD_PRIORITY_RESERVE) && 22781 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22782 /* 22783 * First try to reset the LUN. If we cannot, then try a target 22784 * reset, followed by a bus reset if the target reset fails. 22785 */ 22786 int reset_retval = 0; 22787 if (un->un_f_lun_reset_enabled == TRUE) { 22788 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 22789 } 22790 if (reset_retval == 0) { 22791 /* The LUN reset either failed or was not issued */ 22792 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 22793 } 22794 if ((reset_retval == 0) && 22795 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 22796 rval = EIO; 22797 kmem_free(com, sizeof (*com)); 22798 return (rval); 22799 } 22800 22801 bzero(com, sizeof (struct uscsi_cmd)); 22802 com->uscsi_flags = USCSI_SILENT; 22803 com->uscsi_cdb = cdb; 22804 com->uscsi_cdblen = CDB_GROUP0; 22805 com->uscsi_timeout = 5; 22806 22807 /* 22808 * Reissue the last reserve command, this time without request 22809 * sense. Assume that it is just a regular reserve command. 22810 */ 22811 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 22812 SD_PATH_STANDARD); 22813 } 22814 22815 /* Return an error if still getting a reservation conflict. 
*/ 22816 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 22817 rval = EACCES; 22818 } 22819 22820 kmem_free(com, sizeof (*com)); 22821 return (rval); 22822 } 22823 22824 22825 #define SD_NDUMP_RETRIES 12 22826 /* 22827 * System Crash Dump routine 22828 */ 22829 22830 static int 22831 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 22832 { 22833 int instance; 22834 int partition; 22835 int i; 22836 int err; 22837 struct sd_lun *un; 22838 struct scsi_pkt *wr_pktp; 22839 struct buf *wr_bp; 22840 struct buf wr_buf; 22841 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 22842 daddr_t tgt_blkno; /* rmw - blkno for target */ 22843 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 22844 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 22845 size_t io_start_offset; 22846 int doing_rmw = FALSE; 22847 int rval; 22848 ssize_t dma_resid; 22849 daddr_t oblkno; 22850 diskaddr_t nblks = 0; 22851 diskaddr_t start_block; 22852 22853 instance = SDUNIT(dev); 22854 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 22855 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 22856 return (ENXIO); 22857 } 22858 22859 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 22860 22861 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 22862 22863 partition = SDPART(dev); 22864 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 22865 22866 /* Validate the blocks to dump against the partition size. */ 22867 22868 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 22869 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 22870 22871 if ((blkno + nblk) > nblks) { 22872 SD_TRACE(SD_LOG_DUMP, un, 22873 "sddump: dump range larger than partition: " 22874 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 22875 blkno, nblk, nblks); 22876 return (EINVAL); 22877 } 22878 22879 mutex_enter(&un->un_pm_mutex); 22880 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22881 struct scsi_pkt *start_pktp; 22882 22883 mutex_exit(&un->un_pm_mutex); 22884 22885 /* 22886 * use the pm framework to power on the HBA first 22887 */ 22888 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 22889 22890 /* 22891 * Dump no longer uses sdpower to power on a device; it's 22892 * done in-line here so it can be done in polled mode. 22893 */ 22894 22895 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 22896 22897 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 22898 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 22899 22900 if (start_pktp == NULL) { 22901 /* We were not given a SCSI packet, fail. */ 22902 return (EIO); 22903 } 22904 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 22905 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 22906 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 22907 start_pktp->pkt_flags = FLAG_NOINTR; 22908 22909 mutex_enter(SD_MUTEX(un)); 22910 SD_FILL_SCSI1_LUN(un, start_pktp); 22911 mutex_exit(SD_MUTEX(un)); 22912 /* 22913 * Scsi_poll returns 0 (success) if the command completes and 22914 * the status block is STATUS_GOOD. 22915 */ 22916 if (sd_scsi_poll(un, start_pktp) != 0) { 22917 scsi_destroy_pkt(start_pktp); 22918 return (EIO); 22919 } 22920 scsi_destroy_pkt(start_pktp); 22921 (void) sd_ddi_pm_resume(un); 22922 } else { 22923 mutex_exit(&un->un_pm_mutex); 22924 } 22925 22926 mutex_enter(SD_MUTEX(un)); 22927 un->un_throttle = 0; 22928 22929 /* 22930 * The first time through, reset the specific target device. 22931 * However, when cpr calls sddump we know that sd is in 22932 * a good state, so no bus reset is required. 22933 * Clear sense data via Request Sense cmd.
22934 * In sddump we don't care about allow_bus_device_reset anymore 22935 */ 22936 22937 if ((un->un_state != SD_STATE_SUSPENDED) && 22938 (un->un_state != SD_STATE_DUMPING)) { 22939 22940 New_state(un, SD_STATE_DUMPING); 22941 22942 if (un->un_f_is_fibre == FALSE) { 22943 mutex_exit(SD_MUTEX(un)); 22944 /* 22945 * Attempt a bus reset for parallel scsi. 22946 * 22947 * Note: A bus reset is required because on some host 22948 * systems (i.e. E420R) a bus device reset is 22949 * insufficient to reset the state of the target. 22950 * 22951 * Note: Don't issue the reset for fibre-channel, 22952 * because this tends to hang the bus (loop) for 22953 * too long while everyone is logging out and in 22954 * and the deadman timer for dumping will fire 22955 * before the dump is complete. 22956 */ 22957 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 22958 mutex_enter(SD_MUTEX(un)); 22959 Restore_state(un); 22960 mutex_exit(SD_MUTEX(un)); 22961 return (EIO); 22962 } 22963 22964 /* Delay to give the device some recovery time. */ 22965 drv_usecwait(10000); 22966 22967 if (sd_send_polled_RQS(un) == SD_FAILURE) { 22968 SD_INFO(SD_LOG_DUMP, un, 22969 "sddump: sd_send_polled_RQS failed\n"); 22970 } 22971 mutex_enter(SD_MUTEX(un)); 22972 } 22973 } 22974 22975 /* 22976 * Convert the partition-relative block number to a 22977 * disk physical block number. 22978 */ 22979 blkno += start_block; 22980 22981 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 22982 22983 22984 /* 22985 * Check if the device has a non-512 block size. 22986 */ 22987 wr_bp = NULL; 22988 if (NOT_DEVBSIZE(un)) { 22989 tgt_byte_offset = blkno * un->un_sys_blocksize; 22990 tgt_byte_count = nblk * un->un_sys_blocksize; 22991 if ((tgt_byte_offset % un->un_tgt_blocksize) || 22992 (tgt_byte_count % un->un_tgt_blocksize)) { 22993 doing_rmw = TRUE; 22994 /* 22995 * Calculate the block number and number of blocks 22996 * in terms of the media block size. 22997 */ 22998 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 22999 tgt_nblk = 23000 ((tgt_byte_offset + tgt_byte_count + 23001 (un->un_tgt_blocksize - 1)) / 23002 un->un_tgt_blocksize) - tgt_blkno; 23003 23004 /* 23005 * Invoke the routine which is going to do the read part 23006 * of the read-modify-write. 23007 * Note that this routine returns a pointer to 23008 * a valid bp in wr_bp. 23009 */ 23010 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 23011 &wr_bp); 23012 if (err) { 23013 mutex_exit(SD_MUTEX(un)); 23014 return (err); 23015 } 23016 /* 23017 * Offset is calculated as: 23018 * (original block # * system block size) - 23019 * (new block # * target block size) 23020 */ 23021 io_start_offset = 23022 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23023 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23024 23025 ASSERT((io_start_offset >= 0) && 23026 (io_start_offset < un->un_tgt_blocksize)); 23027 /* 23028 * Do the modify portion of read modify write.
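 * Worked example (illustrative): with 512-byte system blocks and a
 * 2048-byte target block size, a dump starting at system block 7 has
 * tgt_byte_offset = 3584, hence tgt_blkno = 1 and io_start_offset =
 * 3584 - (1 * 2048) = 1536, so the dump data is copied 1536 bytes
 * into the first target block read back from the media.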
23029 */ 23030 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23031 (size_t)nblk * un->un_sys_blocksize); 23032 } else { 23033 doing_rmw = FALSE; 23034 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23035 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23036 } 23037 23038 /* Convert blkno and nblk to target blocks */ 23039 blkno = tgt_blkno; 23040 nblk = tgt_nblk; 23041 } else { 23042 wr_bp = &wr_buf; 23043 bzero(wr_bp, sizeof (struct buf)); 23044 wr_bp->b_flags = B_BUSY; 23045 wr_bp->b_un.b_addr = addr; 23046 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23047 wr_bp->b_resid = 0; 23048 } 23049 23050 mutex_exit(SD_MUTEX(un)); 23051 23052 /* 23053 * Obtain a SCSI packet for the write command. 23054 * It should be safe to call the allocator here without 23055 * worrying about being locked for DVMA mapping because 23056 * the address we're passed is already a DVMA mapping 23057 * 23058 * We are also not going to worry about semaphore ownership 23059 * in the dump buffer. Dumping is single threaded at present. 23060 */ 23061 23062 wr_pktp = NULL; 23063 23064 dma_resid = wr_bp->b_bcount; 23065 oblkno = blkno; 23066 23067 while (dma_resid != 0) { 23068 23069 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23070 wr_bp->b_flags &= ~B_ERROR; 23071 23072 if (un->un_partial_dma_supported == 1) { 23073 blkno = oblkno + 23074 ((wr_bp->b_bcount - dma_resid) / 23075 un->un_tgt_blocksize); 23076 nblk = dma_resid / un->un_tgt_blocksize; 23077 23078 if (wr_pktp) { 23079 /* 23080 * Partial DMA transfers after initial transfer 23081 */ 23082 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23083 blkno, nblk); 23084 } else { 23085 /* Initial transfer */ 23086 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23087 un->un_pkt_flags, NULL_FUNC, NULL, 23088 blkno, nblk); 23089 } 23090 } else { 23091 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23092 0, NULL_FUNC, NULL, blkno, nblk); 23093 } 23094 23095 if (rval == 0) { 23096 /* We were given a SCSI packet, continue. 
*/ 23097 break; 23098 } 23099 23100 if (i == 0) { 23101 if (wr_bp->b_flags & B_ERROR) { 23102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23103 "no resources for dumping; " 23104 "error code: 0x%x, retrying", 23105 geterror(wr_bp)); 23106 } else { 23107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23108 "no resources for dumping; retrying"); 23109 } 23110 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23111 if (wr_bp->b_flags & B_ERROR) { 23112 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23113 "no resources for dumping; error code: " 23114 "0x%x, retrying\n", geterror(wr_bp)); 23115 } 23116 } else { 23117 if (wr_bp->b_flags & B_ERROR) { 23118 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23119 "no resources for dumping; " 23120 "error code: 0x%x, retries failed, " 23121 "giving up.\n", geterror(wr_bp)); 23122 } else { 23123 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23124 "no resources for dumping; " 23125 "retries failed, giving up.\n"); 23126 } 23127 mutex_enter(SD_MUTEX(un)); 23128 Restore_state(un); 23129 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23130 mutex_exit(SD_MUTEX(un)); 23131 scsi_free_consistent_buf(wr_bp); 23132 } else { 23133 mutex_exit(SD_MUTEX(un)); 23134 } 23135 return (EIO); 23136 } 23137 drv_usecwait(10000); 23138 } 23139 23140 if (un->un_partial_dma_supported == 1) { 23141 /* 23142 * save the resid from PARTIAL_DMA 23143 */ 23144 dma_resid = wr_pktp->pkt_resid; 23145 if (dma_resid != 0) 23146 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23147 wr_pktp->pkt_resid = 0; 23148 } else { 23149 dma_resid = 0; 23150 } 23151 23152 /* SunBug 1222170 */ 23153 wr_pktp->pkt_flags = FLAG_NOINTR; 23154 23155 err = EIO; 23156 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23157 23158 /* 23159 * Scsi_poll returns 0 (success) if the command completes and 23160 * the status block is STATUS_GOOD. We should only check 23161 * errors if this condition is not true. Even then we should 23162 * send our own request sense packet only if we have a check 23163 * condition and auto request sense has not been performed by 23164 * the hba. 23165 */ 23166 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23167 23168 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23169 (wr_pktp->pkt_resid == 0)) { 23170 err = SD_SUCCESS; 23171 break; 23172 } 23173 23174 /* 23175 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23176 */ 23177 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23179 "Error while dumping state...Device is gone\n"); 23180 break; 23181 } 23182 23183 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23184 SD_INFO(SD_LOG_DUMP, un, 23185 "sddump: write failed with CHECK, try # %d\n", i); 23186 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23187 (void) sd_send_polled_RQS(un); 23188 } 23189 23190 continue; 23191 } 23192 23193 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23194 int reset_retval = 0; 23195 23196 SD_INFO(SD_LOG_DUMP, un, 23197 "sddump: write failed with BUSY, try # %d\n", i); 23198 23199 if (un->un_f_lun_reset_enabled == TRUE) { 23200 reset_retval = scsi_reset(SD_ADDRESS(un), 23201 RESET_LUN); 23202 } 23203 if (reset_retval == 0) { 23204 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23205 } 23206 (void) sd_send_polled_RQS(un); 23207 23208 } else { 23209 SD_INFO(SD_LOG_DUMP, un, 23210 "sddump: write failed with 0x%x, try # %d\n", 23211 SD_GET_PKT_STATUS(wr_pktp), i); 23212 mutex_enter(SD_MUTEX(un)); 23213 sd_reset_target(un, wr_pktp); 23214 mutex_exit(SD_MUTEX(un)); 23215 } 23216 23217 /* 23218 * If we are not getting anywhere with lun/target resets, 23219 * let's reset the bus. 23220 */ 23221 if (i == SD_NDUMP_RETRIES/2) { 23222 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23223 (void) sd_send_polled_RQS(un); 23224 } 23225 } 23226 } 23227 23228 scsi_destroy_pkt(wr_pktp); 23229 mutex_enter(SD_MUTEX(un)); 23230 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23231 mutex_exit(SD_MUTEX(un)); 23232 scsi_free_consistent_buf(wr_bp); 23233 } else { 23234 mutex_exit(SD_MUTEX(un)); 23235 } 23236 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23237 return (err); 23238 } 23239 23240 /* 23241 * Function: sd_scsi_poll() 23242 * 23243 * Description: This is a wrapper for the scsi_poll call. 23244 * 23245 * Arguments: sd_lun - The unit structure 23246 * scsi_pkt - The scsi packet being sent to the device. 23247 * 23248 * Return Code: 0 - Command completed successfully with good status 23249 * -1 - Command failed. This could indicate a check condition 23250 * or other status value requiring recovery action. 23251 * 23252 * NOTE: This code is only called off sddump(). 23253 */ 23254 23255 static int 23256 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23257 { 23258 int status; 23259 23260 ASSERT(un != NULL); 23261 ASSERT(!mutex_owned(SD_MUTEX(un))); 23262 ASSERT(pktp != NULL); 23263 23264 status = SD_SUCCESS; 23265 23266 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23267 pktp->pkt_flags |= un->un_tagflags; 23268 pktp->pkt_flags &= ~FLAG_NODISCON; 23269 } 23270 23271 status = sd_ddi_scsi_poll(pktp); 23272 /* 23273 * Scsi_poll returns 0 (success) if the command completes and the 23274 * status block is STATUS_GOOD. We should only check errors if this 23275 * condition is not true. Even then we should send our own request 23276 * sense packet only if we have a check condition and auto 23277 * request sense has not been performed by the hba. 23278 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23279 */ 23280 if ((status != SD_SUCCESS) && 23281 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23282 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23283 (pktp->pkt_reason != CMD_DEV_GONE)) 23284 (void) sd_send_polled_RQS(un); 23285 23286 return (status); 23287 } 23288 23289 /* 23290 * Function: sd_send_polled_RQS() 23291 * 23292 * Description: This sends the request sense command to a device. 
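 * It reuses the pre-allocated un_rqs_pktp/un_rqs_bp pair under the
 * un_sense_isbusy guard, so no allocation is required; this is what
 * makes it safe to call from the polled crash dump path.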
23293 * 23294 * Arguments: sd_lun - The unit structure 23295 * 23296 * Return Code: 0 - Command completed successfully with good status 23297 * -1 - Command failed. 23298 * 23299 */ 23300 23301 static int 23302 sd_send_polled_RQS(struct sd_lun *un) 23303 { 23304 int ret_val; 23305 struct scsi_pkt *rqs_pktp; 23306 struct buf *rqs_bp; 23307 23308 ASSERT(un != NULL); 23309 ASSERT(!mutex_owned(SD_MUTEX(un))); 23310 23311 ret_val = SD_SUCCESS; 23312 23313 rqs_pktp = un->un_rqs_pktp; 23314 rqs_bp = un->un_rqs_bp; 23315 23316 mutex_enter(SD_MUTEX(un)); 23317 23318 if (un->un_sense_isbusy) { 23319 ret_val = SD_FAILURE; 23320 mutex_exit(SD_MUTEX(un)); 23321 return (ret_val); 23322 } 23323 23324 /* 23325 * If the request sense buffer (and packet) is not in use, 23326 * let's set the un_sense_isbusy and send our packet 23327 */ 23328 un->un_sense_isbusy = 1; 23329 rqs_pktp->pkt_resid = 0; 23330 rqs_pktp->pkt_reason = 0; 23331 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23332 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23333 23334 mutex_exit(SD_MUTEX(un)); 23335 23336 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23337 " 0x%p\n", rqs_bp->b_un.b_addr); 23338 23339 /* 23340 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23341 * axle - it has a call into us! 23342 */ 23343 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23344 SD_INFO(SD_LOG_COMMON, un, 23345 "sd_send_polled_RQS: RQS failed\n"); 23346 } 23347 23348 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23349 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23350 23351 mutex_enter(SD_MUTEX(un)); 23352 un->un_sense_isbusy = 0; 23353 mutex_exit(SD_MUTEX(un)); 23354 23355 return (ret_val); 23356 } 23357 23358 /* 23359 * Defines needed for localized version of the scsi_poll routine. 23360 */ 23361 #define CSEC 10000 /* usecs */ 23362 #define SEC_TO_CSEC (1000000/CSEC) 23363 23364 /* 23365 * Function: sd_ddi_scsi_poll() 23366 * 23367 * Description: Localized version of the scsi_poll routine. The purpose is to 23368 * send a scsi_pkt to a device as a polled command. This version 23369 * is to ensure more robust handling of transport errors. 23370 * Specifically this routine cures not ready, coming ready 23371 * transition for power up and reset of sonoma's. This can take 23372 * up to 45 seconds for power-on and 20 seconds for reset of a 23373 * sonoma lun. 23374 * 23375 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23376 * 23377 * Return Code: 0 - Command completed successfully with good status 23378 * -1 - Command failed. 23379 * 23380 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 23381 * be fixed (removing this code), we need to determine how to handle the 23382 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23383 * 23384 * NOTE: This code is only called off sddump(). 23385 */ 23386 static int 23387 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23388 { 23389 int rval = -1; 23390 int savef; 23391 long savet; 23392 void (*savec)(); 23393 int timeout; 23394 int busy_count; 23395 int poll_delay; 23396 int rc; 23397 uint8_t *sensep; 23398 struct scsi_arq_status *arqstat; 23399 extern int do_polled_io; 23400 23401 ASSERT(pkt->pkt_scbp); 23402 23403 /* 23404 * save old flags.. 
23405 */ 23406 savef = pkt->pkt_flags; 23407 savec = pkt->pkt_comp; 23408 savet = pkt->pkt_time; 23409 23410 pkt->pkt_flags |= FLAG_NOINTR; 23411 23412 /* 23413 * XXX there is nothing in the SCSA spec that states that we should not 23414 * do a callback for polled cmds; however, removing this will break sd 23415 * and probably other target drivers 23416 */ 23417 pkt->pkt_comp = NULL; 23418 23419 /* 23420 * we don't like a polled command without timeout. 23421 * 60 seconds seems long enough. 23422 */ 23423 if (pkt->pkt_time == 0) 23424 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23425 23426 /* 23427 * Send polled cmd. 23428 * 23429 * We do some error recovery for various errors. Tran_busy, 23430 * queue full, and non-dispatched commands are retried every 10 msec. 23431 * as they are typically transient failures. Busy status and Not 23432 * Ready are retried every second as this status takes a while to 23433 * change. 23434 */ 23435 timeout = pkt->pkt_time * SEC_TO_CSEC; 23436 23437 for (busy_count = 0; busy_count < timeout; busy_count++) { 23438 /* 23439 * Initialize pkt status variables. 23440 */ 23441 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23442 23443 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23444 if (rc != TRAN_BUSY) { 23445 /* Transport failed - give up. */ 23446 break; 23447 } else { 23448 /* Transport busy - try again. */ 23449 poll_delay = 1 * CSEC; /* 10 msec. */ 23450 } 23451 } else { 23452 /* 23453 * Transport accepted - check pkt status. 23454 */ 23455 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23456 if ((pkt->pkt_reason == CMD_CMPLT) && 23457 (rc == STATUS_CHECK) && 23458 (pkt->pkt_state & STATE_ARQ_DONE)) { 23459 arqstat = 23460 (struct scsi_arq_status *)(pkt->pkt_scbp); 23461 sensep = (uint8_t *)&arqstat->sts_sensedata; 23462 } else { 23463 sensep = NULL; 23464 } 23465 23466 if ((pkt->pkt_reason == CMD_CMPLT) && 23467 (rc == STATUS_GOOD)) { 23468 /* No error - we're done */ 23469 rval = 0; 23470 break; 23471 23472 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23473 /* Lost connection - give up */ 23474 break; 23475 23476 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23477 (pkt->pkt_state == 0)) { 23478 /* Pkt not dispatched - try again. */ 23479 poll_delay = 1 * CSEC; /* 10 msec. */ 23480 23481 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23482 (rc == STATUS_QFULL)) { 23483 /* Queue full - try again. */ 23484 poll_delay = 1 * CSEC; /* 10 msec. */ 23485 23486 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23487 (rc == STATUS_BUSY)) { 23488 /* Busy - try again. */ 23489 poll_delay = 100 * CSEC; /* 1 sec. */ 23490 busy_count += (SEC_TO_CSEC - 1); 23491 23492 } else if ((sensep != NULL) && 23493 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23494 /* 23495 * Unit Attention - try again. 23496 * Pretend it took 1 sec. 23497 * NOTE: 'continue' avoids poll_delay 23498 */ 23499 busy_count += (SEC_TO_CSEC - 1); 23500 continue; 23501 23502 } else if ((sensep != NULL) && 23503 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23504 (scsi_sense_asc(sensep) == 0x04) && 23505 (scsi_sense_ascq(sensep) == 0x01)) { 23506 /* 23507 * Not ready -> ready - try again. 23508 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23509 * ...same as STATUS_BUSY 23510 */ 23511 poll_delay = 100 * CSEC; /* 1 sec. */ 23512 busy_count += (SEC_TO_CSEC - 1); 23513 23514 } else { 23515 /* BAD status - give up. 
*/ 23516 break; 23517 } 23518 } 23519 23520 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23521 !do_polled_io) { 23522 delay(drv_usectohz(poll_delay)); 23523 } else { 23524 /* we busy wait during cpr_dump or interrupt threads */ 23525 drv_usecwait(poll_delay); 23526 } 23527 } 23528 23529 pkt->pkt_flags = savef; 23530 pkt->pkt_comp = savec; 23531 pkt->pkt_time = savet; 23532 23533 /* return on error */ 23534 if (rval) 23535 return (rval); 23536 23537 /* 23538 * This is not a performance critical code path. 23539 * 23540 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23541 * issues associated with looking at DMA memory prior to 23542 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23543 */ 23544 scsi_sync_pkt(pkt); 23545 return (0); 23546 } 23547 23548 23549 23550 /* 23551 * Function: sd_persistent_reservation_in_read_keys 23552 * 23553 * Description: This routine is the driver entry point for handling CD-ROM 23554 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23555 * by sending the SCSI-3 PRIN commands to the device. 23556 * Processes the read keys command response by copying the 23557 * reservation key information into the user provided buffer. 23558 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23559 * 23560 * Arguments: un - Pointer to soft state struct for the target. 23561 * usrp - user provided pointer to multihost Persistent In Read 23562 * Keys structure (mhioc_inkeys_t) 23563 * flag - this argument is a pass through to ddi_copyxxx() 23564 * directly from the mode argument of ioctl(). 23565 * 23566 * Return Code: 0 - Success 23567 * EACCES 23568 * ENOTSUP 23569 * errno return code from sd_send_scsi_cmd() 23570 * 23571 * Context: Can sleep. Does not return until command is completed. 23572 */ 23573 23574 static int 23575 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23576 mhioc_inkeys_t *usrp, int flag) 23577 { 23578 #ifdef _MULTI_DATAMODEL 23579 struct mhioc_key_list32 li32; 23580 #endif 23581 sd_prin_readkeys_t *in; 23582 mhioc_inkeys_t *ptr; 23583 mhioc_key_list_t li; 23584 uchar_t *data_bufp; 23585 int data_len; 23586 int rval; 23587 size_t copysz; 23588 23589 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23590 return (EINVAL); 23591 } 23592 bzero(&li, sizeof (mhioc_key_list_t)); 23593 23594 /* 23595 * Get the listsize from user 23596 */ 23597 #ifdef _MULTI_DATAMODEL 23598 23599 switch (ddi_model_convert_from(flag & FMODELS)) { 23600 case DDI_MODEL_ILP32: 23601 copysz = sizeof (struct mhioc_key_list32); 23602 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23603 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23604 "sd_persistent_reservation_in_read_keys: " 23605 "failed ddi_copyin: mhioc_key_list32_t\n"); 23606 rval = EFAULT; 23607 goto done; 23608 } 23609 li.listsize = li32.listsize; 23610 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23611 break; 23612 23613 case DDI_MODEL_NONE: 23614 copysz = sizeof (mhioc_key_list_t); 23615 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23616 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23617 "sd_persistent_reservation_in_read_keys: " 23618 "failed ddi_copyin: mhioc_key_list_t\n"); 23619 rval = EFAULT; 23620 goto done; 23621 } 23622 break; 23623 } 23624 23625 #else /* ! 
_MULTI_DATAMODEL */ 23626 copysz = sizeof (mhioc_key_list_t); 23627 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 23628 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23629 "sd_persistent_reservation_in_read_keys: " 23630 "failed ddi_copyin: mhioc_key_list_t\n"); 23631 rval = EFAULT; 23632 goto done; 23633 } 23634 #endif 23635 23636 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 23637 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 23638 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23639 23640 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 23641 data_len, data_bufp)) != 0) { 23642 goto done; 23643 } 23644 in = (sd_prin_readkeys_t *)data_bufp; 23645 ptr->generation = BE_32(in->generation); 23646 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 23647 23648 /* 23649 * Return the min(listsize, listlen) keys 23650 */ 23651 #ifdef _MULTI_DATAMODEL 23652 23653 switch (ddi_model_convert_from(flag & FMODELS)) { 23654 case DDI_MODEL_ILP32: 23655 li32.listlen = li.listlen; 23656 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 23657 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23658 "sd_persistent_reservation_in_read_keys: " 23659 "failed ddi_copyout: mhioc_key_list32_t\n"); 23660 rval = EFAULT; 23661 goto done; 23662 } 23663 break; 23664 23665 case DDI_MODEL_NONE: 23666 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23667 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23668 "sd_persistent_reservation_in_read_keys: " 23669 "failed ddi_copyout: mhioc_key_list_t\n"); 23670 rval = EFAULT; 23671 goto done; 23672 } 23673 break; 23674 } 23675 23676 #else /* ! _MULTI_DATAMODEL */ 23677 23678 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 23679 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23680 "sd_persistent_reservation_in_read_keys: " 23681 "failed ddi_copyout: mhioc_key_list_t\n"); 23682 rval = EFAULT; 23683 goto done; 23684 } 23685 23686 #endif /* _MULTI_DATAMODEL */ 23687 23688 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 23689 li.listsize * MHIOC_RESV_KEY_SIZE); 23690 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 23691 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23692 "sd_persistent_reservation_in_read_keys: " 23693 "failed ddi_copyout: keylist\n"); 23694 rval = EFAULT; 23695 } 23696 done: 23697 kmem_free(data_bufp, data_len); 23698 return (rval); 23699 } 23700 23701 23702 /* 23703 * Function: sd_persistent_reservation_in_read_resv 23704 * 23705 * Description: This routine is the driver entry point for handling CD-ROM 23706 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 23707 * by sending the SCSI-3 PRIN commands to the device. 23708 * Process the read persistent reservations command response by 23709 * copying the reservation information into the user provided 23710 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23711 * 23712 * Arguments: un - Pointer to soft state struct for the target. 23713 * usrp - user provided pointer to multihost Persistent In Read 23714 * Reservations structure (mhioc_inresvs_t) 23715 * flag - this argument is a pass through to ddi_copyxxx() 23716 * directly from the mode argument of ioctl(). 23717 * 23718 * Return Code: 0 - Success 23719 * EACCES 23720 * ENOTSUP 23721 * errno return code from sd_send_scsi_cmd() 23722 * 23723 * Context: Can sleep. Does not return until command is completed.
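 *
 * Caller pattern (illustrative only; not mandated by the driver): a
 * userland agent typically issues the ioctl twice, first with
 * listsize = 0 to learn listlen, then again with a buffer sized for
 * listlen descriptors:
 *
 *	mhioc_inresvs_t r;
 *	mhioc_resv_desc_list_t l;
 *
 *	bzero(&l, sizeof (l));
 *	r.li = &l;
 *	if (ioctl(fd, MHIOCGRP_INRESV, &r) == 0) {
 *		l.list = calloc(l.listlen, sizeof (mhioc_resv_desc_t));
 *		l.listsize = l.listlen;
 *		(void) ioctl(fd, MHIOCGRP_INRESV, &r);
 *	}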
23724 */ 23725 23726 static int 23727 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 23728 mhioc_inresvs_t *usrp, int flag) 23729 { 23730 #ifdef _MULTI_DATAMODEL 23731 struct mhioc_resv_desc_list32 resvlist32; 23732 #endif 23733 sd_prin_readresv_t *in; 23734 mhioc_inresvs_t *ptr; 23735 sd_readresv_desc_t *readresv_ptr; 23736 mhioc_resv_desc_list_t resvlist; 23737 mhioc_resv_desc_t resvdesc; 23738 uchar_t *data_bufp; 23739 int data_len; 23740 int rval; 23741 int i; 23742 size_t copysz; 23743 mhioc_resv_desc_t *bufp; 23744 23745 if ((ptr = usrp) == NULL) { 23746 return (EINVAL); 23747 } 23748 23749 /* 23750 * Get the listsize from user 23751 */ 23752 #ifdef _MULTI_DATAMODEL 23753 switch (ddi_model_convert_from(flag & FMODELS)) { 23754 case DDI_MODEL_ILP32: 23755 copysz = sizeof (struct mhioc_resv_desc_list32); 23756 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 23757 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23758 "sd_persistent_reservation_in_read_resv: " 23759 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23760 rval = EFAULT; 23761 goto done; 23762 } 23763 resvlist.listsize = resvlist32.listsize; 23764 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 23765 break; 23766 23767 case DDI_MODEL_NONE: 23768 copysz = sizeof (mhioc_resv_desc_list_t); 23769 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23770 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23771 "sd_persistent_reservation_in_read_resv: " 23772 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23773 rval = EFAULT; 23774 goto done; 23775 } 23776 break; 23777 } 23778 #else /* ! _MULTI_DATAMODEL */ 23779 copysz = sizeof (mhioc_resv_desc_list_t); 23780 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 23781 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23782 "sd_persistent_reservation_in_read_resv: " 23783 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 23784 rval = EFAULT; 23785 goto done; 23786 } 23787 #endif /* ! _MULTI_DATAMODEL */ 23788 23789 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 23790 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 23791 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 23792 23793 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 23794 data_len, data_bufp)) != 0) { 23795 goto done; 23796 } 23797 in = (sd_prin_readresv_t *)data_bufp; 23798 ptr->generation = BE_32(in->generation); 23799 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 23800 23801 /* 23802 * Return the min(listsize, listlen) keys 23803 */ 23804 #ifdef _MULTI_DATAMODEL 23805 23806 switch (ddi_model_convert_from(flag & FMODELS)) { 23807 case DDI_MODEL_ILP32: 23808 resvlist32.listlen = resvlist.listlen; 23809 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 23810 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23811 "sd_persistent_reservation_in_read_resv: " 23812 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23813 rval = EFAULT; 23814 goto done; 23815 } 23816 break; 23817 23818 case DDI_MODEL_NONE: 23819 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23820 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23821 "sd_persistent_reservation_in_read_resv: " 23822 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23823 rval = EFAULT; 23824 goto done; 23825 } 23826 break; 23827 } 23828 23829 #else /* ! _MULTI_DATAMODEL */ 23830 23831 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 23832 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23833 "sd_persistent_reservation_in_read_resv: " 23834 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 23835 rval = EFAULT; 23836 goto done; 23837 } 23838 23839 #endif /* !
_MULTI_DATAMODEL */ 23840 23841 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 23842 bufp = resvlist.list; 23843 copysz = sizeof (mhioc_resv_desc_t); 23844 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 23845 i++, readresv_ptr++, bufp++) { 23846 23847 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 23848 MHIOC_RESV_KEY_SIZE); 23849 resvdesc.type = readresv_ptr->type; 23850 resvdesc.scope = readresv_ptr->scope; 23851 resvdesc.scope_specific_addr = 23852 BE_32(readresv_ptr->scope_specific_addr); 23853 23854 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 23855 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23856 "sd_persistent_reservation_in_read_resv: " 23857 "failed ddi_copyout: resvlist\n"); 23858 rval = EFAULT; 23859 goto done; 23860 } 23861 } 23862 done: 23863 kmem_free(data_bufp, data_len); 23864 return (rval); 23865 } 23866 23867 23868 /* 23869 * Function: sr_change_blkmode() 23870 * 23871 * Description: This routine is the driver entry point for handling CD-ROM 23872 * block mode ioctl requests. Support for returning and changing 23873 * the current block size in use by the device is implemented. The 23874 * LBA size is changed via a MODE SELECT Block Descriptor. 23875 * 23876 * This routine issues a mode sense with an allocation length of 23877 * 12 bytes for the mode page header and a single block descriptor. 23878 * 23879 * Arguments: dev - the device 'dev_t' 23880 * cmd - the request type; one of CDROMGBLKMODE (get) or 23881 * CDROMSBLKMODE (set) 23882 * data - current block size or requested block size 23883 * flag - this argument is a pass through to ddi_copyxxx() directly 23884 * from the mode argument of ioctl(). 23885 * 23886 * Return Code: the code returned by sd_send_scsi_cmd() 23887 * EINVAL if invalid arguments are provided 23888 * EFAULT if ddi_copyxxx() fails 23889 * ENXIO if fail ddi_get_soft_state 23890 * EIO if invalid mode sense block descriptor length 23891 * 23892 */ 23893 23894 static int 23895 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 23896 { 23897 struct sd_lun *un = NULL; 23898 struct mode_header *sense_mhp, *select_mhp; 23899 struct block_descriptor *sense_desc, *select_desc; 23900 int current_bsize; 23901 int rval = EINVAL; 23902 uchar_t *sense = NULL; 23903 uchar_t *select = NULL; 23904 23905 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 23906 23907 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23908 return (ENXIO); 23909 } 23910 23911 /* 23912 * The block length is changed via the Mode Select block descriptor, the 23913 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 23914 * required as part of this routine. Therefore the mode sense allocation 23915 * length is specified to be the length of a mode page header and a 23916 * block descriptor. 
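 * (BUFLEN_CHG_BLK_MODE is presumably MODE_HEADER_LENGTH +
 * MODE_BLK_DESC_LENGTH, i.e. 4 + 8 = 12 bytes, matching the
 * "12 bytes" called out in the function header above.)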
23917 	 */
23918 	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
23919
23920 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
23921 	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) {
23922 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23923 		    "sr_change_blkmode: Mode Sense Failed\n");
23924 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
23925 		return (rval);
23926 	}
23927
23928 	/* Check the block descriptor len to handle only 1 block descriptor */
23929 	sense_mhp = (struct mode_header *)sense;
23930 	if ((sense_mhp->bdesc_length == 0) ||
23931 	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
23932 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23933 		    "sr_change_blkmode: Mode Sense returned invalid block"
23934 		    " descriptor length\n");
23935 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
23936 		return (EIO);
23937 	}
23938 	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
23939 	current_bsize = ((sense_desc->blksize_hi << 16) |
23940 	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
23941
23942 	/* Process command */
23943 	switch (cmd) {
23944 	case CDROMGBLKMODE:
23945 		/* Return the block size obtained during the mode sense */
23946 		if (ddi_copyout(&current_bsize, (void *)data,
23947 		    sizeof (int), flag) != 0)
23948 			rval = EFAULT;
23949 		break;
23950 	case CDROMSBLKMODE:
23951 		/* Validate the requested block size */
23952 		switch (data) {
23953 		case CDROM_BLK_512:
23954 		case CDROM_BLK_1024:
23955 		case CDROM_BLK_2048:
23956 		case CDROM_BLK_2056:
23957 		case CDROM_BLK_2336:
23958 		case CDROM_BLK_2340:
23959 		case CDROM_BLK_2352:
23960 		case CDROM_BLK_2368:
23961 		case CDROM_BLK_2448:
23962 		case CDROM_BLK_2646:
23963 		case CDROM_BLK_2647:
23964 			break;
23965 		default:
23966 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
23967 			    "sr_change_blkmode: "
23968 			    "Block Size '%ld' Not Supported\n", data);
23969 			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
23970 			return (EINVAL);
23971 		}
23972
23973 		/*
23974 		 * The current block size matches the requested block size so
23975 		 * there is no need to send the mode select to change the size
23976 		 */
23977 		if (current_bsize == data) {
23978 			break;
23979 		}
23980
23981 		/* Build the select data for the requested block size */
23982 		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
23983 		select_mhp = (struct mode_header *)select;
23984 		select_desc =
23985 		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
23986 		/*
23987 		 * The LBA size is changed via the block descriptor, so the
23988 		 * descriptor is built according to the user data
23989 		 */
23990 		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
23991 		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
23992 		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
23993 		select_desc->blksize_lo = (char)((data) & 0x000000ff);
23994
23995 		/* Send the mode select for the requested block size */
23996 		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
23997 		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
23998 		    SD_PATH_STANDARD)) != 0) {
23999 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24000 			    "sr_change_blkmode: Mode Select Failed\n");
24001 			/*
24002 			 * The mode select failed for the requested block size,
24003 			 * so reset the data for the original block size and
24004 			 * send it to the target. The error is indicated by the
24005 			 * return value for the failed mode select.
24006 */ 24007 select_desc->blksize_hi = sense_desc->blksize_hi; 24008 select_desc->blksize_mid = sense_desc->blksize_mid; 24009 select_desc->blksize_lo = sense_desc->blksize_lo; 24010 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24011 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24012 SD_PATH_STANDARD); 24013 } else { 24014 ASSERT(!mutex_owned(SD_MUTEX(un))); 24015 mutex_enter(SD_MUTEX(un)); 24016 sd_update_block_info(un, (uint32_t)data, 0); 24017 mutex_exit(SD_MUTEX(un)); 24018 } 24019 break; 24020 default: 24021 /* should not reach here, but check anyway */ 24022 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24023 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24024 rval = EINVAL; 24025 break; 24026 } 24027 24028 if (select) { 24029 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24030 } 24031 if (sense) { 24032 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24033 } 24034 return (rval); 24035 } 24036 24037 24038 /* 24039 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24040 * implement driver support for getting and setting the CD speed. The command 24041 * set used will be based on the device type. If the device has not been 24042 * identified as MMC the Toshiba vendor specific mode page will be used. If 24043 * the device is MMC but does not support the Real Time Streaming feature 24044 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24045 * be used to read the speed. 24046 */ 24047 24048 /* 24049 * Function: sr_change_speed() 24050 * 24051 * Description: This routine is the driver entry point for handling CD-ROM 24052 * drive speed ioctl requests for devices supporting the Toshiba 24053 * vendor specific drive speed mode page. Support for returning 24054 * and changing the current drive speed in use by the device is 24055 * implemented. 24056 * 24057 * Arguments: dev - the device 'dev_t' 24058 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24059 * CDROMSDRVSPEED (set) 24060 * data - current drive speed or requested drive speed 24061 * flag - this argument is a pass through to ddi_copyxxx() directly 24062 * from the mode argument of ioctl(). 24063 * 24064 * Return Code: the code returned by sd_send_scsi_cmd() 24065 * EINVAL if invalid arguments are provided 24066 * EFAULT if ddi_copyxxx() fails 24067 * ENXIO if fail ddi_get_soft_state 24068 * EIO if invalid mode sense block descriptor length 24069 */ 24070 24071 static int 24072 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24073 { 24074 struct sd_lun *un = NULL; 24075 struct mode_header *sense_mhp, *select_mhp; 24076 struct mode_speed *sense_page, *select_page; 24077 int current_speed; 24078 int rval = EINVAL; 24079 int bd_len; 24080 uchar_t *sense = NULL; 24081 uchar_t *select = NULL; 24082 24083 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24084 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24085 return (ENXIO); 24086 } 24087 24088 /* 24089 * Note: The drive speed is being modified here according to a Toshiba 24090 * vendor specific mode page (0x31). 
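	 *
	 * An illustrative user-space request (a sketch only; 'fd' is an
	 * assumed open descriptor for the raw CD device):
	 *
	 *	int speed;
	 *	(void) ioctl(fd, CDROMGDRVSPEED, &speed);
	 *	(void) ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED);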
24091 	 */
24092 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
24093
24094 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
24095 	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
24096 	    SD_PATH_STANDARD)) != 0) {
24097 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24098 		    "sr_change_speed: Mode Sense Failed\n");
24099 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
24100 		return (rval);
24101 	}
24102 	sense_mhp = (struct mode_header *)sense;
24103
24104 	/* Check the block descriptor len to handle only 1 block descriptor */
24105 	bd_len = sense_mhp->bdesc_length;
24106 	if (bd_len > MODE_BLK_DESC_LENGTH) {
24107 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24108 		    "sr_change_speed: Mode Sense returned invalid block "
24109 		    "descriptor length\n");
24110 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
24111 		return (EIO);
24112 	}
24113
24114 	sense_page = (struct mode_speed *)
24115 	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
24116 	current_speed = sense_page->speed;
24117
24118 	/* Process command */
24119 	switch (cmd) {
24120 	case CDROMGDRVSPEED:
24121 		/* Return the drive speed obtained during the mode sense */
24122 		if (current_speed == 0x2) {
24123 			current_speed = CDROM_TWELVE_SPEED;
24124 		}
24125 		if (ddi_copyout(&current_speed, (void *)data,
24126 		    sizeof (int), flag) != 0) {
24127 			rval = EFAULT;
24128 		}
24129 		break;
24130 	case CDROMSDRVSPEED:
24131 		/* Validate the requested drive speed */
24132 		switch ((uchar_t)data) {
24133 		case CDROM_TWELVE_SPEED:
24134 			data = 0x2;
24135 			/*FALLTHROUGH*/
24136 		case CDROM_NORMAL_SPEED:
24137 		case CDROM_DOUBLE_SPEED:
24138 		case CDROM_QUAD_SPEED:
24139 		case CDROM_MAXIMUM_SPEED:
24140 			break;
24141 		default:
24142 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24143 			    "sr_change_speed: "
24144 			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
24145 			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
24146 			return (EINVAL);
24147 		}
24148
24149 		/*
24150 		 * The current drive speed matches the requested drive speed so
24151 		 * there is no need to send the mode select to change the speed
24152 		 */
24153 		if (current_speed == data) {
24154 			break;
24155 		}
24156
24157 		/* Build the select data for the requested drive speed */
24158 		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
24159 		select_mhp = (struct mode_header *)select;
24160 		select_mhp->bdesc_length = 0;
24161 		select_page =
24162 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
24165 		select_page->mode_page.code = CDROM_MODE_SPEED;
24166 		select_page->mode_page.length = 2;
24167 		select_page->speed = (uchar_t)data;
24168
24169 		/* Send the mode select for the requested drive speed */
24170 		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
24171 		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
24172 		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
24173 			/*
24174 			 * The mode select failed for the requested drive speed,
24175 			 * so reset the data for the original drive speed and
24176 			 * send it to the target. The error is indicated by the
24177 			 * return value for the failed mode select.
24178 			 */
24179 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24180 			    "sr_change_speed: Mode Select Failed\n");
24181 			select_page->speed = sense_page->speed;
24182 			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
24183 			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
24184 			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
24185 		}
24186 		break;
24187 	default:
24188 		/* should not reach here, but check anyway */
24189 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24190 		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
24191 		rval = EINVAL;
24192 		break;
24193 	}
24194
24195 	if (select) {
24196 		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
24197 	}
24198 	if (sense) {
24199 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
24200 	}
24201
24202 	return (rval);
24203 }
24204
24205
24206 /*
24207  * Function: sr_atapi_change_speed()
24208  *
24209  * Description: This routine is the driver entry point for handling CD-ROM
24210  *		drive speed ioctl requests for MMC devices that do not support
24211  *		the Real Time Streaming feature (0x107).
24212  *
24213  *		Note: This routine will use the SET SPEED command which may not
24214  *		be supported by all devices.
24215  *
24216  * Arguments: dev - the device 'dev_t'
24217  *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
24218  *		    CDROMSDRVSPEED (set)
24219  *	      data - current drive speed or requested drive speed
24220  *	      flag - this argument is a pass through to ddi_copyxxx() directly
24221  *		     from the mode argument of ioctl().
24222  *
24223  * Return Code: the code returned by sd_send_scsi_cmd()
24224  *		EINVAL if invalid arguments are provided
24225  *		EFAULT if ddi_copyxxx() fails
24226  *		ENXIO if fail ddi_get_soft_state
24227  *		EIO if invalid mode sense block descriptor length
24228  */
24229
24230 static int
24231 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
24232 {
24233 	struct sd_lun		*un;
24234 	struct uscsi_cmd	*com = NULL;
24235 	struct mode_header_grp2	*sense_mhp;
24236 	uchar_t			*sense_page;
24237 	uchar_t			*sense = NULL;
24238 	char			cdb[CDB_GROUP5];
24239 	int			bd_len;
24240 	int			current_speed = 0;
24241 	int			max_speed = 0;
24242 	int			rval;
24243
24244 	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
24245
24246 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24247 		return (ENXIO);
24248 	}
24249
24250 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
24251
24252 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
24253 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
24254 	    SD_PATH_STANDARD)) != 0) {
24255 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24256 		    "sr_atapi_change_speed: Mode Sense Failed\n");
24257 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24258 		return (rval);
24259 	}
24260
24261 	/* Check the block descriptor len to handle only 1 block descriptor */
24262 	sense_mhp = (struct mode_header_grp2 *)sense;
24263 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
24264 	if (bd_len > MODE_BLK_DESC_LENGTH) {
24265 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24266 		    "sr_atapi_change_speed: Mode Sense returned invalid "
24267 		    "block descriptor length\n");
24268 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24269 		return (EIO);
24270 	}
24271
24272 	/* Calculate the current and maximum drive speeds */
24273 	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
24274 	current_speed = (sense_page[14] << 8) | sense_page[15];
24275 	max_speed = (sense_page[8] << 8) | sense_page[9];
24276
24277 	/* Process the command */
24278 	switch (cmd) {
24279 	case CDROMGDRVSPEED:
24280 		current_speed /= SD_SPEED_1X;
24281 		if (ddi_copyout(&current_speed, (void *)data,
24282 		    sizeof (int), flag) != 0)
24283 			rval = EFAULT;
24284 		break;
24285 	case CDROMSDRVSPEED:
24286 		/* Convert the speed code to KB/sec */
24287 		switch ((uchar_t)data) {
24288 		case CDROM_NORMAL_SPEED:
24289 			current_speed = SD_SPEED_1X;
24290 			break;
24291 		case CDROM_DOUBLE_SPEED:
24292 			current_speed = 2 * SD_SPEED_1X;
24293 			break;
24294 		case CDROM_QUAD_SPEED:
24295 			current_speed = 4 * SD_SPEED_1X;
24296 			break;
24297 		case CDROM_TWELVE_SPEED:
24298 			current_speed = 12 * SD_SPEED_1X;
24299 			break;
24300 		case CDROM_MAXIMUM_SPEED:
24301 			current_speed = 0xffff;
24302 			break;
24303 		default:
24304 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24305 			    "sr_atapi_change_speed: invalid drive speed %d\n",
24306 			    (uchar_t)data);
24307 			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24308 			return (EINVAL);
24309 		}
24310
24311 		/* Check the request against the drive's max speed. */
24312 		if (current_speed != 0xffff) {
24313 			if (current_speed > max_speed) {
24314 				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24315 				return (EINVAL);
24316 			}
24317 		}
24318
24319 		/*
24320 		 * Build and send the SET SPEED command
24321 		 *
24322 		 * Note: The SET SPEED (0xBB) command used in this routine is
24323 		 * obsolete per the SCSI MMC spec but still supported in the
24324 		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
24325 		 * therefore the command is still implemented in this routine.
24326 		 */
24327 		bzero(cdb, sizeof (cdb));
24328 		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
24329 		cdb[2] = (uchar_t)(current_speed >> 8);
24330 		cdb[3] = (uchar_t)current_speed;
24331 		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24332 		com->uscsi_cdb = (caddr_t)cdb;
24333 		com->uscsi_cdblen = CDB_GROUP5;
24334 		com->uscsi_bufaddr = NULL;
24335 		com->uscsi_buflen = 0;
24336 		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
24337 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
24338 		break;
24339 	default:
24340 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24341 		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
24342 		rval = EINVAL;
24343 	}
24344
24345 	if (sense) {
24346 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
24347 	}
24348 	if (com) {
24349 		kmem_free(com, sizeof (*com));
24350 	}
24351 	return (rval);
24352 }
24353
24354
24355 /*
24356  * Function: sr_pause_resume()
24357  *
24358  * Description: This routine is the driver entry point for handling CD-ROM
24359  *		pause/resume ioctl requests. This only affects the audio play
24360  *		operation.
24361  *
24362  * Arguments: dev - the device 'dev_t'
24363  *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
24364  *		    for setting the resume bit of the cdb.
24365 * 24366 * Return Code: the code returned by sd_send_scsi_cmd() 24367 * EINVAL if invalid mode specified 24368 * 24369 */ 24370 24371 static int 24372 sr_pause_resume(dev_t dev, int cmd) 24373 { 24374 struct sd_lun *un; 24375 struct uscsi_cmd *com; 24376 char cdb[CDB_GROUP1]; 24377 int rval; 24378 24379 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24380 return (ENXIO); 24381 } 24382 24383 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24384 bzero(cdb, CDB_GROUP1); 24385 cdb[0] = SCMD_PAUSE_RESUME; 24386 switch (cmd) { 24387 case CDROMRESUME: 24388 cdb[8] = 1; 24389 break; 24390 case CDROMPAUSE: 24391 cdb[8] = 0; 24392 break; 24393 default: 24394 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24395 " Command '%x' Not Supported\n", cmd); 24396 rval = EINVAL; 24397 goto done; 24398 } 24399 24400 com->uscsi_cdb = cdb; 24401 com->uscsi_cdblen = CDB_GROUP1; 24402 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24403 24404 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24405 SD_PATH_STANDARD); 24406 24407 done: 24408 kmem_free(com, sizeof (*com)); 24409 return (rval); 24410 } 24411 24412 24413 /* 24414 * Function: sr_play_msf() 24415 * 24416 * Description: This routine is the driver entry point for handling CD-ROM 24417 * ioctl requests to output the audio signals at the specified 24418 * starting address and continue the audio play until the specified 24419 * ending address (CDROMPLAYMSF) The address is in Minute Second 24420 * Frame (MSF) format. 24421 * 24422 * Arguments: dev - the device 'dev_t' 24423 * data - pointer to user provided audio msf structure, 24424 * specifying start/end addresses. 24425 * flag - this argument is a pass through to ddi_copyxxx() 24426 * directly from the mode argument of ioctl(). 
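 *
 *	      Illustrative user-space usage (a sketch only; assumes an open
 *	      raw CD device descriptor 'fd'):
 *
 *			struct cdrom_msf msf;
 *			msf.cdmsf_min0 = 0;
 *			msf.cdmsf_sec0 = 2;
 *			msf.cdmsf_frame0 = 0;
 *			msf.cdmsf_min1 = 5;
 *			msf.cdmsf_sec1 = 0;
 *			msf.cdmsf_frame1 = 0;
 *			(void) ioctl(fd, CDROMPLAYMSF, &msf);
 *
 *	      This requests audio play from 00:02:00 through 05:00:00 MSF.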
24427 * 24428 * Return Code: the code returned by sd_send_scsi_cmd() 24429 * EFAULT if ddi_copyxxx() fails 24430 * ENXIO if fail ddi_get_soft_state 24431 * EINVAL if data pointer is NULL 24432 */ 24433 24434 static int 24435 sr_play_msf(dev_t dev, caddr_t data, int flag) 24436 { 24437 struct sd_lun *un; 24438 struct uscsi_cmd *com; 24439 struct cdrom_msf msf_struct; 24440 struct cdrom_msf *msf = &msf_struct; 24441 char cdb[CDB_GROUP1]; 24442 int rval; 24443 24444 if (data == NULL) { 24445 return (EINVAL); 24446 } 24447 24448 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24449 return (ENXIO); 24450 } 24451 24452 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24453 return (EFAULT); 24454 } 24455 24456 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24457 bzero(cdb, CDB_GROUP1); 24458 cdb[0] = SCMD_PLAYAUDIO_MSF; 24459 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24460 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24461 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24462 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24463 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24464 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24465 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24466 } else { 24467 cdb[3] = msf->cdmsf_min0; 24468 cdb[4] = msf->cdmsf_sec0; 24469 cdb[5] = msf->cdmsf_frame0; 24470 cdb[6] = msf->cdmsf_min1; 24471 cdb[7] = msf->cdmsf_sec1; 24472 cdb[8] = msf->cdmsf_frame1; 24473 } 24474 com->uscsi_cdb = cdb; 24475 com->uscsi_cdblen = CDB_GROUP1; 24476 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24477 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24478 SD_PATH_STANDARD); 24479 kmem_free(com, sizeof (*com)); 24480 return (rval); 24481 } 24482 24483 24484 /* 24485 * Function: sr_play_trkind() 24486 * 24487 * Description: This routine is the driver entry point for handling CD-ROM 24488 * ioctl requests to output the audio signals at the specified 24489 * starting address and continue the audio play until the specified 24490 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24491 * format. 24492 * 24493 * Arguments: dev - the device 'dev_t' 24494 * data - pointer to user provided audio track/index structure, 24495 * specifying start/end addresses. 24496 * flag - this argument is a pass through to ddi_copyxxx() 24497 * directly from the mode argument of ioctl(). 
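 *
 *	      Illustrative user-space usage (a sketch only; assumes an open
 *	      raw CD device descriptor 'fd'):
 *
 *			struct cdrom_ti ti;
 *			ti.cdti_trk0 = 1;
 *			ti.cdti_ind0 = 1;
 *			ti.cdti_trk1 = 2;
 *			ti.cdti_ind1 = 1;
 *			(void) ioctl(fd, CDROMPLAYTRKIND, &ti);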
24498 * 24499 * Return Code: the code returned by sd_send_scsi_cmd() 24500 * EFAULT if ddi_copyxxx() fails 24501 * ENXIO if fail ddi_get_soft_state 24502 * EINVAL if data pointer is NULL 24503 */ 24504 24505 static int 24506 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24507 { 24508 struct cdrom_ti ti_struct; 24509 struct cdrom_ti *ti = &ti_struct; 24510 struct uscsi_cmd *com = NULL; 24511 char cdb[CDB_GROUP1]; 24512 int rval; 24513 24514 if (data == NULL) { 24515 return (EINVAL); 24516 } 24517 24518 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24519 return (EFAULT); 24520 } 24521 24522 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24523 bzero(cdb, CDB_GROUP1); 24524 cdb[0] = SCMD_PLAYAUDIO_TI; 24525 cdb[4] = ti->cdti_trk0; 24526 cdb[5] = ti->cdti_ind0; 24527 cdb[7] = ti->cdti_trk1; 24528 cdb[8] = ti->cdti_ind1; 24529 com->uscsi_cdb = cdb; 24530 com->uscsi_cdblen = CDB_GROUP1; 24531 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24532 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24533 SD_PATH_STANDARD); 24534 kmem_free(com, sizeof (*com)); 24535 return (rval); 24536 } 24537 24538 24539 /* 24540 * Function: sr_read_all_subcodes() 24541 * 24542 * Description: This routine is the driver entry point for handling CD-ROM 24543 * ioctl requests to return raw subcode data while the target is 24544 * playing audio (CDROMSUBCODE). 24545 * 24546 * Arguments: dev - the device 'dev_t' 24547 * data - pointer to user provided cdrom subcode structure, 24548 * specifying the transfer length and address. 24549 * flag - this argument is a pass through to ddi_copyxxx() 24550 * directly from the mode argument of ioctl(). 24551 * 24552 * Return Code: the code returned by sd_send_scsi_cmd() 24553 * EFAULT if ddi_copyxxx() fails 24554 * ENXIO if fail ddi_get_soft_state 24555 * EINVAL if data pointer is NULL 24556 */ 24557 24558 static int 24559 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24560 { 24561 struct sd_lun *un = NULL; 24562 struct uscsi_cmd *com = NULL; 24563 struct cdrom_subcode *subcode = NULL; 24564 int rval; 24565 size_t buflen; 24566 char cdb[CDB_GROUP5]; 24567 24568 #ifdef _MULTI_DATAMODEL 24569 /* To support ILP32 applications in an LP64 world */ 24570 struct cdrom_subcode32 cdrom_subcode32; 24571 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24572 #endif 24573 if (data == NULL) { 24574 return (EINVAL); 24575 } 24576 24577 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24578 return (ENXIO); 24579 } 24580 24581 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24582 24583 #ifdef _MULTI_DATAMODEL 24584 switch (ddi_model_convert_from(flag & FMODELS)) { 24585 case DDI_MODEL_ILP32: 24586 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24587 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24588 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24589 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24590 return (EFAULT); 24591 } 24592 /* Convert the ILP32 uscsi data from the application to LP64 */ 24593 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24594 break; 24595 case DDI_MODEL_NONE: 24596 if (ddi_copyin(data, subcode, 24597 sizeof (struct cdrom_subcode), flag)) { 24598 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24599 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24600 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24601 return (EFAULT); 24602 } 24603 break; 24604 } 24605 #else /* ! 
_MULTI_DATAMODEL */
24606 	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
24607 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24608 		    "sr_read_all_subcodes: ddi_copyin Failed\n");
24609 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24610 		return (EFAULT);
24611 	}
24612 #endif /* _MULTI_DATAMODEL */
24613
24614 	/*
24615 	 * Since MMC-2 expects max 3 bytes for length, check if the
24616 	 * length input is greater than 3 bytes
24617 	 */
24618 	if ((subcode->cdsc_length & 0xFF000000) != 0) {
24619 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
24620 		    "sr_read_all_subcodes: "
24621 		    "cdrom transfer length too large: %d (limit %d)\n",
24622 		    subcode->cdsc_length, 0xFFFFFF);
24623 		kmem_free(subcode, sizeof (struct cdrom_subcode));
24624 		return (EINVAL);
24625 	}
24626
24627 	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
24628 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24629 	bzero(cdb, CDB_GROUP5);
24630
24631 	if (un->un_f_mmc_cap == TRUE) {
24632 		cdb[0] = (char)SCMD_READ_CD;
24633 		cdb[2] = (char)0xff;
24634 		cdb[3] = (char)0xff;
24635 		cdb[4] = (char)0xff;
24636 		cdb[5] = (char)0xff;
24637 		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24638 		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24639 		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
24640 		cdb[10] = 1;
24641 	} else {
24642 		/*
24643 		 * Note: A vendor specific command (0xDF) is being used here to
24644 		 * request a read of all subcodes.
24645 		 */
24646 		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
24647 		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
24648 		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
24649 		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
24650 		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
24651 	}
24652 	com->uscsi_cdb = cdb;
24653 	com->uscsi_cdblen = CDB_GROUP5;
24654 	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
24655 	com->uscsi_buflen = buflen;
24656 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
24657 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
24658 	    SD_PATH_STANDARD);
24659 	kmem_free(subcode, sizeof (struct cdrom_subcode));
24660 	kmem_free(com, sizeof (*com));
24661 	return (rval);
24662 }
24663
24664
24665 /*
24666  * Function: sr_read_subchannel()
24667  *
24668  * Description: This routine is the driver entry point for handling CD-ROM
24669  *		ioctl requests to return the Q sub-channel data of the CD
24670  *		current position block. (CDROMSUBCHNL) The data includes the
24671  *		track number, index number, absolute CD-ROM address (LBA or MSF
24672  *		format per the user), track relative CD-ROM address (LBA or MSF
24673  *		format per the user), control data and audio status.
24674  *
24675  * Arguments: dev - the device 'dev_t'
24676  *	      data - pointer to user provided cdrom sub-channel structure
24677  *	      flag - this argument is a pass through to ddi_copyxxx()
24678  *		     directly from the mode argument of ioctl().
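 *
 *	      Illustrative user-space usage (a sketch only; assumes an open
 *	      raw CD device descriptor 'fd'):
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;
 *			(void) ioctl(fd, CDROMSUBCHNL, &sc);
 *
 *	      cdsc_format selects MSF or LBA addressing for the returned
 *	      position data, per the MSF bit handling below.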
24679 * 24680 * Return Code: the code returned by sd_send_scsi_cmd() 24681 * EFAULT if ddi_copyxxx() fails 24682 * ENXIO if fail ddi_get_soft_state 24683 * EINVAL if data pointer is NULL 24684 */ 24685 24686 static int 24687 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 24688 { 24689 struct sd_lun *un; 24690 struct uscsi_cmd *com; 24691 struct cdrom_subchnl subchanel; 24692 struct cdrom_subchnl *subchnl = &subchanel; 24693 char cdb[CDB_GROUP1]; 24694 caddr_t buffer; 24695 int rval; 24696 24697 if (data == NULL) { 24698 return (EINVAL); 24699 } 24700 24701 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 24702 (un->un_state == SD_STATE_OFFLINE)) { 24703 return (ENXIO); 24704 } 24705 24706 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 24707 return (EFAULT); 24708 } 24709 24710 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 24711 bzero(cdb, CDB_GROUP1); 24712 cdb[0] = SCMD_READ_SUBCHANNEL; 24713 /* Set the MSF bit based on the user requested address format */ 24714 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 24715 /* 24716 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 24717 * returned 24718 */ 24719 cdb[2] = 0x40; 24720 /* 24721 * Set byte 3 to specify the return data format. A value of 0x01 24722 * indicates that the CD-ROM current position should be returned. 24723 */ 24724 cdb[3] = 0x01; 24725 cdb[8] = 0x10; 24726 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24727 com->uscsi_cdb = cdb; 24728 com->uscsi_cdblen = CDB_GROUP1; 24729 com->uscsi_bufaddr = buffer; 24730 com->uscsi_buflen = 16; 24731 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 24732 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24733 SD_PATH_STANDARD); 24734 if (rval != 0) { 24735 kmem_free(buffer, 16); 24736 kmem_free(com, sizeof (*com)); 24737 return (rval); 24738 } 24739 24740 /* Process the returned Q sub-channel data */ 24741 subchnl->cdsc_audiostatus = buffer[1]; 24742 subchnl->cdsc_adr = (buffer[5] & 0xF0); 24743 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 24744 subchnl->cdsc_trk = buffer[6]; 24745 subchnl->cdsc_ind = buffer[7]; 24746 if (subchnl->cdsc_format & CDROM_LBA) { 24747 subchnl->cdsc_absaddr.lba = 24748 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 24749 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 24750 subchnl->cdsc_reladdr.lba = 24751 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 24752 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 24753 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 24754 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 24755 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 24756 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 24757 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 24758 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 24759 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 24760 } else { 24761 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 24762 subchnl->cdsc_absaddr.msf.second = buffer[10]; 24763 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 24764 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 24765 subchnl->cdsc_reladdr.msf.second = buffer[14]; 24766 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 24767 } 24768 kmem_free(buffer, 16); 24769 kmem_free(com, sizeof (*com)); 24770 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 24771 != 0) { 24772 return (EFAULT); 24773 } 24774 return (rval); 24775 } 24776 24777 24778 /* 24779 * Function: sr_read_tocentry() 24780 * 
24781  * Description: This routine is the driver entry point for handling CD-ROM
24782  *		ioctl requests to read from the Table of Contents (TOC)
24783  *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
24784  *		fields, the starting address (LBA or MSF format per the user)
24785  *		and the data mode if the user specified track is a data track.
24786  *
24787  *		Note: The READ HEADER (0x44) command used in this routine is
24788  *		obsolete per the SCSI MMC spec but still supported in the
24789  *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
24790  *		therefore the command is still implemented in this routine.
24791  *
24792  * Arguments: dev - the device 'dev_t'
24793  *	      data - pointer to user provided toc entry structure,
24794  *		     specifying the track # and the address format
24795  *		     (LBA or MSF).
24796  *	      flag - this argument is a pass through to ddi_copyxxx()
24797  *		     directly from the mode argument of ioctl().
24798  *
24799  * Return Code: the code returned by sd_send_scsi_cmd()
24800  *		EFAULT if ddi_copyxxx() fails
24801  *		ENXIO if fail ddi_get_soft_state
24802  *		EINVAL if data pointer is NULL
24803  */
24804
24805 static int
24806 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
24807 {
24808 	struct sd_lun		*un = NULL;
24809 	struct uscsi_cmd	*com;
24810 	struct cdrom_tocentry	toc_entry;
24811 	struct cdrom_tocentry	*entry = &toc_entry;
24812 	caddr_t			buffer;
24813 	int			rval;
24814 	char			cdb[CDB_GROUP1];
24815
24816 	if (data == NULL) {
24817 		return (EINVAL);
24818 	}
24819
24820 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24821 	    (un->un_state == SD_STATE_OFFLINE)) {
24822 		return (ENXIO);
24823 	}
24824
24825 	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
24826 		return (EFAULT);
24827 	}
24828
24829 	/* Validate the requested track and address format */
24830 	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
24831 		return (EINVAL);
24832 	}
24833
24834 	if (entry->cdte_track == 0) {
24835 		return (EINVAL);
24836 	}
24837
24838 	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
24839 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
24840 	bzero(cdb, CDB_GROUP1);
24841
24842 	cdb[0] = SCMD_READ_TOC;
24843 	/* Set the MSF bit based on the user requested address format */
24844 	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
24845 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
24846 		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
24847 	} else {
24848 		cdb[6] = entry->cdte_track;
24849 	}
24850
24851 	/*
24852 	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
24853 	 * (4 byte TOC response header + 8 byte track descriptor)
24854 	 */
24855 	cdb[8] = 12;
24856 	com->uscsi_cdb = cdb;
24857 	com->uscsi_cdblen = CDB_GROUP1;
24858 	com->uscsi_bufaddr = buffer;
24859 	com->uscsi_buflen = 0x0C;
24860 	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
24861 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24862 	    SD_PATH_STANDARD);
24863 	if (rval != 0) {
24864 		kmem_free(buffer, 12);
24865 		kmem_free(com, sizeof (*com));
24866 		return (rval);
24867 	}
24868
24869 	/* Process the toc entry */
24870 	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
24871 	entry->cdte_ctrl = (buffer[5] & 0x0F);
24872 	if (entry->cdte_format & CDROM_LBA) {
24873 		entry->cdte_addr.lba =
24874 		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
24875 		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
24876 	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
24877 		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
24878 		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
24879 		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
24880 		/*
24881 		 * Send a READ TOC command using the LBA address format to get
24882 		 * the LBA for the track requested so it can be used in the
24883 		 * READ HEADER request
24884 		 *
24885 		 * Note: The MSF bit of the READ HEADER command specifies the
24886 		 * output format. The block address specified in that command
24887 		 * must be in LBA format.
24888 		 */
24889 		cdb[1] = 0;
24890 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24891 		    SD_PATH_STANDARD);
24892 		if (rval != 0) {
24893 			kmem_free(buffer, 12);
24894 			kmem_free(com, sizeof (*com));
24895 			return (rval);
24896 		}
24897 	} else {
24898 		entry->cdte_addr.msf.minute = buffer[9];
24899 		entry->cdte_addr.msf.second = buffer[10];
24900 		entry->cdte_addr.msf.frame = buffer[11];
24901 		/*
24902 		 * Send a READ TOC command using the LBA address format to get
24903 		 * the LBA for the track requested so it can be used in the
24904 		 * READ HEADER request
24905 		 *
24906 		 * Note: The MSF bit of the READ HEADER command specifies the
24907 		 * output format. The block address specified in that command
24908 		 * must be in LBA format.
24909 		 */
24910 		cdb[1] = 0;
24911 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24912 		    SD_PATH_STANDARD);
24913 		if (rval != 0) {
24914 			kmem_free(buffer, 12);
24915 			kmem_free(com, sizeof (*com));
24916 			return (rval);
24917 		}
24918 	}
24919
24920 	/*
24921 	 * Build and send the READ HEADER command to determine the data mode of
24922 	 * the user specified track.
24923 	 */
24924 	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
24925 	    (entry->cdte_track != CDROM_LEADOUT)) {
24926 		bzero(cdb, CDB_GROUP1);
24927 		cdb[0] = SCMD_READ_HEADER;
24928 		cdb[2] = buffer[8];
24929 		cdb[3] = buffer[9];
24930 		cdb[4] = buffer[10];
24931 		cdb[5] = buffer[11];
24932 		cdb[8] = 0x08;
24933 		com->uscsi_buflen = 0x08;
24934 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
24935 		    SD_PATH_STANDARD);
24936 		if (rval == 0) {
24937 			entry->cdte_datamode = buffer[0];
24938 		} else {
24939 			/*
24940 			 * The READ HEADER command failed; since it is
24941 			 * obsoleted in one spec, it's better to return
24942 			 * -1 for an invalid track so that we can still
24943 			 * receive the rest of the TOC data.
24944 			 */
24945 			entry->cdte_datamode = (uchar_t)-1;
24946 		}
24947 	} else {
24948 		entry->cdte_datamode = (uchar_t)-1;
24949 	}
24950
24951 	kmem_free(buffer, 12);
24952 	kmem_free(com, sizeof (*com));
24953 	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
24954 		return (EFAULT);
24955
24956 	return (rval);
24957 }
24958
24959
24960 /*
24961  * Function: sr_read_tochdr()
24962  *
24963  * Description: This routine is the driver entry point for handling CD-ROM
24964  *		ioctl requests to read the Table of Contents (TOC) header
24965  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
24966  *		and ending track numbers.
24967  *
24968  * Arguments: dev - the device 'dev_t'
24969  *	      data - pointer to user provided toc header structure,
24970  *		     specifying the starting and ending track numbers.
24971  *	      flag - this argument is a pass through to ddi_copyxxx()
24972  *		     directly from the mode argument of ioctl().
24973  *
24974  * Return Code: the code returned by sd_send_scsi_cmd()
24975  *		EFAULT if ddi_copyxxx() fails
24976  *		ENXIO if fail ddi_get_soft_state
24977  *		EINVAL if data pointer is NULL
24978  */
24979
24980 static int
24981 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
24982 {
24983 	struct sd_lun		*un;
24984 	struct uscsi_cmd	*com;
24985 	struct cdrom_tochdr	toc_header;
24986 	struct cdrom_tochdr	*hdr = &toc_header;
24987 	char			cdb[CDB_GROUP1];
24988 	int			rval;
24989 	caddr_t			buffer;
24990
24991 	if (data == NULL) {
24992 		return (EINVAL);
24993 	}
24994
24995 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
24996 	    (un->un_state == SD_STATE_OFFLINE)) {
24997 		return (ENXIO);
24998 	}
24999
25000 	buffer = kmem_zalloc(4, KM_SLEEP);
25001 	bzero(cdb, CDB_GROUP1);
25002 	cdb[0] = SCMD_READ_TOC;
25003 	/*
25004 	 * Specifying a track number of 0x00 in the READ TOC command indicates
25005 	 * that the TOC header should be returned
25006 	 */
25007 	cdb[6] = 0x00;
25008 	/*
25009 	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
25010 	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
25011 	 */
25012 	cdb[8] = 0x04;
25013 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25014 	com->uscsi_cdb = cdb;
25015 	com->uscsi_cdblen = CDB_GROUP1;
25016 	com->uscsi_bufaddr = buffer;
25017 	com->uscsi_buflen = 0x04;
25018 	com->uscsi_timeout = 300;
25019 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25020
25021 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25022 	    SD_PATH_STANDARD);
25023 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
25024 		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
25025 		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
25026 	} else {
25027 		hdr->cdth_trk0 = buffer[2];
25028 		hdr->cdth_trk1 = buffer[3];
25029 	}
25030 	kmem_free(buffer, 4);
25031 	kmem_free(com, sizeof (*com));
25032 	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
25033 		return (EFAULT);
25034 	}
25035 	return (rval);
25036 }
25037
25038
25039 /*
25040  * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
25041  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
25042  * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
25043  * digital audio and extended architecture digital audio. These modes are
25044  * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
25045  * MMC specs.
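 *
 * As a rough editorial guide (a summary of the routines below, not of the
 * specs above), the per-sector payload sizes handled are:
 *
 *	mode 1 user data	2048 bytes	(sr_read_mode1)
 *	mode 2 user data	2336 bytes	(sr_read_cd_mode2, sr_read_mode2)
 *	CD-DA audio		2352 bytes	(sr_read_cdda, plus optional
 *						subcode bytes)
 *	CD-XA			format dependent (sr_read_cdxa)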
25046 * 25047 * In addition to support for the various data formats these routines also 25048 * include support for devices that implement only the direct access READ 25049 * commands (0x08, 0x28), devices that implement the READ_CD commands 25050 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25051 * READ CDXA commands (0xD8, 0xDB) 25052 */ 25053 25054 /* 25055 * Function: sr_read_mode1() 25056 * 25057 * Description: This routine is the driver entry point for handling CD-ROM 25058 * ioctl read mode1 requests (CDROMREADMODE1). 25059 * 25060 * Arguments: dev - the device 'dev_t' 25061 * data - pointer to user provided cd read structure specifying 25062 * the lba buffer address and length. 25063 * flag - this argument is a pass through to ddi_copyxxx() 25064 * directly from the mode argument of ioctl(). 25065 * 25066 * Return Code: the code returned by sd_send_scsi_cmd() 25067 * EFAULT if ddi_copyxxx() fails 25068 * ENXIO if fail ddi_get_soft_state 25069 * EINVAL if data pointer is NULL 25070 */ 25071 25072 static int 25073 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25074 { 25075 struct sd_lun *un; 25076 struct cdrom_read mode1_struct; 25077 struct cdrom_read *mode1 = &mode1_struct; 25078 int rval; 25079 #ifdef _MULTI_DATAMODEL 25080 /* To support ILP32 applications in an LP64 world */ 25081 struct cdrom_read32 cdrom_read32; 25082 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25083 #endif /* _MULTI_DATAMODEL */ 25084 25085 if (data == NULL) { 25086 return (EINVAL); 25087 } 25088 25089 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25090 (un->un_state == SD_STATE_OFFLINE)) { 25091 return (ENXIO); 25092 } 25093 25094 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25095 "sd_read_mode1: entry: un:0x%p\n", un); 25096 25097 #ifdef _MULTI_DATAMODEL 25098 switch (ddi_model_convert_from(flag & FMODELS)) { 25099 case DDI_MODEL_ILP32: 25100 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25101 return (EFAULT); 25102 } 25103 /* Convert the ILP32 uscsi data from the application to LP64 */ 25104 cdrom_read32tocdrom_read(cdrd32, mode1); 25105 break; 25106 case DDI_MODEL_NONE: 25107 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25108 return (EFAULT); 25109 } 25110 } 25111 #else /* ! _MULTI_DATAMODEL */ 25112 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25113 return (EFAULT); 25114 } 25115 #endif /* _MULTI_DATAMODEL */ 25116 25117 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25118 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25119 25120 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25121 "sd_read_mode1: exit: un:0x%p\n", un); 25122 25123 return (rval); 25124 } 25125 25126 25127 /* 25128 * Function: sr_read_cd_mode2() 25129 * 25130 * Description: This routine is the driver entry point for handling CD-ROM 25131 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25132 * support the READ CD (0xBE) command or the 1st generation 25133 * READ CD (0xD4) command. 25134 * 25135 * Arguments: dev - the device 'dev_t' 25136 * data - pointer to user provided cd read structure specifying 25137 * the lba buffer address and length. 25138 * flag - this argument is a pass through to ddi_copyxxx() 25139 * directly from the mode argument of ioctl(). 
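 *
 *	      Illustrative user-space usage (a sketch only; assumes an open
 *	      raw CD device descriptor 'fd' and a 2336 byte buffer 'buf'):
 *
 *			struct cdrom_read cr;
 *			cr.cdread_lba = lba;
 *			cr.cdread_bufaddr = buf;
 *			cr.cdread_buflen = 2336;
 *			(void) ioctl(fd, CDROMREADMODE2, &cr);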
25140  *
25141  * Return Code: the code returned by sd_send_scsi_cmd()
25142  *		EFAULT if ddi_copyxxx() fails
25143  *		ENXIO if fail ddi_get_soft_state
25144  *		EINVAL if data pointer is NULL
25145  */
25146
25147 static int
25148 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
25149 {
25150 	struct sd_lun		*un;
25151 	struct uscsi_cmd	*com;
25152 	struct cdrom_read	mode2_struct;
25153 	struct cdrom_read	*mode2 = &mode2_struct;
25154 	uchar_t			cdb[CDB_GROUP5];
25155 	int			nblocks;
25156 	int			rval;
25157 #ifdef _MULTI_DATAMODEL
25158 	/* To support ILP32 applications in an LP64 world */
25159 	struct cdrom_read32	cdrom_read32;
25160 	struct cdrom_read32	*cdrd32 = &cdrom_read32;
25161 #endif /* _MULTI_DATAMODEL */
25162
25163 	if (data == NULL) {
25164 		return (EINVAL);
25165 	}
25166
25167 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25168 	    (un->un_state == SD_STATE_OFFLINE)) {
25169 		return (ENXIO);
25170 	}
25171
25172 #ifdef _MULTI_DATAMODEL
25173 	switch (ddi_model_convert_from(flag & FMODELS)) {
25174 	case DDI_MODEL_ILP32:
25175 		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
25176 			return (EFAULT);
25177 		}
25178 		/* Convert the ILP32 uscsi data from the application to LP64 */
25179 		cdrom_read32tocdrom_read(cdrd32, mode2);
25180 		break;
25181 	case DDI_MODEL_NONE:
25182 		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
25183 			return (EFAULT);
25184 		}
25185 		break;
25186 	}
25187
25188 #else /* ! _MULTI_DATAMODEL */
25189 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
25190 		return (EFAULT);
25191 	}
25192 #endif /* _MULTI_DATAMODEL */
25193
25194 	bzero(cdb, sizeof (cdb));
25195 	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
25196 		/* Read command supported by 1st generation atapi drives */
25197 		cdb[0] = SCMD_READ_CDD4;
25198 	} else {
25199 		/* Universal CD Access Command */
25200 		cdb[0] = SCMD_READ_CD;
25201 	}
25202
25203 	/*
25204 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
25205 	 */
25206 	cdb[1] = CDROM_SECTOR_TYPE_MODE2;
25207
25208 	/* set the start address */
25209 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
25210 	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
25211 	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
25212 	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
25213
25214 	/* set the transfer length */
25215 	nblocks = mode2->cdread_buflen / 2336;
25216 	cdb[6] = (uchar_t)(nblocks >> 16);
25217 	cdb[7] = (uchar_t)(nblocks >> 8);
25218 	cdb[8] = (uchar_t)nblocks;
25219
25220 	/* set the filter bits */
25221 	cdb[9] = CDROM_READ_CD_USERDATA;
25222
25223 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25224 	com->uscsi_cdb = (caddr_t)cdb;
25225 	com->uscsi_cdblen = sizeof (cdb);
25226 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
25227 	com->uscsi_buflen = mode2->cdread_buflen;
25228 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25229
25230 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
25231 	    SD_PATH_STANDARD);
25232 	kmem_free(com, sizeof (*com));
25233 	return (rval);
25234 }
25235
25236
25237 /*
25238  * Function: sr_read_mode2()
25239  *
25240  * Description: This routine is the driver entry point for handling CD-ROM
25241  *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
25242  *		do not support the READ CD (0xBE) command.
25243  *
25244  * Arguments: dev - the device 'dev_t'
25245  *	      data - pointer to user provided cd read structure specifying
25246  *		     the lba buffer address and length.
25247  *	      flag - this argument is a pass through to ddi_copyxxx()
25248  *		     directly from the mode argument of ioctl().
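 *
 *	      Note: unlike sr_read_cd_mode2(), this path temporarily switches
 *	      the target block size to 2336 bytes via sr_sector_mode(),
 *	      issues a READ(6), and then restores the original block size.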
25249 * 25250 * Return Code: the code returned by sd_send_scsi_cmd() 25251 * EFAULT if ddi_copyxxx() fails 25252 * ENXIO if fail ddi_get_soft_state 25253 * EINVAL if data pointer is NULL 25254 * EIO if fail to reset block size 25255 * EAGAIN if commands are in progress in the driver 25256 */ 25257 25258 static int 25259 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25260 { 25261 struct sd_lun *un; 25262 struct cdrom_read mode2_struct; 25263 struct cdrom_read *mode2 = &mode2_struct; 25264 int rval; 25265 uint32_t restore_blksize; 25266 struct uscsi_cmd *com; 25267 uchar_t cdb[CDB_GROUP0]; 25268 int nblocks; 25269 25270 #ifdef _MULTI_DATAMODEL 25271 /* To support ILP32 applications in an LP64 world */ 25272 struct cdrom_read32 cdrom_read32; 25273 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25274 #endif /* _MULTI_DATAMODEL */ 25275 25276 if (data == NULL) { 25277 return (EINVAL); 25278 } 25279 25280 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25281 (un->un_state == SD_STATE_OFFLINE)) { 25282 return (ENXIO); 25283 } 25284 25285 /* 25286 * Because this routine will update the device and driver block size 25287 * being used we want to make sure there are no commands in progress. 25288 * If commands are in progress the user will have to try again. 25289 * 25290 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25291 * in sdioctl to protect commands from sdioctl through to the top of 25292 * sd_uscsi_strategy. See sdioctl for details. 25293 */ 25294 mutex_enter(SD_MUTEX(un)); 25295 if (un->un_ncmds_in_driver != 1) { 25296 mutex_exit(SD_MUTEX(un)); 25297 return (EAGAIN); 25298 } 25299 mutex_exit(SD_MUTEX(un)); 25300 25301 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25302 "sd_read_mode2: entry: un:0x%p\n", un); 25303 25304 #ifdef _MULTI_DATAMODEL 25305 switch (ddi_model_convert_from(flag & FMODELS)) { 25306 case DDI_MODEL_ILP32: 25307 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25308 return (EFAULT); 25309 } 25310 /* Convert the ILP32 uscsi data from the application to LP64 */ 25311 cdrom_read32tocdrom_read(cdrd32, mode2); 25312 break; 25313 case DDI_MODEL_NONE: 25314 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25315 return (EFAULT); 25316 } 25317 break; 25318 } 25319 #else /* ! 
_MULTI_DATAMODEL */
25320 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
25321 		return (EFAULT);
25322 	}
25323 #endif /* _MULTI_DATAMODEL */
25324
25325 	/* Store the current target block size for restoration later */
25326 	restore_blksize = un->un_tgt_blocksize;
25327
25328 	/* Change the device and soft state target block size to 2336 */
25329 	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
25330 		rval = EIO;
25331 		goto done;
25332 	}
25333
25334
25335 	bzero(cdb, sizeof (cdb));
25336
25337 	/* set READ operation */
25338 	cdb[0] = SCMD_READ;
25339
25340 	/* adjust lba for 2kbyte blocks from 512 byte blocks */
25341 	mode2->cdread_lba >>= 2;
25342
25343 	/* set the start address */
25344 	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
25345 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
25346 	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
25347
25348 	/* set the transfer length */
25349 	nblocks = mode2->cdread_buflen / 2336;
25350 	cdb[4] = (uchar_t)nblocks & 0xFF;
25351
25352 	/* build command */
25353 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25354 	com->uscsi_cdb = (caddr_t)cdb;
25355 	com->uscsi_cdblen = sizeof (cdb);
25356 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
25357 	com->uscsi_buflen = mode2->cdread_buflen;
25358 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25359
25360 	/*
25361 	 * Issue SCSI command with user space address for read buffer.
25362 	 *
25363 	 * This sends the command through main channel in the driver.
25364 	 *
25365 	 * Since this is accessed via an IOCTL call, we go through the
25366 	 * standard path, so that if the device was powered down, then
25367 	 * it would be 'awakened' to handle the command.
25368 	 */
25369 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
25370 	    SD_PATH_STANDARD);
25371
25372 	kmem_free(com, sizeof (*com));
25373
25374 	/* Restore the device and soft state target block size */
25375 	if (sr_sector_mode(dev, restore_blksize) != 0) {
25376 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25377 		    "can't do switch back to mode 1\n");
25378 		/*
25379 		 * If sd_send_scsi_READ succeeded we still need to report
25380 		 * an error because we failed to reset the block size
25381 		 */
25382 		if (rval == 0) {
25383 			rval = EIO;
25384 		}
25385 	}
25386
25387 done:
25388 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
25389 	    "sd_read_mode2: exit: un:0x%p\n", un);
25390
25391 	return (rval);
25392 }
25393
25394
25395 /*
25396  * Function: sr_sector_mode()
25397  *
25398  * Description: This utility function is used by sr_read_mode2 to set the target
25399  *		block size based on the user specified size. This is a legacy
25400  *		implementation based upon a vendor specific mode page.
25401  *
25402  * Arguments: dev - the device 'dev_t'
25403  *	      blksize - flag indicating if block size is being set to 2336 or
25404  *			512.
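 *
 *	      A sketch of the 20-byte select data built below (offsets are an
 *	      editorial reading of the code, not of a published spec):
 *
 *			select[3]	0x08 (block descriptor length)
 *			select[10..11]	requested block size
 *			select[12..13]	page code and page length
 *			select[14]	bit 0 set selects the 2336 byte mode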
25405  *
25406  * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
25407  *		sd_send_scsi_MODE_SELECT()
25408  *		ENXIO if fail ddi_get_soft_state
25410  */
25411
25412 static int
25413 sr_sector_mode(dev_t dev, uint32_t blksize)
25414 {
25415 	struct sd_lun	*un;
25416 	uchar_t		*sense;
25417 	uchar_t		*select;
25418 	int		rval;
25419
25420 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25421 	    (un->un_state == SD_STATE_OFFLINE)) {
25422 		return (ENXIO);
25423 	}
25424
25425 	sense = kmem_zalloc(20, KM_SLEEP);
25426
25427 	/* Note: This is a vendor specific mode page (0x81) */
25428 	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81,
25429 	    SD_PATH_STANDARD)) != 0) {
25430 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25431 		    "sr_sector_mode: Mode Sense failed\n");
25432 		kmem_free(sense, 20);
25433 		return (rval);
25434 	}
25435 	select = kmem_zalloc(20, KM_SLEEP);
25436 	select[3] = 0x08;
25437 	select[10] = ((blksize >> 8) & 0xff);
25438 	select[11] = (blksize & 0xff);
25439 	select[12] = 0x01;
25440 	select[13] = 0x06;
25441 	select[14] = sense[14];
25442 	select[15] = sense[15];
25443 	if (blksize == SD_MODE2_BLKSIZE) {
25444 		select[14] |= 0x01;
25445 	}
25446
25447 	if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20,
25448 	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
25449 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25450 		    "sr_sector_mode: Mode Select failed\n");
25451 	} else {
25452 		/*
25453 		 * Only update the softstate block size if we successfully
25454 		 * changed the device block mode.
25455 		 */
25456 		mutex_enter(SD_MUTEX(un));
25457 		sd_update_block_info(un, blksize, 0);
25458 		mutex_exit(SD_MUTEX(un));
25459 	}
25460 	kmem_free(sense, 20);
25461 	kmem_free(select, 20);
25462 	return (rval);
25463 }
25464
25465
25466 /*
25467  * Function: sr_read_cdda()
25468  *
25469  * Description: This routine is the driver entry point for handling CD-ROM
25470  *		ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
25471  *		the target supports CDDA these requests are handled via a vendor
25472  *		specific command (0xD8). If the target does not support CDDA
25473  *		these requests are handled via the READ CD command (0xBE).
25474  *
25475  * Arguments: dev - the device 'dev_t'
25476  *	      data - pointer to user provided CD-DA structure specifying
25477  *		     the track starting address, transfer length, and
25478  *		     subcode options.
25479  *	      flag - this argument is a pass through to ddi_copyxxx()
25480  *		     directly from the mode argument of ioctl().
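 *
 *	      Illustrative user-space usage (a sketch only; assumes an open
 *	      raw CD device descriptor 'fd' and a suitably sized buffer):
 *
 *			struct cdrom_cdda cdda;
 *			cdda.cdda_addr = lba;
 *			cdda.cdda_length = nblks;
 *			cdda.cdda_data = buf;
 *			cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *			(void) ioctl(fd, CDROMCDDA, &cdda);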
25481 * 25482 * Return Code: the code returned by sd_send_scsi_cmd() 25483 * EFAULT if ddi_copyxxx() fails 25484 * ENXIO if fail ddi_get_soft_state 25485 * EINVAL if invalid arguments are provided 25486 * ENOTTY 25487 */ 25488 25489 static int 25490 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25491 { 25492 struct sd_lun *un; 25493 struct uscsi_cmd *com; 25494 struct cdrom_cdda *cdda; 25495 int rval; 25496 size_t buflen; 25497 char cdb[CDB_GROUP5]; 25498 25499 #ifdef _MULTI_DATAMODEL 25500 /* To support ILP32 applications in an LP64 world */ 25501 struct cdrom_cdda32 cdrom_cdda32; 25502 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25503 #endif /* _MULTI_DATAMODEL */ 25504 25505 if (data == NULL) { 25506 return (EINVAL); 25507 } 25508 25509 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25510 return (ENXIO); 25511 } 25512 25513 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25514 25515 #ifdef _MULTI_DATAMODEL 25516 switch (ddi_model_convert_from(flag & FMODELS)) { 25517 case DDI_MODEL_ILP32: 25518 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25520 "sr_read_cdda: ddi_copyin Failed\n"); 25521 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25522 return (EFAULT); 25523 } 25524 /* Convert the ILP32 uscsi data from the application to LP64 */ 25525 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25526 break; 25527 case DDI_MODEL_NONE: 25528 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25529 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25530 "sr_read_cdda: ddi_copyin Failed\n"); 25531 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25532 return (EFAULT); 25533 } 25534 break; 25535 } 25536 #else /* ! _MULTI_DATAMODEL */ 25537 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25538 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25539 "sr_read_cdda: ddi_copyin Failed\n"); 25540 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25541 return (EFAULT); 25542 } 25543 #endif /* _MULTI_DATAMODEL */ 25544 25545 /* 25546 * Since MMC-2 expects max 3 bytes for length, check if the 25547 * length input is greater than 3 bytes 25548 */ 25549 if ((cdda->cdda_length & 0xFF000000) != 0) { 25550 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25551 "cdrom transfer length too large: %d (limit %d)\n", 25552 cdda->cdda_length, 0xFFFFFF); 25553 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25554 return (EINVAL); 25555 } 25556 25557 switch (cdda->cdda_subcode) { 25558 case CDROM_DA_NO_SUBCODE: 25559 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25560 break; 25561 case CDROM_DA_SUBQ: 25562 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25563 break; 25564 case CDROM_DA_ALL_SUBCODE: 25565 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25566 break; 25567 case CDROM_DA_SUBCODE_ONLY: 25568 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25569 break; 25570 default: 25571 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25572 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25573 cdda->cdda_subcode); 25574 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25575 return (EINVAL); 25576 } 25577 25578 /* Build and send the command */ 25579 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25580 bzero(cdb, CDB_GROUP5); 25581 25582 if (un->un_f_cfg_cdda == TRUE) { 25583 cdb[0] = (char)SCMD_READ_CD; 25584 cdb[1] = 0x04; 25585 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25586 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25587 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25588 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25589 
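		/* Bytes 6-8 of READ CD carry the transfer length in blocks */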
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25590 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25591 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25592 cdb[9] = 0x10; 25593 switch (cdda->cdda_subcode) { 25594 case CDROM_DA_NO_SUBCODE : 25595 cdb[10] = 0x0; 25596 break; 25597 case CDROM_DA_SUBQ : 25598 cdb[10] = 0x2; 25599 break; 25600 case CDROM_DA_ALL_SUBCODE : 25601 cdb[10] = 0x1; 25602 break; 25603 case CDROM_DA_SUBCODE_ONLY : 25604 /* FALLTHROUGH */ 25605 default : 25606 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25607 kmem_free(com, sizeof (*com)); 25608 return (ENOTTY); 25609 } 25610 } else { 25611 cdb[0] = (char)SCMD_READ_CDDA; 25612 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25613 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25614 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25615 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25616 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 25617 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25618 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25619 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 25620 cdb[10] = cdda->cdda_subcode; 25621 } 25622 25623 com->uscsi_cdb = cdb; 25624 com->uscsi_cdblen = CDB_GROUP5; 25625 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 25626 com->uscsi_buflen = buflen; 25627 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25628 25629 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25630 SD_PATH_STANDARD); 25631 25632 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25633 kmem_free(com, sizeof (*com)); 25634 return (rval); 25635 } 25636 25637 25638 /* 25639 * Function: sr_read_cdxa() 25640 * 25641 * Description: This routine is the driver entry point for handling CD-ROM 25642 * ioctl requests to return CD-XA (Extended Architecture) data. 25643 * (CDROMCDXA). 25644 * 25645 * Arguments: dev - the device 'dev_t' 25646 * data - pointer to user provided CD-XA structure specifying 25647 * the data starting address, transfer length, and format 25648 * flag - this argument is a pass through to ddi_copyxxx() 25649 * directly from the mode argument of ioctl(). 25650 * 25651 * Return Code: the code returned by sd_send_scsi_cmd() 25652 * EFAULT if ddi_copyxxx() fails 25653 * ENXIO if fail ddi_get_soft_state 25654 * EINVAL if data pointer is NULL 25655 */ 25656 25657 static int 25658 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 25659 { 25660 struct sd_lun *un; 25661 struct uscsi_cmd *com; 25662 struct cdrom_cdxa *cdxa; 25663 int rval; 25664 size_t buflen; 25665 char cdb[CDB_GROUP5]; 25666 uchar_t read_flags; 25667 25668 #ifdef _MULTI_DATAMODEL 25669 /* To support ILP32 applications in an LP64 world */ 25670 struct cdrom_cdxa32 cdrom_cdxa32; 25671 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 25672 #endif /* _MULTI_DATAMODEL */ 25673 25674 if (data == NULL) { 25675 return (EINVAL); 25676 } 25677 25678 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25679 return (ENXIO); 25680 } 25681 25682 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 25683 25684 #ifdef _MULTI_DATAMODEL 25685 switch (ddi_model_convert_from(flag & FMODELS)) { 25686 case DDI_MODEL_ILP32: 25687 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 25688 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25689 return (EFAULT); 25690 } 25691 /* 25692 * Convert the ILP32 uscsi data from the 25693 * application to LP64 for internal use. 
25694 */
25695 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
25696 break;
25697 case DDI_MODEL_NONE:
25698 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25699 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25700 return (EFAULT);
25701 }
25702 break;
25703 }
25704 #else /* ! _MULTI_DATAMODEL */
25705 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
25706 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25707 return (EFAULT);
25708 }
25709 #endif /* _MULTI_DATAMODEL */
25710 
25711 /*
25712 * Since MMC-2 expects at most 3 bytes for the transfer length, reject
25713 * any length input that does not fit in 3 bytes
25714 */
25715 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
25716 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
25717 "cdrom transfer length too large: %d (limit %d)\n",
25718 cdxa->cdxa_length, 0xFFFFFF);
25719 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25720 return (EINVAL);
25721 }
25722 
25723 switch (cdxa->cdxa_format) {
25724 case CDROM_XA_DATA:
25725 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
25726 read_flags = 0x10;
25727 break;
25728 case CDROM_XA_SECTOR_DATA:
25729 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
25730 read_flags = 0xf8;
25731 break;
25732 case CDROM_XA_DATA_W_ERROR:
25733 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
25734 read_flags = 0xfc;
25735 break;
25736 default:
25737 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25738 "sr_read_cdxa: Format '0x%x' Not Supported\n",
25739 cdxa->cdxa_format);
25740 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
25741 return (EINVAL);
25742 }
25743 
25744 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25745 bzero(cdb, CDB_GROUP5);
25746 if (un->un_f_mmc_cap == TRUE) {
25747 cdb[0] = (char)SCMD_READ_CD;
25748 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
25749 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
25750 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
25751 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
25752 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
25753 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
25754 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
25755 cdb[9] = (char)read_flags;
25756 } else {
25757 /*
25758 * Note: A vendor specific command (0xDB) is being used here to
25759 * request a read of all subcodes.
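 *
 * (Editorial illustration, derived from the assignments below rather
 * than from a published spec: the vendor-specific CDB built here is
 * laid out roughly as
 *
 *	byte 0     SCMD_READ_CDXA (0xDB)
 *	bytes 2-5  starting address, big-endian
 *	bytes 6-9  transfer length, big-endian
 *	byte 10    cdxa_format
 *
 * so, unlike the MMC READ CD path above, the length field here
 * occupies a full four bytes.)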
25760 */ 25761 cdb[0] = (char)SCMD_READ_CDXA; 25762 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 25763 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 25764 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 25765 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 25766 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 25767 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 25768 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 25769 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 25770 cdb[10] = cdxa->cdxa_format; 25771 } 25772 com->uscsi_cdb = cdb; 25773 com->uscsi_cdblen = CDB_GROUP5; 25774 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 25775 com->uscsi_buflen = buflen; 25776 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25777 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25778 SD_PATH_STANDARD); 25779 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 25780 kmem_free(com, sizeof (*com)); 25781 return (rval); 25782 } 25783 25784 25785 /* 25786 * Function: sr_eject() 25787 * 25788 * Description: This routine is the driver entry point for handling CD-ROM 25789 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 25790 * 25791 * Arguments: dev - the device 'dev_t' 25792 * 25793 * Return Code: the code returned by sd_send_scsi_cmd() 25794 */ 25795 25796 static int 25797 sr_eject(dev_t dev) 25798 { 25799 struct sd_lun *un; 25800 int rval; 25801 25802 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25803 (un->un_state == SD_STATE_OFFLINE)) { 25804 return (ENXIO); 25805 } 25806 25807 /* 25808 * To prevent race conditions with the eject 25809 * command, keep track of an eject command as 25810 * it progresses. If we are already handling 25811 * an eject command in the driver for the given 25812 * unit and another request to eject is received 25813 * immediately return EAGAIN so we don't lose 25814 * the command if the current eject command fails. 25815 */ 25816 mutex_enter(SD_MUTEX(un)); 25817 if (un->un_f_ejecting == TRUE) { 25818 mutex_exit(SD_MUTEX(un)); 25819 return (EAGAIN); 25820 } 25821 un->un_f_ejecting = TRUE; 25822 mutex_exit(SD_MUTEX(un)); 25823 25824 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 25825 SD_PATH_STANDARD)) != 0) { 25826 mutex_enter(SD_MUTEX(un)); 25827 un->un_f_ejecting = FALSE; 25828 mutex_exit(SD_MUTEX(un)); 25829 return (rval); 25830 } 25831 25832 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 25833 SD_PATH_STANDARD); 25834 25835 if (rval == 0) { 25836 mutex_enter(SD_MUTEX(un)); 25837 sr_ejected(un); 25838 un->un_mediastate = DKIO_EJECTED; 25839 un->un_f_ejecting = FALSE; 25840 cv_broadcast(&un->un_state_cv); 25841 mutex_exit(SD_MUTEX(un)); 25842 } else { 25843 mutex_enter(SD_MUTEX(un)); 25844 un->un_f_ejecting = FALSE; 25845 mutex_exit(SD_MUTEX(un)); 25846 } 25847 return (rval); 25848 } 25849 25850 25851 /* 25852 * Function: sr_ejected() 25853 * 25854 * Description: This routine updates the soft state structure to invalidate the 25855 * geometry information after the media has been ejected or a 25856 * media eject has been detected. 
25857 *
25858 * Arguments: un - driver soft state (unit) structure
25859 */
25860 
25861 static void
25862 sr_ejected(struct sd_lun *un)
25863 {
25864 struct sd_errstats *stp;
25865 
25866 ASSERT(un != NULL);
25867 ASSERT(mutex_owned(SD_MUTEX(un)));
25868 
25869 un->un_f_blockcount_is_valid = FALSE;
25870 un->un_f_tgt_blocksize_is_valid = FALSE;
25871 mutex_exit(SD_MUTEX(un));
25872 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
25873 mutex_enter(SD_MUTEX(un));
25874 
25875 if (un->un_errstats != NULL) {
25876 stp = (struct sd_errstats *)un->un_errstats->ks_data;
25877 stp->sd_capacity.value.ui64 = 0;
25878 }
25879 
25880 /* remove "capacity-of-device" properties */
25881 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
25882 "device-nblocks");
25883 (void) ddi_prop_remove(DDI_DEV_T_NONE, SD_DEVINFO(un),
25884 "device-blksize");
25885 }
25886 
25887 
25888 /*
25889 * Function: sr_check_wp()
25890 *
25891 * Description: This routine checks the write protection of removable media
25892 * and hotpluggable devices via the write protect bit of the
25893 * Mode Page Header device specific field. Some devices choke
25894 * on an unsupported mode page. To work around this issue,
25895 * this routine requests mode page 0x3f (request for all
25896 * pages) for all device types.
25897 *
25898 * Arguments: dev - the device 'dev_t'
25899 *
25900 * Return Code: int indicating if the device is write protected (1) or not (0)
25901 *
25902 * Context: Kernel thread.
25903 *
25904 */
25905 
25906 static int
25907 sr_check_wp(dev_t dev)
25908 {
25909 struct sd_lun *un;
25910 uchar_t device_specific;
25911 uchar_t *sense;
25912 int hdrlen;
25913 int rval = FALSE;
25914 
25915 /*
25916 * Note: The return codes for this routine should be reworked to
25917 * properly handle the case of a NULL softstate.
25918 */
25919 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25920 return (FALSE);
25921 }
25922 
25923 if (un->un_f_cfg_is_atapi == TRUE) {
25924 /*
25925 * The mode page contents are not required; set the allocation
25926 * length for the mode page header only
25927 */
25928 hdrlen = MODE_HEADER_LENGTH_GRP2;
25929 sense = kmem_zalloc(hdrlen, KM_SLEEP);
25930 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
25931 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25932 goto err_exit;
25933 device_specific =
25934 ((struct mode_header_grp2 *)sense)->device_specific;
25935 } else {
25936 hdrlen = MODE_HEADER_LENGTH;
25937 sense = kmem_zalloc(hdrlen, KM_SLEEP);
25938 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
25939 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
25940 goto err_exit;
25941 device_specific =
25942 ((struct mode_header *)sense)->device_specific;
25943 }
25944 
25945 /*
25946 * The device is write protected if the write protect bit is set in
25947 * the device specific field. If the mode sense failed, not all disks
25948 * understand this query, so err_exit returns FALSE (writable).
25949 */
25950 if (device_specific & WRITE_PROTECT) {
25951 rval = TRUE;
25952 }
25953 
25954 err_exit:
25955 kmem_free(sense, hdrlen);
25956 return (rval);
25957 }
25958 
25959 /*
25960 * Function: sr_volume_ctrl()
25961 *
25962 * Description: This routine is the driver entry point for handling CD-ROM
25963 * audio output volume ioctl requests. (CDROMVOLCTRL)
25964 *
25965 * Arguments: dev - the device 'dev_t'
25966 * data - pointer to user audio volume control structure
25967 * flag - this argument is a pass through to ddi_copyxxx()
25968 * directly from the mode argument of ioctl().
25969 * 25970 * Return Code: the code returned by sd_send_scsi_cmd() 25971 * EFAULT if ddi_copyxxx() fails 25972 * ENXIO if fail ddi_get_soft_state 25973 * EINVAL if data pointer is NULL 25974 * 25975 */ 25976 25977 static int 25978 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 25979 { 25980 struct sd_lun *un; 25981 struct cdrom_volctrl volume; 25982 struct cdrom_volctrl *vol = &volume; 25983 uchar_t *sense_page; 25984 uchar_t *select_page; 25985 uchar_t *sense; 25986 uchar_t *select; 25987 int sense_buflen; 25988 int select_buflen; 25989 int rval; 25990 25991 if (data == NULL) { 25992 return (EINVAL); 25993 } 25994 25995 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25996 (un->un_state == SD_STATE_OFFLINE)) { 25997 return (ENXIO); 25998 } 25999 26000 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26001 return (EFAULT); 26002 } 26003 26004 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26005 struct mode_header_grp2 *sense_mhp; 26006 struct mode_header_grp2 *select_mhp; 26007 int bd_len; 26008 26009 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26010 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26011 MODEPAGE_AUDIO_CTRL_LEN; 26012 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26013 select = kmem_zalloc(select_buflen, KM_SLEEP); 26014 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26015 sense_buflen, MODEPAGE_AUDIO_CTRL, 26016 SD_PATH_STANDARD)) != 0) { 26017 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26018 "sr_volume_ctrl: Mode Sense Failed\n"); 26019 kmem_free(sense, sense_buflen); 26020 kmem_free(select, select_buflen); 26021 return (rval); 26022 } 26023 sense_mhp = (struct mode_header_grp2 *)sense; 26024 select_mhp = (struct mode_header_grp2 *)select; 26025 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26026 sense_mhp->bdesc_length_lo; 26027 if (bd_len > MODE_BLK_DESC_LENGTH) { 26028 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26029 "sr_volume_ctrl: Mode Sense returned invalid " 26030 "block descriptor length\n"); 26031 kmem_free(sense, sense_buflen); 26032 kmem_free(select, select_buflen); 26033 return (EIO); 26034 } 26035 sense_page = (uchar_t *) 26036 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26037 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26038 select_mhp->length_msb = 0; 26039 select_mhp->length_lsb = 0; 26040 select_mhp->bdesc_length_hi = 0; 26041 select_mhp->bdesc_length_lo = 0; 26042 } else { 26043 struct mode_header *sense_mhp, *select_mhp; 26044 26045 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26046 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26047 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26048 select = kmem_zalloc(select_buflen, KM_SLEEP); 26049 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26050 sense_buflen, MODEPAGE_AUDIO_CTRL, 26051 SD_PATH_STANDARD)) != 0) { 26052 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26053 "sr_volume_ctrl: Mode Sense Failed\n"); 26054 kmem_free(sense, sense_buflen); 26055 kmem_free(select, select_buflen); 26056 return (rval); 26057 } 26058 sense_mhp = (struct mode_header *)sense; 26059 select_mhp = (struct mode_header *)select; 26060 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26061 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26062 "sr_volume_ctrl: Mode Sense returned invalid " 26063 "block descriptor length\n"); 26064 kmem_free(sense, sense_buflen); 26065 kmem_free(select, select_buflen); 26066 return (EIO); 26067 } 26068 sense_page = (uchar_t *) 26069 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26070 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26071 select_mhp->length = 0; 26072 select_mhp->bdesc_length = 0; 26073 } 26074 /* 26075 * Note: An audio control data structure could be created and overlayed 26076 * on the following in place of the array indexing method implemented. 26077 */ 26078 26079 /* Build the select data for the user volume data */ 26080 select_page[0] = MODEPAGE_AUDIO_CTRL; 26081 select_page[1] = 0xE; 26082 /* Set the immediate bit */ 26083 select_page[2] = 0x04; 26084 /* Zero out reserved fields */ 26085 select_page[3] = 0x00; 26086 select_page[4] = 0x00; 26087 /* Return sense data for fields not to be modified */ 26088 select_page[5] = sense_page[5]; 26089 select_page[6] = sense_page[6]; 26090 select_page[7] = sense_page[7]; 26091 /* Set the user specified volume levels for channel 0 and 1 */ 26092 select_page[8] = 0x01; 26093 select_page[9] = vol->channel0; 26094 select_page[10] = 0x02; 26095 select_page[11] = vol->channel1; 26096 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26097 select_page[12] = sense_page[12]; 26098 select_page[13] = sense_page[13]; 26099 select_page[14] = sense_page[14]; 26100 select_page[15] = sense_page[15]; 26101 26102 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26103 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26104 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26105 } else { 26106 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26107 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26108 } 26109 26110 kmem_free(sense, sense_buflen); 26111 kmem_free(select, select_buflen); 26112 return (rval); 26113 } 26114 26115 26116 /* 26117 * Function: sr_read_sony_session_offset() 26118 * 26119 * Description: This routine is the driver entry point for handling CD-ROM 26120 * ioctl requests for session offset information. (CDROMREADOFFSET) 26121 * The address of the first track in the last session of a 26122 * multi-session CD-ROM is returned 26123 * 26124 * Note: This routine uses a vendor specific key value in the 26125 * command control field without implementing any vendor check here 26126 * or in the ioctl routine. 26127 * 26128 * Arguments: dev - the device 'dev_t' 26129 * data - pointer to an int to hold the requested address 26130 * flag - this argument is a pass through to ddi_copyxxx() 26131 * directly from the mode argument of ioctl(). 26132 * 26133 * Return Code: the code returned by sd_send_scsi_cmd() 26134 * EFAULT if ddi_copyxxx() fails 26135 * ENXIO if fail ddi_get_soft_state 26136 * EINVAL if data pointer is NULL 26137 */ 26138 26139 static int 26140 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26141 { 26142 struct sd_lun *un; 26143 struct uscsi_cmd *com; 26144 caddr_t buffer; 26145 char cdb[CDB_GROUP1]; 26146 int session_offset = 0; 26147 int rval; 26148 26149 if (data == NULL) { 26150 return (EINVAL); 26151 } 26152 26153 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26154 (un->un_state == SD_STATE_OFFLINE)) { 26155 return (ENXIO); 26156 } 26157 26158 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26159 bzero(cdb, CDB_GROUP1); 26160 cdb[0] = SCMD_READ_TOC; 26161 /* 26162 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26163 * (4 byte TOC response header + 8 byte response data) 26164 */ 26165 cdb[8] = SONY_SESSION_OFFSET_LEN; 26166 /* Byte 9 is the control byte. 
A vendor specific value is used */
26167 cdb[9] = SONY_SESSION_OFFSET_KEY;
26168 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26169 com->uscsi_cdb = cdb;
26170 com->uscsi_cdblen = CDB_GROUP1;
26171 com->uscsi_bufaddr = buffer;
26172 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
26173 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
26174 
26175 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26176 SD_PATH_STANDARD);
26177 if (rval != 0) {
26178 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
26179 kmem_free(com, sizeof (*com));
26180 return (rval);
26181 }
26182 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
26183 session_offset =
26184 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
26185 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
26186 /*
26187 * The drive returns the offset in units of the current lbasize
26188 * blocks. Convert it to 2k blocks before returning to the user
26189 */
26190 if (un->un_tgt_blocksize == CDROM_BLK_512) {
26191 session_offset >>= 2;
26192 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
26193 session_offset >>= 1;
26194 }
26195 }
26196 
26197 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
26198 rval = EFAULT;
26199 }
26200 
26201 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
26202 kmem_free(com, sizeof (*com));
26203 return (rval);
26204 }
26205 
26206 
26207 /*
26208 * Function: sd_wm_cache_constructor()
26209 *
26210 * Description: Cache constructor for the wmap cache used by the
26211 * read/modify/write devices.
26212 *
26213 * Arguments: wm - A pointer to the sd_w_map to be initialized.
26214 * un - sd_lun structure for the device.
26215 * flags - the KM flags passed to the constructor
26216 *
26217 * Return Code: 0 on success.
26218 * -1 on failure.
26219 */
26220 
26221 /*ARGSUSED*/
26222 static int
26223 sd_wm_cache_constructor(void *wm, void *un, int flags)
26224 {
26225 bzero(wm, sizeof (struct sd_w_map));
26226 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
26227 return (0);
26228 }
26229 
26230 
26231 /*
26232 * Function: sd_wm_cache_destructor()
26233 *
26234 * Description: Cache destructor for the wmap cache used by the
26235 * read/modify/write devices.
26236 *
26237 * Arguments: wm - A pointer to the sd_w_map to be torn down.
26238 * un - sd_lun structure for the device.
26239 */
26240 /*ARGSUSED*/
26241 static void
26242 sd_wm_cache_destructor(void *wm, void *un)
26243 {
26244 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
26245 }
26246 
26247 
26248 /*
26249 * Function: sd_range_lock()
26250 *
26251 * Description: Lock the specified range of blocks to ensure that a
26252 * read-modify-write is atomic and that no other I/O writes
26253 * to the same location. The range is specified in terms
26254 * of start and end blocks. Block numbers are the actual
26255 * media block numbers, not system block numbers.
26256 *
26257 * Arguments: un - sd_lun structure for the device.
26258 * startb - The starting block number
26259 * endb - The end block number
26260 * typ - type of i/o - simple/read_modify_write
26261 *
26262 * Return Code: wm - pointer to the wmap structure.
26263 *
26264 * Context: This routine can sleep.
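 *
 * (Editorial sketch, not part of the original header: a typical caller
 * pairs this routine with sd_range_unlock() around the read-modify-write
 * of a partial target block, along the lines of
 *
 *	struct sd_w_map	*wmp;
 *
 *	wmp = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	(read the covered target blocks, merge in the new data,
 *	then write the merged blocks back out)
 *	sd_range_unlock(un, wmp);
 *
 * where start_blk and end_blk are hypothetical media block numbers.)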
26265 */
26266 
26267 static struct sd_w_map *
26268 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
26269 {
26270 struct sd_w_map *wmp = NULL;
26271 struct sd_w_map *sl_wmp = NULL;
26272 struct sd_w_map *tmp_wmp;
26273 wm_state state = SD_WM_CHK_LIST;
26274 
26275 
26276 ASSERT(un != NULL);
26277 ASSERT(!mutex_owned(SD_MUTEX(un)));
26278 
26279 mutex_enter(SD_MUTEX(un));
26280 
26281 while (state != SD_WM_DONE) {
26282 
26283 switch (state) {
26284 case SD_WM_CHK_LIST:
26285 /*
26286 * This is the starting state. Check the wmap list
26287 * to see if the range is currently available.
26288 */
26289 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
26290 /*
26291 * If this is a simple write and no rmw
26292 * i/o is pending then try to lock the
26293 * range as the range should be available.
26294 */
26295 state = SD_WM_LOCK_RANGE;
26296 } else {
26297 tmp_wmp = sd_get_range(un, startb, endb);
26298 if (tmp_wmp != NULL) {
26299 if ((wmp != NULL) && ONLIST(un, wmp)) {
26300 /*
26301 * Should not keep onlist wmps
26302 * while waiting; this macro
26303 * also sets wmp = NULL.
26304 */
26305 FREE_ONLIST_WMAP(un, wmp);
26306 }
26307 /*
26308 * sl_wmp is the wmap on which the
26309 * wait is done. Since tmp_wmp points
26310 * to the in-use wmap, set sl_wmp to
26311 * tmp_wmp and change the state to wait.
26312 */
26313 sl_wmp = tmp_wmp;
26314 state = SD_WM_WAIT_MAP;
26315 } else {
26316 state = SD_WM_LOCK_RANGE;
26317 }
26318 
26319 }
26320 break;
26321 
26322 case SD_WM_LOCK_RANGE:
26323 ASSERT(un->un_wm_cache);
26324 /*
26325 * The range needs to be locked, so try to get a wmap.
26326 * First attempt it with KM_NOSLEEP; we want to avoid a
26327 * sleep if possible, as we would have to release the
26328 * sd mutex in order to sleep.
26329 */
26330 if (wmp == NULL)
26331 wmp = kmem_cache_alloc(un->un_wm_cache,
26332 KM_NOSLEEP);
26333 if (wmp == NULL) {
26334 mutex_exit(SD_MUTEX(un));
26335 _NOTE(DATA_READABLE_WITHOUT_LOCK
26336 (sd_lun::un_wm_cache))
26337 wmp = kmem_cache_alloc(un->un_wm_cache,
26338 KM_SLEEP);
26339 mutex_enter(SD_MUTEX(un));
26340 /*
26341 * We released the mutex, so recheck by going
26342 * back to the check list state.
26343 */
26344 state = SD_WM_CHK_LIST;
26345 } else {
26346 /*
26347 * We exit the state machine since we
26348 * have the wmap. Do the housekeeping first:
26349 * place the wmap on the wmap list if it is not
26350 * on it already, and then set the state to done.
26351 */
26352 wmp->wm_start = startb;
26353 wmp->wm_end = endb;
26354 wmp->wm_flags = typ | SD_WM_BUSY;
26355 if (typ & SD_WTYPE_RMW) {
26356 un->un_rmw_count++;
26357 }
26358 /*
26359 * If not already on the list, link it in.
26360 */
26361 if (!ONLIST(un, wmp)) {
26362 wmp->wm_next = un->un_wm;
26363 wmp->wm_prev = NULL;
26364 if (wmp->wm_next)
26365 wmp->wm_next->wm_prev = wmp;
26366 un->un_wm = wmp;
26367 }
26368 state = SD_WM_DONE;
26369 }
26370 break;
26371 
26372 case SD_WM_WAIT_MAP:
26373 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
26374 /*
26375 * Wait is done on sl_wmp, which is set in the
26376 * check_list state.
26377 */
26378 sl_wmp->wm_wanted_count++;
26379 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
26380 sl_wmp->wm_wanted_count--;
26381 /*
26382 * We can reuse the memory from the completed sl_wmp
26383 * lock range for our new lock, but only if no one is
26384 * waiting for it.
26385 */
26386 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
26387 if (sl_wmp->wm_wanted_count == 0) {
26388 if (wmp != NULL)
26389 CHK_N_FREEWMP(un, wmp);
26390 wmp = sl_wmp;
26391 }
26392 sl_wmp = NULL;
26393 /*
26394 * After waking up, we need to recheck the range for
26395 * availability.
26396 */
26397 state = SD_WM_CHK_LIST;
26398 break;
26399 
26400 default:
26401 panic("sd_range_lock: "
26402 "Unknown state %d in sd_range_lock", state);
26403 /*NOTREACHED*/
26404 } /* switch(state) */
26405 
26406 } /* while(state != SD_WM_DONE) */
26407 
26408 mutex_exit(SD_MUTEX(un));
26409 
26410 ASSERT(wmp != NULL);
26411 
26412 return (wmp);
26413 }
26414 
26415 
26416 /*
26417 * Function: sd_get_range()
26418 *
26419 * Description: Find whether there is any I/O overlapping this one.
26420 * Returns the write-map of the 1st such I/O, NULL otherwise.
26421 *
26422 * Arguments: un - sd_lun structure for the device.
26423 * startb - The starting block number
26424 * endb - The end block number
26425 *
26426 * Return Code: wm - pointer to the wmap structure.
26427 */
26428 
26429 static struct sd_w_map *
26430 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
26431 {
26432 struct sd_w_map *wmp;
26433 
26434 ASSERT(un != NULL);
26435 
26436 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
26437 if (!(wmp->wm_flags & SD_WM_BUSY)) {
26438 continue;
26439 }
26440 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
26441 break;
26442 }
26443 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
26444 break;
26445 }
26446 }
26447 
26448 return (wmp);
26449 }
26450 
26451 
26452 /*
26453 * Function: sd_free_inlist_wmap()
26454 *
26455 * Description: Unlink and free a write map struct.
26456 *
26457 * Arguments: un - sd_lun structure for the device.
26458 * wmp - sd_w_map which needs to be unlinked.
26459 */
26460 
26461 static void
26462 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
26463 {
26464 ASSERT(un != NULL);
26465 
26466 if (un->un_wm == wmp) {
26467 un->un_wm = wmp->wm_next;
26468 } else {
26469 wmp->wm_prev->wm_next = wmp->wm_next;
26470 }
26471 
26472 if (wmp->wm_next) {
26473 wmp->wm_next->wm_prev = wmp->wm_prev;
26474 }
26475 
26476 wmp->wm_next = wmp->wm_prev = NULL;
26477 
26478 kmem_cache_free(un->un_wm_cache, wmp);
26479 }
26480 
26481 
26482 /*
26483 * Function: sd_range_unlock()
26484 *
26485 * Description: Unlock the range locked by wm.
26486 * Free the write map if nobody else is waiting on it.
26487 *
26488 * Arguments: un - sd_lun structure for the device.
26489 * wm - sd_w_map which needs to be unlocked.
26490 */
26491 
26492 static void
26493 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
26494 {
26495 ASSERT(un != NULL);
26496 ASSERT(wm != NULL);
26497 ASSERT(!mutex_owned(SD_MUTEX(un)));
26498 
26499 mutex_enter(SD_MUTEX(un));
26500 
26501 if (wm->wm_flags & SD_WTYPE_RMW) {
26502 un->un_rmw_count--;
26503 }
26504 
26505 if (wm->wm_wanted_count) {
26506 wm->wm_flags = 0;
26507 /*
26508 * Broadcast that the wmap is available now.
26509 */
26510 cv_broadcast(&wm->wm_avail);
26511 } else {
26512 /*
26513 * If no one is waiting on the map, it should be freed.
26514 */
26515 sd_free_inlist_wmap(un, wm);
26516 }
26517 
26518 mutex_exit(SD_MUTEX(un));
26519 }
26520 
26521 
26522 /*
26523 * Function: sd_read_modify_write_task
26524 *
26525 * Description: Called from a taskq thread to initiate the write phase of
26526 * a read-modify-write request. This is used for targets where
26527 * un->un_sys_blocksize != un->un_tgt_blocksize.
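 *
 * (Editorial example, not from the original comment: with a 512-byte
 * un_sys_blocksize and a 2048-byte un_tgt_blocksize, a single 512-byte
 * write dirties only a quarter of a target block, so the full 2048-byte
 * block is first read, modified in place, and then written back; this
 * routine initiates that final write.)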
26528 * 26529 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 26530 * 26531 * Context: Called under taskq thread context. 26532 */ 26533 26534 static void 26535 sd_read_modify_write_task(void *arg) 26536 { 26537 struct sd_mapblocksize_info *bsp; 26538 struct buf *bp; 26539 struct sd_xbuf *xp; 26540 struct sd_lun *un; 26541 26542 bp = arg; /* The bp is given in arg */ 26543 ASSERT(bp != NULL); 26544 26545 /* Get the pointer to the layer-private data struct */ 26546 xp = SD_GET_XBUF(bp); 26547 ASSERT(xp != NULL); 26548 bsp = xp->xb_private; 26549 ASSERT(bsp != NULL); 26550 26551 un = SD_GET_UN(bp); 26552 ASSERT(un != NULL); 26553 ASSERT(!mutex_owned(SD_MUTEX(un))); 26554 26555 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26556 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 26557 26558 /* 26559 * This is the write phase of a read-modify-write request, called 26560 * under the context of a taskq thread in response to the completion 26561 * of the read portion of the rmw request completing under interrupt 26562 * context. The write request must be sent from here down the iostart 26563 * chain as if it were being sent from sd_mapblocksize_iostart(), so 26564 * we use the layer index saved in the layer-private data area. 26565 */ 26566 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 26567 26568 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 26569 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 26570 } 26571 26572 26573 /* 26574 * Function: sddump_do_read_of_rmw() 26575 * 26576 * Description: This routine will be called from sddump, If sddump is called 26577 * with an I/O which not aligned on device blocksize boundary 26578 * then the write has to be converted to read-modify-write. 26579 * Do the read part here in order to keep sddump simple. 26580 * Note - That the sd_mutex is held across the call to this 26581 * routine. 26582 * 26583 * Arguments: un - sd_lun 26584 * blkno - block number in terms of media block size. 26585 * nblk - number of blocks. 26586 * bpp - pointer to pointer to the buf structure. On return 26587 * from this function, *bpp points to the valid buffer 26588 * to which the write has to be done. 26589 * 26590 * Return Code: 0 for success or errno-type return code 26591 */ 26592 26593 static int 26594 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 26595 struct buf **bpp) 26596 { 26597 int err; 26598 int i; 26599 int rval; 26600 struct buf *bp; 26601 struct scsi_pkt *pkt = NULL; 26602 uint32_t target_blocksize; 26603 26604 ASSERT(un != NULL); 26605 ASSERT(mutex_owned(SD_MUTEX(un))); 26606 26607 target_blocksize = un->un_tgt_blocksize; 26608 26609 mutex_exit(SD_MUTEX(un)); 26610 26611 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 26612 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 26613 if (bp == NULL) { 26614 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26615 "no resources for dumping; giving up"); 26616 err = ENOMEM; 26617 goto done; 26618 } 26619 26620 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 26621 blkno, nblk); 26622 if (rval != 0) { 26623 scsi_free_consistent_buf(bp); 26624 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26625 "no resources for dumping; giving up"); 26626 err = ENOMEM; 26627 goto done; 26628 } 26629 26630 pkt->pkt_flags |= FLAG_NOINTR; 26631 26632 err = EIO; 26633 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26634 26635 /* 26636 * Scsi_poll returns 0 (success) if the command completes and 26637 * the status block is STATUS_GOOD. 
We should only check 26638 * errors if this condition is not true. Even then we should 26639 * send our own request sense packet only if we have a check 26640 * condition and auto request sense has not been performed by 26641 * the hba. 26642 */ 26643 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 26644 26645 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 26646 err = 0; 26647 break; 26648 } 26649 26650 /* 26651 * Check CMD_DEV_GONE 1st, give up if device is gone, 26652 * no need to read RQS data. 26653 */ 26654 if (pkt->pkt_reason == CMD_DEV_GONE) { 26655 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26656 "Error while dumping state with rmw..." 26657 "Device is gone\n"); 26658 break; 26659 } 26660 26661 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 26662 SD_INFO(SD_LOG_DUMP, un, 26663 "sddump: read failed with CHECK, try # %d\n", i); 26664 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 26665 (void) sd_send_polled_RQS(un); 26666 } 26667 26668 continue; 26669 } 26670 26671 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 26672 int reset_retval = 0; 26673 26674 SD_INFO(SD_LOG_DUMP, un, 26675 "sddump: read failed with BUSY, try # %d\n", i); 26676 26677 if (un->un_f_lun_reset_enabled == TRUE) { 26678 reset_retval = scsi_reset(SD_ADDRESS(un), 26679 RESET_LUN); 26680 } 26681 if (reset_retval == 0) { 26682 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26683 } 26684 (void) sd_send_polled_RQS(un); 26685 26686 } else { 26687 SD_INFO(SD_LOG_DUMP, un, 26688 "sddump: read failed with 0x%x, try # %d\n", 26689 SD_GET_PKT_STATUS(pkt), i); 26690 mutex_enter(SD_MUTEX(un)); 26691 sd_reset_target(un, pkt); 26692 mutex_exit(SD_MUTEX(un)); 26693 } 26694 26695 /* 26696 * If we are not getting anywhere with lun/target resets, 26697 * let's reset the bus. 26698 */ 26699 if (i > SD_NDUMP_RETRIES/2) { 26700 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26701 (void) sd_send_polled_RQS(un); 26702 } 26703 26704 } 26705 scsi_destroy_pkt(pkt); 26706 26707 if (err != 0) { 26708 scsi_free_consistent_buf(bp); 26709 *bpp = NULL; 26710 } else { 26711 *bpp = bp; 26712 } 26713 26714 done: 26715 mutex_enter(SD_MUTEX(un)); 26716 return (err); 26717 } 26718 26719 26720 /* 26721 * Function: sd_failfast_flushq 26722 * 26723 * Description: Take all bp's on the wait queue that have B_FAILFAST set 26724 * in b_flags and move them onto the failfast queue, then kick 26725 * off a thread to return all bp's on the failfast queue to 26726 * their owners with an error set. 26727 * 26728 * Arguments: un - pointer to the soft state struct for the instance. 26729 * 26730 * Context: may execute in interrupt context. 26731 */ 26732 26733 static void 26734 sd_failfast_flushq(struct sd_lun *un) 26735 { 26736 struct buf *bp; 26737 struct buf *next_waitq_bp; 26738 struct buf *prev_waitq_bp = NULL; 26739 26740 ASSERT(un != NULL); 26741 ASSERT(mutex_owned(SD_MUTEX(un))); 26742 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 26743 ASSERT(un->un_failfast_bp == NULL); 26744 26745 SD_TRACE(SD_LOG_IO_FAILFAST, un, 26746 "sd_failfast_flushq: entry: un:0x%p\n", un); 26747 26748 /* 26749 * Check if we should flush all bufs when entering failfast state, or 26750 * just those with B_FAILFAST set. 26751 */ 26752 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 26753 /* 26754 * Move *all* bp's on the wait queue to the failfast flush 26755 * queue, including those that do NOT have B_FAILFAST set. 
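 *
 * (Editorial note: this branch is a constant-time list splice; for
 * example, with waitq A->B->C and failfast queue X->Y, the result is
 * failfast queue X->Y->A->B->C and an empty waitq. The buf names here
 * are hypothetical.)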
26756 */ 26757 if (un->un_failfast_headp == NULL) { 26758 ASSERT(un->un_failfast_tailp == NULL); 26759 un->un_failfast_headp = un->un_waitq_headp; 26760 } else { 26761 ASSERT(un->un_failfast_tailp != NULL); 26762 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 26763 } 26764 26765 un->un_failfast_tailp = un->un_waitq_tailp; 26766 26767 /* update kstat for each bp moved out of the waitq */ 26768 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 26769 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26770 } 26771 26772 /* empty the waitq */ 26773 un->un_waitq_headp = un->un_waitq_tailp = NULL; 26774 26775 } else { 26776 /* 26777 * Go thru the wait queue, pick off all entries with 26778 * B_FAILFAST set, and move these onto the failfast queue. 26779 */ 26780 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 26781 /* 26782 * Save the pointer to the next bp on the wait queue, 26783 * so we get to it on the next iteration of this loop. 26784 */ 26785 next_waitq_bp = bp->av_forw; 26786 26787 /* 26788 * If this bp from the wait queue does NOT have 26789 * B_FAILFAST set, just move on to the next element 26790 * in the wait queue. Note, this is the only place 26791 * where it is correct to set prev_waitq_bp. 26792 */ 26793 if ((bp->b_flags & B_FAILFAST) == 0) { 26794 prev_waitq_bp = bp; 26795 continue; 26796 } 26797 26798 /* 26799 * Remove the bp from the wait queue. 26800 */ 26801 if (bp == un->un_waitq_headp) { 26802 /* The bp is the first element of the waitq. */ 26803 un->un_waitq_headp = next_waitq_bp; 26804 if (un->un_waitq_headp == NULL) { 26805 /* The wait queue is now empty */ 26806 un->un_waitq_tailp = NULL; 26807 } 26808 } else { 26809 /* 26810 * The bp is either somewhere in the middle 26811 * or at the end of the wait queue. 26812 */ 26813 ASSERT(un->un_waitq_headp != NULL); 26814 ASSERT(prev_waitq_bp != NULL); 26815 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 26816 == 0); 26817 if (bp == un->un_waitq_tailp) { 26818 /* bp is the last entry on the waitq. */ 26819 ASSERT(next_waitq_bp == NULL); 26820 un->un_waitq_tailp = prev_waitq_bp; 26821 } 26822 prev_waitq_bp->av_forw = next_waitq_bp; 26823 } 26824 bp->av_forw = NULL; 26825 26826 /* 26827 * update kstat since the bp is moved out of 26828 * the waitq 26829 */ 26830 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 26831 26832 /* 26833 * Now put the bp onto the failfast queue. 26834 */ 26835 if (un->un_failfast_headp == NULL) { 26836 /* failfast queue is currently empty */ 26837 ASSERT(un->un_failfast_tailp == NULL); 26838 un->un_failfast_headp = 26839 un->un_failfast_tailp = bp; 26840 } else { 26841 /* Add the bp to the end of the failfast q */ 26842 ASSERT(un->un_failfast_tailp != NULL); 26843 ASSERT(un->un_failfast_tailp->b_flags & 26844 B_FAILFAST); 26845 un->un_failfast_tailp->av_forw = bp; 26846 un->un_failfast_tailp = bp; 26847 } 26848 } 26849 } 26850 26851 /* 26852 * Now return all bp's on the failfast queue to their owners. 26853 */ 26854 while ((bp = un->un_failfast_headp) != NULL) { 26855 26856 un->un_failfast_headp = bp->av_forw; 26857 if (un->un_failfast_headp == NULL) { 26858 un->un_failfast_tailp = NULL; 26859 } 26860 26861 /* 26862 * We want to return the bp with a failure error code, but 26863 * we do not want a call to sd_start_cmds() to occur here, 26864 * so use sd_return_failed_command_no_restart() instead of 26865 * sd_return_failed_command(). 26866 */ 26867 sd_return_failed_command_no_restart(un, bp, EIO); 26868 } 26869 26870 /* Flush the xbuf queues if required. 
*/
26871 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
26872 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
26873 }
26874 
26875 SD_TRACE(SD_LOG_IO_FAILFAST, un,
26876 "sd_failfast_flushq: exit: un:0x%p\n", un);
26877 }
26878 
26879 
26880 /*
26881 * Function: sd_failfast_flushq_callback
26882 *
26883 * Description: Return TRUE if the given bp meets the criteria for failfast
26884 * flushing. Used with ddi_xbuf_flushq(9F).
26885 *
26886 * Arguments: bp - ptr to buf struct to be examined.
26887 *
26888 * Context: Any
26889 */
26890 
26891 static int
26892 sd_failfast_flushq_callback(struct buf *bp)
26893 {
26894 /*
26895 * Return TRUE if (1) we want to flush ALL bufs when the failfast
26896 * state is entered; OR (2) the given bp has B_FAILFAST set.
26897 */
26898 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
26899 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
26900 }
26901 
26902 
26903 
26904 /*
26905 * Function: sd_setup_next_xfer
26906 *
26907 * Description: Prepare the next I/O operation when using DMA_PARTIAL
26908 *
26909 */
26910 
26911 static int
26912 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
26913 struct scsi_pkt *pkt, struct sd_xbuf *xp)
26914 {
26915 ssize_t num_blks_not_xfered;
26916 daddr_t strt_blk_num;
26917 ssize_t bytes_not_xfered;
26918 int rval;
26919 
26920 ASSERT(pkt->pkt_resid == 0);
26921 
26922 /*
26923 * Calculate next block number and amount to be transferred.
26924 *
26925 * How much data has NOT been transferred to the HBA yet.
26926 */
26927 bytes_not_xfered = xp->xb_dma_resid;
26928 
26929 /*
26930 * Figure out how many blocks have NOT been transferred to the HBA yet.
26931 */
26932 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
26933 
26934 /*
26935 * Set the starting block number to the end of what WAS transferred.
26936 */
26937 strt_blk_num = xp->xb_blkno +
26938 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
26939 
26940 /*
26941 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
26942 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
26943 * the disk mutex here.
26944 */
26945 rval = sd_setup_next_rw_pkt(un, pkt, bp,
26946 strt_blk_num, num_blks_not_xfered);
26947 
26948 if (rval == 0) {
26949 
26950 /*
26951 * Success.
26952 *
26953 * Adjust things if there are still more blocks to be
26954 * transferred.
26955 */
26956 xp->xb_dma_resid = pkt->pkt_resid;
26957 pkt->pkt_resid = 0;
26958 
26959 return (1);
26960 }
26961 
26962 /*
26963 * There's really only one possible return value from
26964 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
26965 * returns NULL.
26966 */
26967 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
26968 
26969 bp->b_resid = bp->b_bcount;
26970 bp->b_flags |= B_ERROR;
26971 
26972 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26973 "Error setting up next portion of DMA transfer\n");
26974 
26975 return (0);
26976 }
26977 
26978 /*
26979 * Function: sd_panic_for_res_conflict
26980 *
26981 * Description: Call panic with a string formatted with "Reservation Conflict"
26982 * and a human readable identifier indicating the SD instance
26983 * that experienced the reservation conflict.
26984 *
26985 * Arguments: un - pointer to the soft state struct for the instance.
26986 *
26987 * Context: may execute in interrupt context.
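 *
 * (Editorial note: with the format string below, the resulting panic
 * message reads, for example,
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,30@10/sd@1,0
 *
 * where the device path is hypothetical output of ddi_pathname().)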
26988 */ 26989 26990 #define SD_RESV_CONFLICT_FMT_LEN 40 26991 void 26992 sd_panic_for_res_conflict(struct sd_lun *un) 26993 { 26994 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 26995 char path_str[MAXPATHLEN]; 26996 26997 (void) snprintf(panic_str, sizeof (panic_str), 26998 "Reservation Conflict\nDisk: %s", 26999 ddi_pathname(SD_DEVINFO(un), path_str)); 27000 27001 panic(panic_str); 27002 } 27003 27004 /* 27005 * Note: The following sd_faultinjection_ioctl( ) routines implement 27006 * driver support for handling fault injection for error analysis 27007 * causing faults in multiple layers of the driver. 27008 * 27009 */ 27010 27011 #ifdef SD_FAULT_INJECTION 27012 static uint_t sd_fault_injection_on = 0; 27013 27014 /* 27015 * Function: sd_faultinjection_ioctl() 27016 * 27017 * Description: This routine is the driver entry point for handling 27018 * faultinjection ioctls to inject errors into the 27019 * layer model 27020 * 27021 * Arguments: cmd - the ioctl cmd received 27022 * arg - the arguments from user and returns 27023 */ 27024 27025 static void 27026 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 27027 27028 uint_t i; 27029 uint_t rval; 27030 27031 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 27032 27033 mutex_enter(SD_MUTEX(un)); 27034 27035 switch (cmd) { 27036 case SDIOCRUN: 27037 /* Allow pushed faults to be injected */ 27038 SD_INFO(SD_LOG_SDTEST, un, 27039 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 27040 27041 sd_fault_injection_on = 1; 27042 27043 SD_INFO(SD_LOG_IOERR, un, 27044 "sd_faultinjection_ioctl: run finished\n"); 27045 break; 27046 27047 case SDIOCSTART: 27048 /* Start Injection Session */ 27049 SD_INFO(SD_LOG_SDTEST, un, 27050 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 27051 27052 sd_fault_injection_on = 0; 27053 un->sd_injection_mask = 0xFFFFFFFF; 27054 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27055 un->sd_fi_fifo_pkt[i] = NULL; 27056 un->sd_fi_fifo_xb[i] = NULL; 27057 un->sd_fi_fifo_un[i] = NULL; 27058 un->sd_fi_fifo_arq[i] = NULL; 27059 } 27060 un->sd_fi_fifo_start = 0; 27061 un->sd_fi_fifo_end = 0; 27062 27063 mutex_enter(&(un->un_fi_mutex)); 27064 un->sd_fi_log[0] = '\0'; 27065 un->sd_fi_buf_len = 0; 27066 mutex_exit(&(un->un_fi_mutex)); 27067 27068 SD_INFO(SD_LOG_IOERR, un, 27069 "sd_faultinjection_ioctl: start finished\n"); 27070 break; 27071 27072 case SDIOCSTOP: 27073 /* Stop Injection Session */ 27074 SD_INFO(SD_LOG_SDTEST, un, 27075 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 27076 sd_fault_injection_on = 0; 27077 un->sd_injection_mask = 0x0; 27078 27079 /* Empty stray or unuseds structs from fifo */ 27080 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 27081 if (un->sd_fi_fifo_pkt[i] != NULL) { 27082 kmem_free(un->sd_fi_fifo_pkt[i], 27083 sizeof (struct sd_fi_pkt)); 27084 } 27085 if (un->sd_fi_fifo_xb[i] != NULL) { 27086 kmem_free(un->sd_fi_fifo_xb[i], 27087 sizeof (struct sd_fi_xb)); 27088 } 27089 if (un->sd_fi_fifo_un[i] != NULL) { 27090 kmem_free(un->sd_fi_fifo_un[i], 27091 sizeof (struct sd_fi_un)); 27092 } 27093 if (un->sd_fi_fifo_arq[i] != NULL) { 27094 kmem_free(un->sd_fi_fifo_arq[i], 27095 sizeof (struct sd_fi_arq)); 27096 } 27097 un->sd_fi_fifo_pkt[i] = NULL; 27098 un->sd_fi_fifo_un[i] = NULL; 27099 un->sd_fi_fifo_xb[i] = NULL; 27100 un->sd_fi_fifo_arq[i] = NULL; 27101 } 27102 un->sd_fi_fifo_start = 0; 27103 un->sd_fi_fifo_end = 0; 27104 27105 SD_INFO(SD_LOG_IOERR, un, 27106 "sd_faultinjection_ioctl: stop finished\n"); 27107 break; 27108 27109 case SDIOCINSERTPKT: 27110 /* Store a 
packet struct to be pushed onto fifo */ 27111 SD_INFO(SD_LOG_SDTEST, un, 27112 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 27113 27114 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27115 27116 sd_fault_injection_on = 0; 27117 27118 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 27119 if (un->sd_fi_fifo_pkt[i] != NULL) { 27120 kmem_free(un->sd_fi_fifo_pkt[i], 27121 sizeof (struct sd_fi_pkt)); 27122 } 27123 if (arg != NULL) { 27124 un->sd_fi_fifo_pkt[i] = 27125 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 27126 if (un->sd_fi_fifo_pkt[i] == NULL) { 27127 /* Alloc failed don't store anything */ 27128 break; 27129 } 27130 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 27131 sizeof (struct sd_fi_pkt), 0); 27132 if (rval == -1) { 27133 kmem_free(un->sd_fi_fifo_pkt[i], 27134 sizeof (struct sd_fi_pkt)); 27135 un->sd_fi_fifo_pkt[i] = NULL; 27136 } 27137 } else { 27138 SD_INFO(SD_LOG_IOERR, un, 27139 "sd_faultinjection_ioctl: pkt null\n"); 27140 } 27141 break; 27142 27143 case SDIOCINSERTXB: 27144 /* Store a xb struct to be pushed onto fifo */ 27145 SD_INFO(SD_LOG_SDTEST, un, 27146 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 27147 27148 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27149 27150 sd_fault_injection_on = 0; 27151 27152 if (un->sd_fi_fifo_xb[i] != NULL) { 27153 kmem_free(un->sd_fi_fifo_xb[i], 27154 sizeof (struct sd_fi_xb)); 27155 un->sd_fi_fifo_xb[i] = NULL; 27156 } 27157 if (arg != NULL) { 27158 un->sd_fi_fifo_xb[i] = 27159 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 27160 if (un->sd_fi_fifo_xb[i] == NULL) { 27161 /* Alloc failed don't store anything */ 27162 break; 27163 } 27164 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 27165 sizeof (struct sd_fi_xb), 0); 27166 27167 if (rval == -1) { 27168 kmem_free(un->sd_fi_fifo_xb[i], 27169 sizeof (struct sd_fi_xb)); 27170 un->sd_fi_fifo_xb[i] = NULL; 27171 } 27172 } else { 27173 SD_INFO(SD_LOG_IOERR, un, 27174 "sd_faultinjection_ioctl: xb null\n"); 27175 } 27176 break; 27177 27178 case SDIOCINSERTUN: 27179 /* Store a un struct to be pushed onto fifo */ 27180 SD_INFO(SD_LOG_SDTEST, un, 27181 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 27182 27183 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27184 27185 sd_fault_injection_on = 0; 27186 27187 if (un->sd_fi_fifo_un[i] != NULL) { 27188 kmem_free(un->sd_fi_fifo_un[i], 27189 sizeof (struct sd_fi_un)); 27190 un->sd_fi_fifo_un[i] = NULL; 27191 } 27192 if (arg != NULL) { 27193 un->sd_fi_fifo_un[i] = 27194 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 27195 if (un->sd_fi_fifo_un[i] == NULL) { 27196 /* Alloc failed don't store anything */ 27197 break; 27198 } 27199 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 27200 sizeof (struct sd_fi_un), 0); 27201 if (rval == -1) { 27202 kmem_free(un->sd_fi_fifo_un[i], 27203 sizeof (struct sd_fi_un)); 27204 un->sd_fi_fifo_un[i] = NULL; 27205 } 27206 27207 } else { 27208 SD_INFO(SD_LOG_IOERR, un, 27209 "sd_faultinjection_ioctl: un null\n"); 27210 } 27211 27212 break; 27213 27214 case SDIOCINSERTARQ: 27215 /* Store a arq struct to be pushed onto fifo */ 27216 SD_INFO(SD_LOG_SDTEST, un, 27217 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 27218 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 27219 27220 sd_fault_injection_on = 0; 27221 27222 if (un->sd_fi_fifo_arq[i] != NULL) { 27223 kmem_free(un->sd_fi_fifo_arq[i], 27224 sizeof (struct sd_fi_arq)); 27225 un->sd_fi_fifo_arq[i] = NULL; 27226 } 27227 if (arg != NULL) { 27228 un->sd_fi_fifo_arq[i] = 27229 kmem_alloc(sizeof (struct sd_fi_arq), 
KM_NOSLEEP);
27230 if (un->sd_fi_fifo_arq[i] == NULL) {
27231 /* Alloc failed; don't store anything */
27232 break;
27233 }
27234 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
27235 sizeof (struct sd_fi_arq), 0);
27236 if (rval == -1) {
27237 kmem_free(un->sd_fi_fifo_arq[i],
27238 sizeof (struct sd_fi_arq));
27239 un->sd_fi_fifo_arq[i] = NULL;
27240 }
27241 
27242 } else {
27243 SD_INFO(SD_LOG_IOERR, un,
27244 "sd_faultinjection_ioctl: arq null\n");
27245 }
27246 
27247 break;
27248 
27249 case SDIOCPUSH:
27250 /* Push stored xb, pkt, un, and arq onto fifo */
27251 sd_fault_injection_on = 0;
27252 
27253 if (arg != NULL) {
27254 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
27255 if (rval != -1 &&
27256 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
27257 un->sd_fi_fifo_end += i;
27258 }
27259 } else {
27260 SD_INFO(SD_LOG_IOERR, un,
27261 "sd_faultinjection_ioctl: push arg null\n");
27262 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
27263 un->sd_fi_fifo_end++;
27264 }
27265 }
27266 SD_INFO(SD_LOG_IOERR, un,
27267 "sd_faultinjection_ioctl: push to end=%d\n",
27268 un->sd_fi_fifo_end);
27269 break;
27270 
27271 case SDIOCRETRIEVE:
27272 /* Return buffer of log from Injection session */
27273 SD_INFO(SD_LOG_SDTEST, un,
27274 "sd_faultinjection_ioctl: Injecting Fault Retrieve");
27275 
27276 sd_fault_injection_on = 0;
27277 
27278 mutex_enter(&(un->un_fi_mutex));
27279 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
27280 un->sd_fi_buf_len+1, 0);
27281 mutex_exit(&(un->un_fi_mutex));
27282 
27283 if (rval == -1) {
27284 /*
27285 * arg is possibly invalid; set
27286 * it to NULL for the return
27287 */
27288 arg = NULL;
27289 }
27290 break;
27291 }
27292 
27293 mutex_exit(SD_MUTEX(un));
27294 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
27295 " exit\n");
27296 }
27297 
27298 
27299 /*
27300 * Function: sd_injection_log()
27301 *
27302 * Description: This routine appends buf to the existing injection log
27303 * for retrieval via sd_faultinjection_ioctl(), for use in
27304 * fault detection and recovery
27305 *
27306 * Arguments: buf - the string to add to the log
27307 */
27308 
27309 static void
27310 sd_injection_log(char *buf, struct sd_lun *un)
27311 {
27312 uint_t len;
27313 
27314 ASSERT(un != NULL);
27315 ASSERT(buf != NULL);
27316 
27317 mutex_enter(&(un->un_fi_mutex));
27318 
27319 len = min(strlen(buf), 255);
27320 /* Add logged value to Injection log to be returned later */
27321 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
27322 uint_t offset = strlen((char *)un->sd_fi_log);
27323 char *destp = (char *)un->sd_fi_log + offset;
27324 int i;
27325 for (i = 0; i < len; i++) {
27326 *destp++ = *buf++;
27327 }
27328 un->sd_fi_buf_len += len;
27329 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
27330 }
27331 
27332 mutex_exit(&(un->un_fi_mutex));
27333 }
27334 
27335 
27336 /*
27337 * Function: sd_faultinjection()
27338 *
27339 * Description: This routine takes the pkt and changes its
27340 * contents based on the error injection scenario.
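 *
 * (Editorial summary, not from the original comment: a test harness
 * typically drives this path with the ioctls above, roughly in the order
 *
 *	SDIOCSTART	- reset the injection fifo and log
 *	SDIOCINSERT*	- stage pkt/xb/un/arq fault data
 *	SDIOCPUSH	- commit the staged entries
 *	SDIOCRUN	- arm injection
 *
 * after which sdintr() calls this routine to apply the next staged
 * fault to a completing command.)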
27341 * 27342 * Arguments: pktp - packet to be changed 27343 */ 27344 27345 static void 27346 sd_faultinjection(struct scsi_pkt *pktp) 27347 { 27348 uint_t i; 27349 struct sd_fi_pkt *fi_pkt; 27350 struct sd_fi_xb *fi_xb; 27351 struct sd_fi_un *fi_un; 27352 struct sd_fi_arq *fi_arq; 27353 struct buf *bp; 27354 struct sd_xbuf *xb; 27355 struct sd_lun *un; 27356 27357 ASSERT(pktp != NULL); 27358 27359 /* pull bp xb and un from pktp */ 27360 bp = (struct buf *)pktp->pkt_private; 27361 xb = SD_GET_XBUF(bp); 27362 un = SD_GET_UN(bp); 27363 27364 ASSERT(un != NULL); 27365 27366 mutex_enter(SD_MUTEX(un)); 27367 27368 SD_TRACE(SD_LOG_SDTEST, un, 27369 "sd_faultinjection: entry Injection from sdintr\n"); 27370 27371 /* if injection is off return */ 27372 if (sd_fault_injection_on == 0 || 27373 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 27374 mutex_exit(SD_MUTEX(un)); 27375 return; 27376 } 27377 27378 27379 /* take next set off fifo */ 27380 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 27381 27382 fi_pkt = un->sd_fi_fifo_pkt[i]; 27383 fi_xb = un->sd_fi_fifo_xb[i]; 27384 fi_un = un->sd_fi_fifo_un[i]; 27385 fi_arq = un->sd_fi_fifo_arq[i]; 27386 27387 27388 /* set variables accordingly */ 27389 /* set pkt if it was on fifo */ 27390 if (fi_pkt != NULL) { 27391 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 27392 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 27393 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 27394 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 27395 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 27396 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 27397 27398 } 27399 27400 /* set xb if it was on fifo */ 27401 if (fi_xb != NULL) { 27402 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 27403 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 27404 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 27405 SD_CONDSET(xb, xb, xb_victim_retry_count, 27406 "xb_victim_retry_count"); 27407 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 27408 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 27409 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 27410 27411 /* copy in block data from sense */ 27412 if (fi_xb->xb_sense_data[0] != -1) { 27413 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 27414 SENSE_LENGTH); 27415 } 27416 27417 /* copy in extended sense codes */ 27418 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 27419 "es_code"); 27420 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 27421 "es_key"); 27422 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 27423 "es_add_code"); 27424 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 27425 es_qual_code, "es_qual_code"); 27426 } 27427 27428 /* set un if it was on fifo */ 27429 if (fi_un != NULL) { 27430 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 27431 SD_CONDSET(un, un, un_ctype, "un_ctype"); 27432 SD_CONDSET(un, un, un_reset_retry_count, 27433 "un_reset_retry_count"); 27434 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 27435 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 27436 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 27437 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 27438 "un_f_allow_bus_device_reset"); 27439 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 27440 27441 } 27442 27443 /* copy in auto request sense if it was on fifo */ 27444 if (fi_arq != NULL) { 27445 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 27446 } 27447 27448 /* free structs */ 27449 if (un->sd_fi_fifo_pkt[i] != NULL) { 27450 
kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 27451 } 27452 if (un->sd_fi_fifo_xb[i] != NULL) { 27453 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 27454 } 27455 if (un->sd_fi_fifo_un[i] != NULL) { 27456 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 27457 } 27458 if (un->sd_fi_fifo_arq[i] != NULL) { 27459 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 27460 } 27461 27462 /* 27463 * kmem_free does not gurantee to set to NULL 27464 * since we uses these to determine if we set 27465 * values or not lets confirm they are always 27466 * NULL after free 27467 */ 27468 un->sd_fi_fifo_pkt[i] = NULL; 27469 un->sd_fi_fifo_un[i] = NULL; 27470 un->sd_fi_fifo_xb[i] = NULL; 27471 un->sd_fi_fifo_arq[i] = NULL; 27472 27473 un->sd_fi_fifo_start++; 27474 27475 mutex_exit(SD_MUTEX(un)); 27476 27477 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 27478 } 27479 27480 #endif /* SD_FAULT_INJECTION */ 27481 27482 /* 27483 * This routine is invoked in sd_unit_attach(). Before calling it, the 27484 * properties in conf file should be processed already, and "hotpluggable" 27485 * property was processed also. 27486 * 27487 * The sd driver distinguishes 3 different type of devices: removable media, 27488 * non-removable media, and hotpluggable. Below the differences are defined: 27489 * 27490 * 1. Device ID 27491 * 27492 * The device ID of a device is used to identify this device. Refer to 27493 * ddi_devid_register(9F). 27494 * 27495 * For a non-removable media disk device which can provide 0x80 or 0x83 27496 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 27497 * device ID is created to identify this device. For other non-removable 27498 * media devices, a default device ID is created only if this device has 27499 * at least 2 alter cylinders. Otherwise, this device has no devid. 27500 * 27501 * ------------------------------------------------------- 27502 * removable media hotpluggable | Can Have Device ID 27503 * ------------------------------------------------------- 27504 * false false | Yes 27505 * false true | Yes 27506 * true x | No 27507 * ------------------------------------------------------ 27508 * 27509 * 27510 * 2. SCSI group 4 commands 27511 * 27512 * In SCSI specs, only some commands in group 4 command set can use 27513 * 8-byte addresses that can be used to access >2TB storage spaces. 27514 * Other commands have no such capability. Without supporting group4, 27515 * it is impossible to make full use of storage spaces of a disk with 27516 * capacity larger than 2TB. 27517 * 27518 * ----------------------------------------------- 27519 * removable media hotpluggable LP64 | Group 27520 * ----------------------------------------------- 27521 * false false false | 1 27522 * false false true | 4 27523 * false true false | 1 27524 * false true true | 4 27525 * true x x | 5 27526 * ----------------------------------------------- 27527 * 27528 * 27529 * 3. Check for VTOC Label 27530 * 27531 * If a direct-access disk has no EFI label, sd will check if it has a 27532 * valid VTOC label. Now, sd also does that check for removable media 27533 * and hotpluggable devices. 
27534 * 27535 * -------------------------------------------------------------- 27536 * Direct-Access removable media hotpluggable | Check Label 27537 * ------------------------------------------------------------- 27538 * false false false | No 27539 * false false true | No 27540 * false true false | Yes 27541 * false true true | Yes 27542 * true x x | Yes 27543 * -------------------------------------------------------------- 27544 * 27545 * 27546 * 4. Building default VTOC label 27547 * 27548 * As section 3 says, sd checks if some kinds of devices have VTOC label. 27549 * If those devices have no valid VTOC label, sd(7d) will attempt to 27550 * create default VTOC for them. Currently sd creates default VTOC label 27551 * for all devices on x86 platform (VTOC_16), but only for removable 27552 * media devices on SPARC (VTOC_8). 27553 * 27554 * ----------------------------------------------------------- 27555 * removable media hotpluggable platform | Default Label 27556 * ----------------------------------------------------------- 27557 * false false sparc | No 27558 * false true x86 | Yes 27559 * false true sparc | Yes 27560 * true x x | Yes 27561 * ---------------------------------------------------------- 27562 * 27563 * 27564 * 5. Supported blocksizes of target devices 27565 * 27566 * Sd supports non-512-byte blocksize for removable media devices only. 27567 * For other devices, only 512-byte blocksize is supported. This may be 27568 * changed in near future because some RAID devices require non-512-byte 27569 * blocksize 27570 * 27571 * ----------------------------------------------------------- 27572 * removable media hotpluggable | non-512-byte blocksize 27573 * ----------------------------------------------------------- 27574 * false false | No 27575 * false true | No 27576 * true x | Yes 27577 * ----------------------------------------------------------- 27578 * 27579 * 27580 * 6. Automatic mount & unmount 27581 * 27582 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 27583 * if a device is removable media device. It return 1 for removable media 27584 * devices, and 0 for others. 27585 * 27586 * The automatic mounting subsystem should distinguish between the types 27587 * of devices and apply automounting policies to each. 27588 * 27589 * 27590 * 7. fdisk partition management 27591 * 27592 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 27593 * just supports fdisk partitions on x86 platform. On sparc platform, sd 27594 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 27595 * fdisk partitions on both x86 and SPARC platform. 27596 * 27597 * ----------------------------------------------------------- 27598 * platform removable media USB/1394 | fdisk supported 27599 * ----------------------------------------------------------- 27600 * x86 X X | true 27601 * ------------------------------------------------------------ 27602 * sparc X X | false 27603 * ------------------------------------------------------------ 27604 * 27605 * 27606 * 8. MBOOT/MBR 27607 * 27608 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 27609 * read/write mboot for removable media devices on sparc platform. 
 *
 *
 * 7. fdisk partition management
 *
 *    fdisk is the traditional partitioning method on the x86 platform.
 *    The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *    sd does not support fdisk partitions at all. Note: pcfs(7fs) can
 *    recognize fdisk partitions on both x86 and SPARC platforms.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | fdisk supported
 *    -----------------------------------------------------------
 *      x86            X               X    |      true
 *    -----------------------------------------------------------
 *      sparc          X               X    |      false
 *    -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) does not support fdisk on the SPARC platform, it
 *    does support reading and writing the mboot (master boot) block for
 *    removable media devices on SPARC.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | mboot supported
 *    -----------------------------------------------------------
 *      x86            X               X    |      true
 *    -----------------------------------------------------------
 *      sparc        false           false  |      false
 *      sparc        false           true   |      true
 *      sparc        true            false  |      true
 *      sparc        true            true   |      true
 *    -----------------------------------------------------------
 *
 *
 * 9. Error handling during open
 *
 *    If opening a disk device fails, an errno is returned. For some kinds
 *    of errors, the errno returned depends on whether the device is a
 *    removable media device. This brings USB/1394 hard disks in line with
 *    expected hard disk behavior and is not expected to break any
 *    application. (A sketch of handling the two cases follows this
 *    section.)
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable |   errno
 *    ------------------------------------------------------
 *        false             false    |   EIO
 *        false             true     |   EIO
 *        true                x      |   ENXIO
 *    ------------------------------------------------------
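 *
 *    A minimal sketch of a caller distinguishing the two cases; the
 *    device path is hypothetical:
 *
 *        #include <errno.h>
 *        #include <fcntl.h>
 *        #include <stdio.h>
 *        #include <unistd.h>
 *
 *        static void
 *        try_open(const char *path)
 *        {
 *            int fd = open(path, O_RDWR);
 *
 *            if (fd < 0) {
 *                if (errno == ENXIO)       // removable media, per table
 *                    (void) printf("%s: no media\n", path);
 *                else if (errno == EIO)    // fixed disk, per table
 *                    (void) printf("%s: I/O error\n", path);
 *                return;
 *            }
 *            (void) close(fd);
 *        }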
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These ioctls are applicable only to removable media devices.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable | DKIOCEJECT, CDROMEJECT
 *    -----------------------------------------------------------
 *        false             false    |   No
 *        false             true     |   No
 *        true                x      |   Yes
 *    -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB and
 *    FireWire hard disks now have partition kstats as well. (A sketch of
 *    reading them follows this section.)
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable |   kstat
 *    ------------------------------------------------------
 *        false             false    |   Yes
 *        false             true     |   Yes
 *        true                x      |   No
 *    ------------------------------------------------------
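 *
 *    A minimal libkstat sketch of reading one partition's I/O statistics;
 *    the instance number and kstat name ("sd0,a") are hypothetical:
 *
 *        #include <stdio.h>
 *        #include <sys/types.h>
 *        #include <kstat.h>
 *
 *        int
 *        main(void)
 *        {
 *            kstat_ctl_t *kc = kstat_open();
 *            kstat_t *ksp;
 *            kstat_io_t kio;
 *
 *            if (kc == NULL)
 *                return (1);
 *            ksp = kstat_lookup(kc, "sd", 0, "sd0,a");
 *            if (ksp != NULL && kstat_read(kc, ksp, &kio) != -1)
 *                (void) printf("nread %llu nwritten %llu\n",
 *                    (u_longlong_t)kio.nread, (u_longlong_t)kio.nwritten);
 *            (void) kstat_close(kc);
 *            return (0);
 *        }
 *
 *    Compile with -lkstat.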
27771 */ 27772 un->un_f_has_removable_media = TRUE; 27773 27774 /* 27775 * support non-512-byte blocksize of removable media devices 27776 */ 27777 un->un_f_non_devbsize_supported = TRUE; 27778 27779 /* 27780 * Assume that all removable media devices support DOOR_LOCK 27781 */ 27782 un->un_f_doorlock_supported = TRUE; 27783 27784 /* 27785 * For a removable media device, it is possible to be opened 27786 * with NDELAY flag when there is no media in drive, in this 27787 * case we don't care if device is writable. But if without 27788 * NDELAY flag, we need to check if media is write-protected. 27789 */ 27790 un->un_f_chk_wp_open = TRUE; 27791 27792 /* 27793 * need to start a SCSI watch thread to monitor media state, 27794 * when media is being inserted or ejected, notify syseventd. 27795 */ 27796 un->un_f_monitor_media_state = TRUE; 27797 27798 /* 27799 * Some devices don't support START_STOP_UNIT command. 27800 * Therefore, we'd better check if a device supports it 27801 * before sending it. 27802 */ 27803 un->un_f_check_start_stop = TRUE; 27804 27805 /* 27806 * support eject media ioctl: 27807 * FDEJECT, DKIOCEJECT, CDROMEJECT 27808 */ 27809 un->un_f_eject_media_supported = TRUE; 27810 27811 /* 27812 * Because many removable-media devices don't support 27813 * LOG_SENSE, we couldn't use this command to check if 27814 * a removable media device support power-management. 27815 * We assume that they support power-management via 27816 * START_STOP_UNIT command and can be spun up and down 27817 * without limitations. 27818 */ 27819 un->un_f_pm_supported = TRUE; 27820 27821 /* 27822 * Need to create a zero length (Boolean) property 27823 * removable-media for the removable media devices. 27824 * Note that the return value of the property is not being 27825 * checked, since if unable to create the property 27826 * then do not want the attach to fail altogether. Consistent 27827 * with other property creation in attach. 27828 */ 27829 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 27830 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 27831 27832 } else { 27833 /* 27834 * create device ID for device 27835 */ 27836 un->un_f_devid_supported = TRUE; 27837 27838 /* 27839 * Spin up non-removable-media devices once it is attached 27840 */ 27841 un->un_f_attach_spinup = TRUE; 27842 27843 /* 27844 * According to SCSI specification, Sense data has two kinds of 27845 * format: fixed format, and descriptor format. At present, we 27846 * don't support descriptor format sense data for removable 27847 * media. 27848 */ 27849 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 27850 un->un_f_descr_format_supported = TRUE; 27851 } 27852 27853 /* 27854 * kstats are created only for non-removable media devices. 27855 * 27856 * Set this in sd.conf to 0 in order to disable kstats. The 27857 * default is 1, so they are enabled by default. 27858 */ 27859 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 27860 SD_DEVINFO(un), DDI_PROP_DONTPASS, 27861 "enable-partition-kstats", 1)); 27862 27863 /* 27864 * Check if HBA has set the "pm-capable" property. 27865 * If "pm-capable" exists and is non-zero then we can 27866 * power manage the device without checking the start/stop 27867 * cycle count log sense page. 27868 * 27869 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 27870 * then we should not power manage the device. 27871 * 27872 * If "pm-capable" doesn't exist then pm_capable_prop will 27873 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 

		/*
		 * Check whether the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, the device can
		 * be power-managed without checking the start/stop cycle
		 * count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * the device should not be power-managed.
		 *
		 * If "pm-capable" does not exist, pm_capable_prop will be
		 * set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd
		 * checks the start/stop cycle count log sense page and
		 * power-manages the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable",
		    SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The "pm-capable" property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to simplify later checks.
			 * "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {
		/*
		 * Hotpluggable devices must be watched as well, since that
		 * is the only way for userland applications to detect hot
		 * removal while the device is busy or mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;
	}
}

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun	*un;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	char		*dkl = NULL;
	diskaddr_t	real_addr = start_block;
	diskaddr_t	first_byte, end_block;
	size_t		buffer_size = reqlength;
	int		rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, so re-adjust the block
		 * number and save the byte offset to the beginning of the
		 * dk_label. (A worked example follows this block.)
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}
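
	/*
	 * Worked example with illustrative numbers only: for a system
	 * block size of 512 bytes, un_tgt_blocksize of 2048 bytes,
	 * start_block = 3, and reqlength = 1024 bytes:
	 *
	 *	first_byte  = 3 * 512                     = 1536
	 *	real_addr   = 1536 / 2048                 = 0
	 *	end_block   = (1536 + 1024 + 2047) / 2048 = 2
	 *	buffer_size = (2 - 0) * 2048              = 4096
	 *
	 * Since first_byte is not a multiple of 2048, the request is
	 * unaligned: a 4096-byte bounce buffer (dkl) is allocated, and the
	 * caller's 1024 bytes sit at byte offset 1536 within it.
	 */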

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs of the last one or two blocks are
	 * commonplace (due to the reading of metadata rather than data).
	 * Depending on the per-vendor/per-drive sense data, such a failed
	 * READ can trigger many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un,
			    start_block, real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun	*un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un,
			    (uint64_t *)&cap, &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
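
/*
 * For illustration, a sketch of how a cmlb-style consumer would query
 * capacity and block size through this interface. The direct calls below
 * stand in for cmlb's dispatch through sd_tgops, and the cookie encodes
 * the path flag just as the functions above expect; error handling is
 * elided:
 *
 *	diskaddr_t cap;
 *	uint32_t lbasize;
 *	void *cookie = (void *)(uintptr_t)SD_PATH_DIRECT;
 *
 *	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap, cookie) == 0 &&
 *	    sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &lbasize, cookie) == 0) {
 *		// cap: capacity in target blocks; lbasize: block size
 *	}
 */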